max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars) |
---|---|---|---|---|
utils/statistic.py | ZGSLZL/LGSC-for-FAS | 195 | 11148164 | import math
import numpy as np
def eval_state(probs, labels, thr):
predict = probs >= thr
TN = np.sum((labels == 0) & (predict == False))
FN = np.sum((labels == 1) & (predict == False))
FP = np.sum((labels == 0) & (predict == True))
TP = np.sum((labels == 1) & (predict == True))
return TN, FN, FP, TP
def calculate(probs, labels):
TN, FN, FP, TP = eval_state(probs, labels, 0.5)
APCER = 1.0 if (FP + TN == 0) else FP / float(FP + TN)
NPCER = 1.0 if (FN + TP == 0) else FN / float(FN + TP)
ACER = (APCER + NPCER) / 2.0
ACC = (TP + TN) / labels.shape[0]
return APCER, NPCER, ACER, ACC
def calculate_threshold(probs, labels, threshold):
TN, FN, FP, TP = eval_state(probs, labels, threshold)
ACC = (TP + TN) / labels.shape[0]
return ACC
def get_threshold(probs, grid_density):
Min, Max = min(probs), max(probs)
thresholds = []
for i in range(grid_density + 1):
thresholds.append(0.0 + i * 1.0 / float(grid_density))
thresholds.append(1.1)
return thresholds
def get_EER_states(probs, labels, grid_density=10000):
thresholds = get_threshold(probs, grid_density)
min_dist = 1.0
min_dist_states = []
FRR_list = []
FAR_list = []
for thr in thresholds:
TN, FN, FP, TP = eval_state(probs, labels, thr)
if(FN + TP == 0):
FRR = TPR = 1.0
FAR = FP / float(FP + TN)
TNR = TN / float(TN + FP)
elif(FP + TN == 0):
TNR = FAR = 1.0
FRR = FN / float(FN + TP)
TPR = TP / float(TP + FN)
else:
FAR = FP / float(FP + TN)
FRR = FN / float(FN + TP)
TNR = TN / float(TN + FP)
TPR = TP / float(TP + FN)
dist = math.fabs(FRR - FAR)
FAR_list.append(FAR)
FRR_list.append(FRR)
if dist <= min_dist:
min_dist = dist
min_dist_states = [FAR, FRR, thr]
EER = (min_dist_states[0] + min_dist_states[1]) / 2.0
thr = min_dist_states[2]
return EER, thr, FRR_list, FAR_list
def get_HTER_at_thr(probs, labels, thr):
TN, FN, FP, TP = eval_state(probs, labels, thr)
if (FN + TP == 0):
FRR = 1.0
FAR = FP / float(FP + TN)
elif(FP + TN == 0):
FAR = 1.0
FRR = FN / float(FN + TP)
else:
FAR = FP / float(FP + TN)
FRR = FN / float(FN + TP)
HTER = (FAR + FRR) / 2.0
return HTER
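# Minimal usage sketch (synthetic scores and labels, for illustration only):
# the helpers above expect NumPy arrays of probabilities in [0, 1] and binary labels.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    probs = rng.rand(1000)                                        # hypothetical spoof scores
    labels = (probs + 0.1 * rng.randn(1000) > 0.5).astype(int)    # hypothetical ground truth
    APCER, NPCER, ACER, ACC = calculate(probs, labels)
    EER, best_thr, _, _ = get_EER_states(probs, labels)
    HTER = get_HTER_at_thr(probs, labels, best_thr)
    print("APCER=%.4f NPCER=%.4f ACER=%.4f ACC=%.4f" % (APCER, NPCER, ACER, ACC))
    print("EER=%.4f at thr=%.4f, HTER=%.4f" % (EER, best_thr, HTER))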
|
deep_privacy/detection/__init__.py | skoskjei/DP-ATT | 1,128 | 11148235 | from .detection_api import BaseDetector, RCNNDetector, ImageAnnotation
from .build import build_detector
|
tests/providers/apache/spark/hooks/test_spark_jdbc_script.py | ChaseKnowlden/airflow | 15,947 | 11148238 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from unittest import mock
import pytest
from pyspark.sql.readwriter import DataFrameReader, DataFrameWriter
from airflow.providers.apache.spark.hooks.spark_jdbc_script import (
SPARK_READ_FROM_JDBC,
SPARK_WRITE_TO_JDBC,
_create_spark_session,
_parse_arguments,
_run_spark,
spark_read_from_jdbc,
spark_write_to_jdbc,
)
@pytest.fixture()
def mock_spark_session():
with mock.patch('airflow.providers.apache.spark.hooks.spark_jdbc_script.SparkSession') as mok:
yield mok
class TestSparkJDBCScript:
jdbc_arguments = [
'-cmdType',
'spark_to_jdbc',
'-url',
'jdbc:postgresql://localhost:5432/default',
'-user',
'user',
'-password',
'<PASSWORD>',
'-metastoreTable',
'hiveMcHiveFace',
'-jdbcTable',
'tableMcTableFace',
'-jdbcDriver',
'org.postgresql.Driver',
'-jdbcTruncate',
'false',
'-saveMode',
'append',
'-saveFormat',
'parquet',
'-batchsize',
'100',
'-fetchsize',
'200',
'-name',
'airflow-spark-jdbc-script-test',
'-numPartitions',
'10',
'-partitionColumn',
'columnMcColumnFace',
'-lowerBound',
'10',
'-upperBound',
'20',
'-createTableColumnTypes',
'columnMcColumnFace INTEGER(100), name CHAR(64),comments VARCHAR(1024)',
]
default_arguments = {
'cmd_type': 'spark_to_jdbc',
'url': 'jdbc:postgresql://localhost:5432/default',
'user': 'user',
'password': '<PASSWORD>',
'metastore_table': 'hiveMcHiveFace',
'jdbc_table': 'tableMcTableFace',
'jdbc_driver': 'org.postgresql.Driver',
'truncate': 'false',
'save_mode': 'append',
'save_format': 'parquet',
'batch_size': '100',
'fetch_size': '200',
'name': 'airflow-spark-jdbc-script-test',
'num_partitions': '10',
'partition_column': 'columnMcColumnFace',
'lower_bound': '10',
'upper_bound': '20',
'create_table_column_types': 'columnMcColumnFace INTEGER(100), name CHAR(64),'
'comments VARCHAR(1024)',
}
def test_parse_arguments(self):
# When
parsed_arguments = _parse_arguments(args=self.jdbc_arguments)
# Then
for argument_name, argument_value in self.default_arguments.items():
assert getattr(parsed_arguments, argument_name) == argument_value
@mock.patch('airflow.providers.apache.spark.hooks.spark_jdbc_script.spark_write_to_jdbc')
def test_run_spark_write_to_jdbc(self, mock_spark_write_to_jdbc, mock_spark_session):
# Given
arguments = _parse_arguments(['-cmdType', SPARK_WRITE_TO_JDBC] + self.jdbc_arguments[2:])
spark_session = mock_spark_session.builder.appName(arguments.name).enableHiveSupport().getOrCreate()
# When
_run_spark(arguments=arguments)
# Then
mock_spark_write_to_jdbc.assert_called_once_with(
spark_session,
arguments.url,
arguments.user,
arguments.password,
arguments.metastore_table,
arguments.jdbc_table,
arguments.jdbc_driver,
arguments.truncate,
arguments.save_mode,
arguments.batch_size,
arguments.num_partitions,
arguments.create_table_column_types,
)
@mock.patch('airflow.providers.apache.spark.hooks.spark_jdbc_script.spark_read_from_jdbc')
def test_run_spark_read_from_jdbc(self, mock_spark_read_from_jdbc, mock_spark_session):
# Given
arguments = _parse_arguments(['-cmdType', SPARK_READ_FROM_JDBC] + self.jdbc_arguments[2:])
spark_session = mock_spark_session.builder.appName(arguments.name).enableHiveSupport().getOrCreate()
# When
_run_spark(arguments=arguments)
# Then
mock_spark_read_from_jdbc.assert_called_once_with(
spark_session,
arguments.url,
arguments.user,
arguments.password,
arguments.metastore_table,
arguments.jdbc_table,
arguments.jdbc_driver,
arguments.save_mode,
arguments.save_format,
arguments.fetch_size,
arguments.num_partitions,
arguments.partition_column,
arguments.lower_bound,
arguments.upper_bound,
)
@pytest.mark.system("spark")
@mock.patch.object(DataFrameWriter, 'save')
def test_spark_write_to_jdbc(self, mock_writer_save):
# Given
arguments = _parse_arguments(self.jdbc_arguments)
spark_session = _create_spark_session(arguments)
spark_session.sql("CREATE TABLE IF NOT EXISTS " + arguments.metastore_table + " (key INT)")
# When
spark_write_to_jdbc(
spark_session=spark_session,
url=arguments.url,
user=arguments.user,
password=arguments.password,
metastore_table=arguments.metastore_table,
jdbc_table=arguments.jdbc_table,
driver=arguments.jdbc_driver,
truncate=arguments.truncate,
save_mode=arguments.save_mode,
batch_size=arguments.batch_size,
num_partitions=arguments.num_partitions,
create_table_column_types=arguments.create_table_column_types,
)
# Then
mock_writer_save.assert_called_once_with(mode=arguments.save_mode)
@pytest.mark.system("spark")
@mock.patch.object(DataFrameReader, 'load')
def test_spark_read_from_jdbc(self, mock_reader_load):
# Given
arguments = _parse_arguments(self.jdbc_arguments)
spark_session = _create_spark_session(arguments)
spark_session.sql("CREATE TABLE IF NOT EXISTS " + arguments.metastore_table + " (key INT)")
# When
spark_read_from_jdbc(
spark_session,
arguments.url,
arguments.user,
arguments.password,
arguments.metastore_table,
arguments.jdbc_table,
arguments.jdbc_driver,
arguments.save_mode,
arguments.save_format,
arguments.fetch_size,
arguments.num_partitions,
arguments.partition_column,
arguments.lower_bound,
arguments.upper_bound,
)
# Then
mock_reader_load().write.saveAsTable.assert_called_once_with(
arguments.metastore_table, format=arguments.save_format, mode=arguments.save_mode
)
|
s3prl/upstream/distiller/builder.py | Hem7513/s3prl | 856 | 11148252 | """
Builder for Distiller
Author: <NAME> (https://github.com/vectominist)
"""
import sys
import copy
import math
from distutils.util import strtobool
import yaml
import numpy as np
import torch
from torch import nn
from torch.nn.utils.rnn import pad_sequence
from .model import DistillerConfig, DistillerModel
import s3prl.optimizers
class DistillerBuilder(nn.Module):
"""
A builder class for all pre-trained Distiller.
    Child classes only need to implement the __init__() and forward() methods.
"""
def __init__(self, options, config, verbose=False):
super().__init__()
# read config
if config is not None:
self.config = yaml.load(open(config, "r"), Loader=yaml.FullLoader)
else:
# Since some old checkpoints contained pickled scheduler which needs 'optimizers'
# module which is now moved into s3prl package.
original_optimizer = sys.modules.get("optimizers")
sys.modules["optimizers"] = s3prl.optimizers
self.all_states = torch.load(options["ckpt_file"], map_location="cpu")
self.config = self.all_states["Config"]
del sys.modules["optimizers"]
if original_optimizer is not None:
sys.modules["optimizers"] = original_optimizer
# parse the options dict
self.load = bool(strtobool(options["load_pretrain"]))
self.no_grad = bool(strtobool(options["no_grad"]))
self.permute_input = bool(strtobool(options["permute_input"]))
# Set model config
self.model_config = DistillerConfig(self.config["distiller"])
self.hidden_size = self.model_config.encoder_embed_dim
self.max_input_length = 0
if self.max_input_length > 0 and verbose:
print("[DistillerBuilder] - Maximum input length: ", self.max_input_length)
def load_model(self, model, state_dict, verbose=False):
try:
model.load_state_dict(state_dict)
if verbose:
print("[DistillerBuilder] - Pre-trained weights loaded!")
return model
except:
raise RuntimeError("[DistillerBuilder] - Pre-trained weights NOT loaded!")
def process_input_data(self, wave, wave_len):
"""Process input data for the model"""
        # add an arbitrary batch axis B if input `wave` has shape (T,)
if wave.dim() == 1:
wave = wave.unsqueeze(0)
elif wave.dim() > 2:
raise ValueError
batch_size = wave.shape[0]
seq_len = wave.shape[1]
pad_mask = np.ones((batch_size, seq_len)) # (batch_size, seq_len)
# zero vectors for padding dimension
for idx in range(wave.shape[0]):
pad_mask[idx, wave_len[idx] :] = 0
wave = wave.to(dtype=torch.float32) # (batch_size, seq_len, 1)
pad_mask = torch.FloatTensor(pad_mask).to(
device=wave.device, dtype=torch.float32
) # (batch_size, seq_len)
return wave, pad_mask # (x, pad_mask)
def _forward(self, x, x_len, get_hidden=False, no_pred=False):
wave, pad_mask = self.process_input_data(x, x_len)
x = self.model(wave, pad_mask, get_hidden=get_hidden, no_pred=no_pred)
# x: (feat, feat_final, pred, pad_mask)
return x
class PretrainedDistiller(DistillerBuilder):
"""
Use this class to extract features from the Distiller model,
    or to fine-tune the pre-trained Distiller on any downstream task.
"""
def __init__(self, options, config=None, verbose=False):
super().__init__(options, config, verbose)
# Build model
self.model = DistillerModel(self.model_config)
self.model.eval() if self.no_grad else self.model.train()
self.out_dim = self.hidden_size
# Load from a PyTorch state_dict
if self.load:
self.model = self.load_model(
self.model, self.all_states["Distiller"], verbose
)
if verbose:
print(
"[PretrainedDistiller] - Number of parameters: "
+ str(
sum(
p.numel()
for p in self.model.parameters()
if p.requires_grad
)
)
)
def forward(self, wave_inputs, get_hidden=False, no_pred=False):
wave_len = [len(wave) for wave in wave_inputs]
wave_inputs = pad_sequence(wave_inputs, batch_first=True)
# (batch_size, audio_len)
if self.no_grad:
with torch.no_grad():
x = self._forward(
wave_inputs, wave_len, get_hidden=get_hidden, no_pred=no_pred
)
else:
x = self._forward(
wave_inputs, wave_len, get_hidden=get_hidden, no_pred=no_pred
)
return x
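# Minimal usage sketch (hypothetical checkpoint path and option values, for illustration
# only). Booleans are passed as strings because the builder parses them with strtobool.
if __name__ == "__main__":
    options = {
        "ckpt_file": "path/to/distiller.ckpt",  # hypothetical checkpoint
        "load_pretrain": "True",
        "no_grad": "True",
        "permute_input": "False",
    }
    extractor = PretrainedDistiller(options, config=None, verbose=True)
    wavs = [torch.randn(16000), torch.randn(12000)]  # two fake waveforms
    outputs = extractor(wavs)  # (feat, feat_final, pred, pad_mask) per the comment in _forward()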
|
code/networks/blocks.py | Maclory/Deep-Iterative-Collaboration | 276 | 11148255 |
import torch
import torch.nn as nn
from collections import OrderedDict
import sys
################
# Basic blocks
################
def activation(act_type='relu', inplace=True, slope=0.2, n_prelu=1):
act_type = act_type.lower()
layer = None
if act_type == 'relu':
layer = nn.ReLU(inplace)
elif act_type == 'lrelu':
layer = nn.LeakyReLU(slope, inplace)
elif act_type == 'prelu':
layer = nn.PReLU(num_parameters=n_prelu, init=slope)
else:
raise NotImplementedError('[ERROR] Activation layer [%s] is not implemented!'%act_type)
return layer
def norm(n_feature, norm_type='bn'):
norm_type = norm_type.lower()
layer = None
if norm_type =='bn':
layer = nn.BatchNorm2d(n_feature)
else:
raise NotImplementedError('[ERROR] Normalization layer [%s] is not implemented!'%norm_type)
return layer
def pad(pad_type, padding):
pad_type = pad_type.lower()
if padding == 0:
return None
layer = None
if pad_type == 'reflect':
layer = nn.ReflectionPad2d(padding)
elif pad_type == 'replicate':
layer = nn.ReplicationPad2d(padding)
else:
raise NotImplementedError('[ERROR] Padding layer [%s] is not implemented!'%pad_type)
return layer
def sequential(*args):
if len(args) == 1:
if isinstance(args[0], OrderedDict):
raise NotImplementedError('[ERROR] %s.sequential() does not support OrderedDict'%sys.modules[__name__])
else:
return args[0]
modules = []
for module in args:
if isinstance(module, nn.Sequential):
for submodule in module:
modules.append(submodule)
elif isinstance(module, nn.Module):
modules.append(module)
return nn.Sequential(*modules)
def ConvBlock(in_channels, out_channels, kernel_size, stride=1, dilation=1, bias=True, valid_padding=True, padding=0,\
act_type='relu', norm_type='bn', pad_type='zero', mode='CNA', groups=1):
assert (mode in ['CNA', 'NAC']), '[ERROR] Wrong mode in [%s]!'%sys.modules[__name__]
if valid_padding:
padding = get_valid_padding(kernel_size, dilation)
else:
pass
p = pad(pad_type, padding) if pad_type and pad_type != 'zero' else None
conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias, groups=groups)
if mode == 'CNA':
act = activation(act_type) if act_type else None
n = norm(out_channels, norm_type) if norm_type else None
return sequential(p, conv, n, act)
elif mode == 'NAC':
act = activation(act_type, inplace=False) if act_type else None
n = norm(in_channels, norm_type) if norm_type else None
return sequential(n, act, p, conv)
class MeanShift(nn.Conv2d):
def __init__(self, rgb_mean, rgb_std, sign=-1):
super(MeanShift, self).__init__(3, 3, kernel_size=1)
std = torch.Tensor(rgb_std)
self.weight.data = torch.eye(3).view(3, 3, 1, 1)
self.weight.data.div_(std.view(3, 1, 1, 1))
self.bias.data = sign * 255. * torch.Tensor(rgb_mean)
self.bias.data.div_(std)
self.requires_grad = False
################
# Advanced blocks
################
class ResBlock(nn.Module):
def __init__(self, in_channel, out_channel, mid_channel, kernel_size, stride=1, valid_padding=True, padding=0, dilation=1, bias=True, \
pad_type='zero', norm_type='bn', act_type='relu', mode='CNA', res_scale=1, groups=1):
super(ResBlock, self).__init__()
conv0 = ConvBlock(in_channel, mid_channel, kernel_size, stride, dilation, bias, valid_padding, padding, act_type, norm_type, pad_type, mode, groups=groups)
act_type = None
norm_type = None
conv1 = ConvBlock(mid_channel, out_channel, kernel_size, stride, dilation, bias, valid_padding, padding, act_type, norm_type, pad_type, mode, groups=groups)
self.res = sequential(conv0, conv1)
self.res_scale = res_scale
def forward(self, x):
res = self.res(x).mul(self.res_scale)
return x + res
class FeatureHeatmapFusingBlock(nn.Module):
def __init__(self,
feat_channel_in,
num_heatmap,
num_block,
num_mid_channel=None):
super().__init__()
self.num_heatmap = num_heatmap
res_block_channel = feat_channel_in * num_heatmap
if num_mid_channel is None:
self.num_mid_channel = num_heatmap * feat_channel_in
else:
self.num_mid_channel = num_mid_channel
self.conv_in = ConvBlock(feat_channel_in, res_block_channel, 1, norm_type=None, act_type='lrelu')
self.resnet = nn.Sequential(*[
ResBlock(res_block_channel,
res_block_channel,
self.num_mid_channel,
3,
norm_type=None,
act_type='lrelu',
groups=num_heatmap) for _ in range(num_block)
])
def forward(self, feature, heatmap, debug=False):
assert self.num_heatmap == heatmap.size(1)
batch_size = heatmap.size(0)
w, h = feature.shape[-2:]
feature = self.conv_in(feature)
feature = self.resnet(feature) # B * (num_heatmap*feat_channel_in) * h * w
attention = nn.functional.softmax(heatmap, dim=1) # B * num_heatmap * h * w
if debug:
feature = feature.view(batch_size, self.num_heatmap, -1, w, h)
return feature, attention.unsqueeze(2)
else:
feature = feature.view(batch_size, self.num_heatmap, -1, w, h) * attention.unsqueeze(2)
feature = feature.sum(1)
return feature
################
# Upsampler
################
def DeconvBlock(in_channels, out_channels, kernel_size, stride=1, dilation=1, bias=True, padding=0, \
act_type='relu', norm_type='bn', pad_type='zero', mode='CNA'):
assert (mode in ['CNA', 'NAC']), '[ERROR] Wrong mode in [%s]!'%sys.modules[__name__]
p = pad(pad_type, padding) if pad_type and pad_type != 'zero' else None
deconv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, padding, dilation=dilation, bias=bias)
if mode == 'CNA':
act = activation(act_type) if act_type else None
n = norm(out_channels, norm_type) if norm_type else None
return sequential(p, deconv, n, act)
elif mode == 'NAC':
act = activation(act_type, inplace=False) if act_type else None
n = norm(in_channels, norm_type) if norm_type else None
return sequential(n, act, p, deconv)
################
# helper funcs
################
def get_valid_padding(kernel_size, dilation):
"""
Padding value to remain feature size.
"""
kernel_size = kernel_size + (kernel_size-1)*(dilation-1)
padding = (kernel_size-1) // 2
return padding
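# Minimal shape-check sketch (hypothetical sizes, for illustration only): with
# valid_padding=True a 3x3 conv with dilation 2 keeps the spatial resolution unchanged.
if __name__ == '__main__':
    block = ConvBlock(16, 32, kernel_size=3, dilation=2, act_type='lrelu', norm_type=None)
    x = torch.randn(1, 16, 64, 64)
    print(block(x).shape)  # expected: torch.Size([1, 32, 64, 64])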
|
examples/slack/channels.py | q0w/snug | 123 | 11148275 | """queries for the 'channels' method family"""
import typing as t
import snug
from .query import paginated_retrieval, json_post
from .types import Channel, Page
from .load import registry
load_channel_list = registry(t.List[Channel])
@paginated_retrieval('channels.list', itemtype=Channel)
def list_(*, cursor: str=None,
exclude_archived: bool=None,
exclude_members: bool=None,
limit: int=None) -> snug.Query[Page[t.List[Channel]]]:
"""list all channels"""
kwargs = {
'exclude_archived': exclude_archived,
'exclude_members': exclude_members,
'limit': limit
}
response = yield {'cursor': cursor, **kwargs}
try:
next_cursor = response['response_metadata']['next_cursor']
except KeyError:
next_query = None
else:
next_query = list_(**kwargs, cursor=next_cursor)
return Page(
load_channel_list(response['channels']),
next_query=next_query,
)
@json_post('channels.create', rtype=Channel, key='channel')
def create(name: str, *,
validate: bool=None) -> snug.Query[Channel]:
"""create a new channel"""
return {'name': name, 'validate': validate}
|
tests/integrations/excepthook/test_excepthook.py | cmalek/sentry-python | 1,213 | 11148278 |
import pytest
import sys
import subprocess
from textwrap import dedent
def test_excepthook(tmpdir):
app = tmpdir.join("app.py")
app.write(
dedent(
"""
from sentry_sdk import init, transport
def send_event(self, event):
print("capture event was called")
print(event)
transport.HttpTransport._send_event = send_event
init("http://foobar@localhost/123")
frame_value = "LOL"
1/0
"""
)
)
with pytest.raises(subprocess.CalledProcessError) as excinfo:
subprocess.check_output([sys.executable, str(app)], stderr=subprocess.STDOUT)
output = excinfo.value.output
print(output)
assert b"ZeroDivisionError" in output
assert b"LOL" in output
assert b"capture event was called" in output
def test_always_value_excepthook(tmpdir):
app = tmpdir.join("app.py")
app.write(
dedent(
"""
import sys
from sentry_sdk import init, transport
from sentry_sdk.integrations.excepthook import ExcepthookIntegration
def send_event(self, event):
print("capture event was called")
print(event)
transport.HttpTransport._send_event = send_event
sys.ps1 = "always_value_test"
init("http://foobar@localhost/123",
integrations=[ExcepthookIntegration(always_run=True)]
)
frame_value = "LOL"
1/0
"""
)
)
with pytest.raises(subprocess.CalledProcessError) as excinfo:
subprocess.check_output([sys.executable, str(app)], stderr=subprocess.STDOUT)
output = excinfo.value.output
print(output)
assert b"ZeroDivisionError" in output
assert b"LOL" in output
assert b"capture event was called" in output
|
armada_backend/api_health.py | firesoft/armada | 281 | 11148298 |
import falcon
from armada_backend.api_base import ApiCommand
from armada_backend.utils import exists_service
from armada_command.consul.consul import consul_put
def _get_consul_health_endpoint(health_check_code):
if health_check_code == 0:
return 'pass'
if health_check_code == 1:
return 'warn'
return 'fail'
class HealthV1(ApiCommand):
def on_put(self, req, resp, microservice_id):
if not exists_service(microservice_id):
resp.status = falcon.HTTP_404
resp.json = {
'error': 'Could not find service "{microservice_id}", try registering it first.'.format(**locals()),
'error_id': 'SERVICE_NOT_FOUND',
}
return
try:
input_json = req.json
health_check_code = input_json['health_check_code']
health_endpoint = _get_consul_health_endpoint(health_check_code)
r = consul_put('agent/check/{health_endpoint}/service:{microservice_id}'.format(**locals()))
r.raise_for_status()
except Exception as e:
resp.json = {'error': 'Could not mark service health check status: {}'.format(repr(e))}
resp.status = falcon.HTTP_500
|
k8s/images/codalab/apps/coopetitions/admin.py | abdulari/codalab-competitions | 333 | 11148332 |
from django.contrib import admin
from .models import Like
admin.site.register(Like)
|
examples/py/all-exchanges.py | diwenshi61/ccxt | 24,910 | 11148339 |
# -*- coding: utf-8 -*-
import os
import sys
from pprint import pprint
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
import ccxt # noqa: E402
print('CCXT Version:', ccxt.__version__)
for exchange_id in ccxt.exchanges:
try:
exchange = getattr(ccxt, exchange_id)()
print(exchange_id)
# do what you want with this exchange
# pprint(dir(exchange))
except Exception as e:
print(e) |
ui/pypesvds/plugins/emailextractor/emailextractor.py | onfire73/pypeskg | 117 | 11148367 | import re
import logging
#import traceback
from pypes.component import Component
log = logging.getLogger(__name__)
class Email(Component):
__metatype__ = 'EXTRACTOR'
def __init__(self):
Component.__init__(self)
# define email regular expression string
emailre = r'((?:[a-zA-Z0-9_\-\.]+)@(?:(?:\[[0-9]{1,3}\.[0-9]{1,3}\.' \
r'[0-9]{1,3}\.)|(?:(?:[a-zA-Z0-9\-]+\.)+))(?:[a-zA-Z]{2,4}' \
r'|[0-9]{1,3})(?:\]?))'
# compile the regular expression
self._reobj = re.compile(emailre)
# set component parameters
self.set_parameter('fields', '')
self.set_parameter('destination', 'emails')
log.info('Component Initialized: %s' % self.__class__.__name__)
def run(self):
while True:
# get parameters outside doc loop for better performace
try:
fields = self.get_parameter('fields')
if fields is None:
                    raise ValueError('Fields not set')
destination = self.get_parameter('destination')
if destination is None:
                    raise ValueError('Destination not set')
# convert to a list of field names
fields = [f.strip() for f in fields.split(',')]
except Exception as e:
log.error('Component Failed: %s' % self.__class__.__name__)
log.error('Reason: %s' % str(e))
# send all docs without processing
for d in self.receive_all('in'):
self.send('out', d)
self.yield_ctrl()
continue # so next time we are called we continue at the top
# loop though each document on input port
for doc in self.receive_all('in'):
try:
# use sets to avoid duplicates
emails = set()
for field in fields:
data = doc.get(field, '')
# search all string objects in a multivalued field
if doc.is_multivalued(field):
for val in data:
if isinstance(val, (str, unicode)):
emails.update(self._reobj.findall(val))
else:
if isinstance(data, (str, unicode)):
emails.update(self._reobj.findall(data))
# add all emails to destination field
if emails:
doc.set(destination, list(emails), multi=True)
except Exception as e:
log.error('Component Failed: %s' % self.__class__.__name__)
log.error('Reason: %s' % str(e))
#log.error(traceback.print_exc())
self.send('out', doc)
self.yield_ctrl()
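# Minimal regex sketch (illustration only, independent of the pypes runtime): the same
# pattern compiled in __init__ above can be exercised directly with the `re` module.
if __name__ == '__main__':
    demo_re = re.compile(r'((?:[a-zA-Z0-9_\-\.]+)@(?:(?:\[[0-9]{1,3}\.[0-9]{1,3}\.'
                         r'[0-9]{1,3}\.)|(?:(?:[a-zA-Z0-9\-]+\.)+))(?:[a-zA-Z]{2,4}'
                         r'|[0-9]{1,3})(?:\]?))')
    print(demo_re.findall('Mail support@example.com or sales@mail.example.org.'))
    # expected: ['support@example.com', 'sales@mail.example.org']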
|
tests/components/huisbaasje/test_data.py | MrDelik/core | 30,023 | 11148374 | <gh_stars>1000+
"""Test data for the tests of the Huisbaasje integration."""
MOCK_CURRENT_MEASUREMENTS = {
"electricity": {
"measurement": {
"time": "2020-11-18T15:17:24.000Z",
"rate": 1011.6666666666667,
"value": 0.0033333333333333335,
"costPerHour": 0.20233333333333337,
"counterValue": 409.17166666631937,
},
"thisDay": {"value": 3.296665869, "cost": 0.6593331738},
"thisWeek": {"value": 17.509996085, "cost": 3.5019992170000003},
"thisMonth": {"value": 103.28830788, "cost": 20.657661576000002},
"thisYear": {"value": 672.9781177300001, "cost": 134.595623546},
},
"electricityIn": {
"measurement": {
"time": "2020-11-18T15:17:24.000Z",
"rate": 1011.6666666666667,
"value": 0.0033333333333333335,
"costPerHour": 0.20233333333333337,
"counterValue": 409.17166666631937,
},
"thisDay": {"value": 2.669999453, "cost": 0.5339998906},
"thisWeek": {"value": 15.328330291, "cost": 3.0656660582},
"thisMonth": {"value": 72.986651896, "cost": 14.5973303792},
"thisYear": {"value": 409.214880212, "cost": 81.84297604240001},
},
"electricityInLow": {
"measurement": None,
"thisDay": {"value": 0.6266664160000001, "cost": 0.1253332832},
"thisWeek": {"value": 2.181665794, "cost": 0.43633315880000006},
"thisMonth": {"value": 30.301655984000003, "cost": 6.060331196800001},
"thisYear": {"value": 263.76323751800004, "cost": 52.75264750360001},
},
"electricityOut": {
"measurement": None,
"thisDay": {"value": 1.51234, "cost": 0.0},
"thisWeek": {"value": 2.5, "cost": 0.0},
"thisMonth": {"value": 3.5, "cost": 0.0},
"thisYear": {"value": 4.5, "cost": 0.0},
},
"electricityOutLow": {
"measurement": None,
"thisDay": {"value": 1.09281, "cost": 0.0},
"thisWeek": {"value": 2.0, "cost": 0.0},
"thisMonth": {"value": 3.0, "cost": 0.0},
"thisYear": {"value": 4.0, "cost": 0.0},
},
"gas": {
"measurement": {
"time": "2020-11-18T15:17:29.000Z",
"rate": 0.0,
"value": 0.0,
"costPerHour": 0.0,
"counterValue": 116.73000000002281,
},
"thisDay": {"value": 1.07, "cost": 0.642},
"thisWeek": {"value": 5.634224386000001, "cost": 3.3805346316000007},
"thisMonth": {"value": 39.14, "cost": 23.483999999999998},
"thisYear": {"value": 116.73, "cost": 70.038},
},
}
MOCK_LIMITED_CURRENT_MEASUREMENTS = {
"electricity": {
"measurement": {
"time": "2020-11-18T15:17:24.000Z",
"rate": 1011.6666666666667,
"value": 0.0033333333333333335,
"costPerHour": 0.20233333333333337,
"counterValue": 409.17166666631937,
},
"thisDay": {"value": 3.296665869, "cost": 0.6593331738},
"thisWeek": {"value": 17.509996085, "cost": 3.5019992170000003},
"thisMonth": {"value": 103.28830788, "cost": 20.657661576000002},
"thisYear": {"value": 672.9781177300001, "cost": 134.595623546},
}
}
|
lang/py/avro/codecs.py | gkomlossi/avro | 1,960 | 11148400 | #!/usr/bin/env python3
##
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains Codecs for Python Avro.
Note that the word "codecs" means "compression/decompression algorithms" in the
Avro world (https://avro.apache.org/docs/current/spec.html#Object+Container+Files),
so don't confuse it with the Python's "codecs", which is a package mainly for
converting charsets (https://docs.python.org/3/library/codecs.html).
"""
import abc
import binascii
import io
import struct
import zlib
from typing import Dict, Tuple, Type
import avro.errors
import avro.io
#
# Constants
#
STRUCT_CRC32 = struct.Struct(">I") # big-endian unsigned int
def _check_crc32(bytes_: bytes, checksum: bytes) -> None:
if binascii.crc32(bytes_) & 0xFFFFFFFF != STRUCT_CRC32.unpack(checksum)[0]:
raise avro.errors.AvroException("Checksum failure")
try:
import bz2
has_bzip2 = True
except ImportError:
has_bzip2 = False
try:
import snappy
has_snappy = True
except ImportError:
has_snappy = False
try:
import zstandard as zstd
has_zstandard = True
except ImportError:
has_zstandard = False
class Codec(abc.ABC):
"""Abstract base class for all Avro codec classes."""
@staticmethod
@abc.abstractmethod
def compress(data: bytes) -> Tuple[bytes, int]:
"""Compress the passed data.
:param data: a byte string to be compressed
:type data: str
:rtype: tuple
:return: compressed data and its length
"""
@staticmethod
@abc.abstractmethod
def decompress(readers_decoder: avro.io.BinaryDecoder) -> avro.io.BinaryDecoder:
"""Read compressed data via the passed BinaryDecoder and decompress it.
:param readers_decoder: a BinaryDecoder object currently being used for
reading an object container file
:type readers_decoder: avro.io.BinaryDecoder
:rtype: avro.io.BinaryDecoder
:return: a newly instantiated BinaryDecoder object that contains the
decompressed data which is wrapped by a StringIO
"""
class NullCodec(Codec):
@staticmethod
def compress(data: bytes) -> Tuple[bytes, int]:
return data, len(data)
@staticmethod
def decompress(readers_decoder: avro.io.BinaryDecoder) -> avro.io.BinaryDecoder:
readers_decoder.skip_long()
return readers_decoder
class DeflateCodec(Codec):
@staticmethod
def compress(data: bytes) -> Tuple[bytes, int]:
# The first two characters and last character are zlib
# wrappers around deflate data.
compressed_data = zlib.compress(data)[2:-1]
return compressed_data, len(compressed_data)
@staticmethod
def decompress(readers_decoder: avro.io.BinaryDecoder) -> avro.io.BinaryDecoder:
# Compressed data is stored as (length, data), which
# corresponds to how the "bytes" type is encoded.
data = readers_decoder.read_bytes()
# -15 is the log of the window size; negative indicates
# "raw" (no zlib headers) decompression. See zlib.h.
uncompressed = zlib.decompress(data, -15)
return avro.io.BinaryDecoder(io.BytesIO(uncompressed))
if has_bzip2:
class BZip2Codec(Codec):
@staticmethod
def compress(data: bytes) -> Tuple[bytes, int]:
compressed_data = bz2.compress(data)
return compressed_data, len(compressed_data)
@staticmethod
def decompress(readers_decoder: avro.io.BinaryDecoder) -> avro.io.BinaryDecoder:
length = readers_decoder.read_long()
data = readers_decoder.read(length)
uncompressed = bz2.decompress(data)
return avro.io.BinaryDecoder(io.BytesIO(uncompressed))
if has_snappy:
class SnappyCodec(Codec):
@staticmethod
def compress(data: bytes) -> Tuple[bytes, int]:
compressed_data = snappy.compress(data)
# A 4-byte, big-endian CRC32 checksum
compressed_data += STRUCT_CRC32.pack(binascii.crc32(data) & 0xFFFFFFFF)
return compressed_data, len(compressed_data)
@staticmethod
def decompress(readers_decoder: avro.io.BinaryDecoder) -> avro.io.BinaryDecoder:
# Compressed data includes a 4-byte CRC32 checksum
length = readers_decoder.read_long()
data = readers_decoder.read(length - 4)
uncompressed = snappy.decompress(data)
checksum = readers_decoder.read(4)
_check_crc32(uncompressed, checksum)
return avro.io.BinaryDecoder(io.BytesIO(uncompressed))
if has_zstandard:
class ZstandardCodec(Codec):
@staticmethod
def compress(data: bytes) -> Tuple[bytes, int]:
compressed_data = zstd.ZstdCompressor().compress(data)
return compressed_data, len(compressed_data)
@staticmethod
def decompress(readers_decoder: avro.io.BinaryDecoder) -> avro.io.BinaryDecoder:
length = readers_decoder.read_long()
data = readers_decoder.read(length)
uncompressed = bytearray()
dctx = zstd.ZstdDecompressor()
with dctx.stream_reader(io.BytesIO(data)) as reader:
while True:
chunk = reader.read(16384)
if not chunk:
break
uncompressed.extend(chunk)
return avro.io.BinaryDecoder(io.BytesIO(uncompressed))
KNOWN_CODECS: Dict[str, Type[Codec]] = {
name[:-5].lower(): class_
for name, class_ in globals().items()
if class_ != Codec and name.endswith("Codec") and isinstance(class_, type) and issubclass(class_, Codec)
}
def get_codec(codec_name: str) -> Type[Codec]:
try:
return KNOWN_CODECS[codec_name]
except KeyError:
raise avro.errors.UnsupportedCodec(f"Unsupported codec: {codec_name}. (Is it installed?)")
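# Minimal usage sketch (illustration only): codecs are looked up by name and expose a
# compress/decompress pair; "null" and "deflate" are always available, the rest depend
# on optional third-party packages.
if __name__ == "__main__":
    codec = get_codec("deflate")
    compressed, length = codec.compress(b"avro" * 1000)
    print("compressed 4000 bytes down to", length)
    try:
        get_codec("snappy")  # raises UnsupportedCodec if python-snappy is not installed
    except avro.errors.UnsupportedCodec as err:
        print(err)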
|
realtime/rt/aligner.py | kho/cdec | 114 | 11148420 |
import logging
import os
import sys
import subprocess
import threading
import util
logger = logging.getLogger('rt.aligner')
class ForceAligner:
def __init__(self, fwd_params, fwd_err, rev_params, rev_err, heuristic='grow-diag-final-and'):
cdec_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
fast_align = os.path.join(cdec_root, 'word-aligner', 'fast_align')
atools = os.path.join(cdec_root, 'utils', 'atools')
(fwd_T, fwd_m) = self.read_err(fwd_err)
(rev_T, rev_m) = self.read_err(rev_err)
fwd_cmd = [fast_align, '-i', '-', '-d', '-T', fwd_T, '-m', fwd_m, '-f', fwd_params]
rev_cmd = [fast_align, '-i', '-', '-d', '-T', rev_T, '-m', rev_m, '-f', rev_params, '-r']
tools_cmd = [atools, '-i', '-', '-j', '-', '-c', heuristic]
logger.info('Executing: {}'.format(' '.join(fwd_cmd)))
self.fwd_align = util.popen_io(fwd_cmd)
logger.info('Executing: {}'.format(' '.join(rev_cmd)))
self.rev_align = util.popen_io(rev_cmd)
logger.info('Executing: {}'.format(' '.join(tools_cmd)))
self.tools = util.popen_io(tools_cmd)
# Used to guarantee thread safety
self.lock = util.FIFOLock()
def align(self, source, target):
'''Threadsafe, FIFO'''
return self.align_formatted('{} ||| {}'.format(source, target))
def align_formatted(self, line):
'''Threadsafe, FIFO'''
self.lock.acquire()
self.fwd_align.stdin.write('{}\n'.format(line))
self.rev_align.stdin.write('{}\n'.format(line))
# f words ||| e words ||| links ||| score
fwd_line = self.fwd_align.stdout.readline().split('|||')[2].strip()
rev_line = self.rev_align.stdout.readline().split('|||')[2].strip()
self.tools.stdin.write('{}\n'.format(fwd_line))
self.tools.stdin.write('{}\n'.format(rev_line))
al_line = self.tools.stdout.readline().strip()
self.lock.release()
return al_line
def close(self, force=False):
if not force:
self.lock.acquire()
self.fwd_align.stdin.close()
self.fwd_align.wait()
self.rev_align.stdin.close()
self.rev_align.wait()
self.tools.stdin.close()
self.tools.wait()
if not force:
self.lock.release()
def read_err(self, err):
(T, m) = ('', '')
for line in open(err):
# expected target length = source length * N
if 'expected target length' in line:
m = line.split()[-1]
# final tension: N
elif 'final tension' in line:
T = line.split()[-1]
return (T, m)
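# Minimal sketch of the .err parsing done by read_err (hypothetical values, for
# illustration only): fast_align writes the tension and length-ratio lines parsed above.
if __name__ == '__main__':
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.err', delete=False) as f:
        f.write('expected target length = source length * 0.85\n')
        f.write('final tension: 4.2\n')
        err_path = f.name
    # read_err never touches self, so it can be exercised without building the aligners
    print(ForceAligner.read_err(None, err_path))  # ('4.2', '0.85')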
|
samples/subscription/billing_agreements/search_transactions.py | Hey-Marvelous/PayPal-Python-SDK | 653 | 11148464 | from paypalrestsdk import BillingAgreement, ResourceNotFound
import logging
BILLING_AGREEMENT_ID = "I-HT38K76XPMGJ"
try:
billing_agreement = BillingAgreement.find(BILLING_AGREEMENT_ID)
start_date = "2014-07-01"
end_date = "2014-07-20"
transactions = billing_agreement.search_transactions(start_date, end_date)
for transaction in transactions.agreement_transaction_list:
print(" -> Transaction[%s]" % (transaction.transaction_id))
except ResourceNotFound as error:
print("Billing Agreement Not Found")
|
venv/lib/python3.9/site-packages/pytube/version.py | dajor/youtube2 | 4,079 | 11148474 |
__version__ = "12.1.0"
if __name__ == "__main__":
print(__version__)
|
components/aws/sagemaker/tests/integration_tests/utils/argo_utils.py | Strasser-Pablo/pipelines | 2,860 | 11148476 |
import utils
def print_workflow_logs(workflow_name):
output = get_workflow_logs(workflow_name)
print(f"workflow logs:\n", output.decode())
def find_in_logs(workflow_name, sub_str):
logs = get_workflow_logs(workflow_name).decode()
return logs.find(sub_str) >= 0
def get_workflow_logs(workflow_name):
return utils.run_command(
f"argo logs {workflow_name} -n {utils.get_kfp_namespace()}"
)
def error_in_cw_logs(workflow_name):
ERROR_MESSAGE = "Error in fetching CloudWatch logs for SageMaker job"
return find_in_logs(workflow_name, ERROR_MESSAGE)
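# Minimal usage sketch (hypothetical workflow name, for illustration only):
if __name__ == "__main__":
    workflow = "my-training-pipeline-abc123"  # hypothetical Argo workflow
    if error_in_cw_logs(workflow):
        print_workflow_logs(workflow)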
|
10_pipeline/sagemaker_mlops/sagemaker-project-modelbuild/pipelines/dsoaws/evaluate_model_metrics.py | ichen20/oreilly_book | 2,327 | 11148482 |
import functools
import multiprocessing
from datetime import datetime
import subprocess
import sys
subprocess.check_call([sys.executable, "-m", "conda", "install", "-c", "anaconda", "tensorflow==2.3.0", "-y"])
import tensorflow as tf
from tensorflow import keras
subprocess.check_call([sys.executable, "-m", "conda", "install", "-c", "conda-forge", "transformers==3.5.1", "-y"])
from transformers import DistilBertTokenizer
from transformers import DistilBertConfig
subprocess.check_call([sys.executable, "-m", "pip", "install", "matplotlib==3.2.1"])
import pandas as pd
import os
import re
import collections
import argparse
import json
import os
import numpy as np
import csv
import glob
from pathlib import Path
import tarfile
import itertools
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
from tensorflow import keras
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.utils import resample
tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
CLASSES = [1, 2, 3, 4, 5]
config = DistilBertConfig.from_pretrained(
"distilbert-base-uncased",
num_labels=len(CLASSES),
id2label={0: 1, 1: 2, 2: 3, 3: 4, 4: 5},
label2id={1: 0, 2: 1, 3: 2, 4: 3, 5: 4},
)
def list_arg(raw_value):
"""argparse type for a list of strings"""
return str(raw_value).split(",")
def parse_args():
    # Unlike SageMaker training jobs (which have `SM_HOSTS` and `SM_CURRENT_HOST` env vars), processing jobs need to parse the resource config file directly
resconfig = {}
try:
with open("/opt/ml/config/resourceconfig.json", "r") as cfgfile:
resconfig = json.load(cfgfile)
except FileNotFoundError:
print("/opt/ml/config/resourceconfig.json not found. current_host is unknown.")
pass # Ignore
# Local testing with CLI args
parser = argparse.ArgumentParser(description="Process")
parser.add_argument(
"--hosts",
type=list_arg,
default=resconfig.get("hosts", ["unknown"]),
help="Comma-separated list of host names running the job",
)
parser.add_argument(
"--current-host",
type=str,
default=resconfig.get("current_host", "unknown"),
help="Name of this host running the job",
)
parser.add_argument(
"--input-data",
type=str,
default="/opt/ml/processing/input/data",
)
parser.add_argument(
"--input-model",
type=str,
default="/opt/ml/processing/input/model",
)
parser.add_argument(
"--output-data",
type=str,
default="/opt/ml/processing/output",
)
parser.add_argument(
"--max-seq-length",
type=int,
default=64,
)
return parser.parse_args()
def process(args):
print("Current host: {}".format(args.current_host))
print("input_data: {}".format(args.input_data))
print("input_model: {}".format(args.input_model))
print("Listing contents of input model dir: {}".format(args.input_model))
input_files = os.listdir(args.input_model)
for file in input_files:
print(file)
model_tar_path = "{}/model.tar.gz".format(args.input_model)
model_tar = tarfile.open(model_tar_path)
model_tar.extractall(args.input_model)
model_tar.close()
model = keras.models.load_model("{}/tensorflow/saved_model/0".format(args.input_model))
print(model)
def predict(text):
encode_plus_tokens = tokenizer.encode_plus(
text, pad_to_max_length=True, max_length=args.max_seq_length, truncation=True, return_tensors="tf"
)
# The id from the pre-trained BERT vocabulary that represents the token. (Padding of 0 will be used if the # of tokens is less than `max_seq_length`)
input_ids = encode_plus_tokens["input_ids"]
# Specifies which tokens BERT should pay attention to (0 or 1). Padded `input_ids` will have 0 in each of these vector elements.
input_mask = encode_plus_tokens["attention_mask"]
outputs = model.predict(x=(input_ids, input_mask))
prediction = [{"label": config.id2label[item.argmax()], "score": item.max().item()} for item in outputs]
return prediction[0]["label"]
print(
"""I loved it! I will recommend this to everyone.""",
predict("""I loved it! I will recommend this to everyone."""),
)
print("""It's OK.""", predict("""It's OK."""))
print(
"""Really bad. I hope they don't make this anymore.""",
predict("""Really bad. I hope they don't make this anymore."""),
)
###########################################################################################
# TODO: Replace this with glob for all files and remove test_data/ from the model.tar.gz #
###########################################################################################
# evaluation_data_path = '/opt/ml/processing/input/data/'
print("Listing contents of input data dir: {}".format(args.input_data))
input_files = os.listdir(args.input_data)
test_data_path = "{}/amazon_reviews_us_Digital_Software_v1_00.tsv.gz".format(args.input_data)
print("Using only {} to evaluate.".format(test_data_path))
df_test_reviews = pd.read_csv(test_data_path, delimiter="\t", quoting=csv.QUOTE_NONE, compression="gzip")[
["review_body", "star_rating"]
]
df_test_reviews = df_test_reviews.sample(n=100)
df_test_reviews.shape
df_test_reviews.head()
y_test = df_test_reviews["review_body"].map(predict)
y_test
y_actual = df_test_reviews["star_rating"]
y_actual
print(classification_report(y_true=y_test, y_pred=y_actual))
accuracy = accuracy_score(y_true=y_test, y_pred=y_actual)
print("Test accuracy: ", accuracy)
def plot_conf_mat(cm, classes, title, cmap):
print(cm)
plt.imshow(cm, interpolation="nearest", cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = "d"
thresh = cm.max() / 2.0
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(
j,
i,
format(cm[i, j], fmt),
horizontalalignment="center",
color="black" if cm[i, j] > thresh else "black",
)
plt.tight_layout()
plt.ylabel("True label")
plt.xlabel("Predicted label")
cm = confusion_matrix(y_true=y_test, y_pred=y_actual)
plt.figure()
fig, ax = plt.subplots(figsize=(10, 5))
plot_conf_mat(cm, classes=CLASSES, title="Confusion Matrix", cmap=plt.cm.Greens)
# Save the confusion matrix
plt.show()
# Model Output
metrics_path = os.path.join(args.output_data, "metrics/")
os.makedirs(metrics_path, exist_ok=True)
plt.savefig("{}/confusion_matrix.png".format(metrics_path))
report_dict = {
"metrics": {
"accuracy": {
"value": accuracy,
},
},
}
evaluation_path = "{}/evaluation.json".format(metrics_path)
with open(evaluation_path, "w") as f:
f.write(json.dumps(report_dict))
print("Listing contents of output dir: {}".format(args.output_data))
output_files = os.listdir(args.output_data)
for file in output_files:
print(file)
print("Listing contents of output/metrics dir: {}".format(metrics_path))
output_files = os.listdir("{}".format(metrics_path))
for file in output_files:
print(file)
print("Complete")
if __name__ == "__main__":
args = parse_args()
print("Loaded arguments:")
print(args)
print("Environment variables:")
print(os.environ)
process(args)
|
nuplan/planning/script/builders/training_builder.py | motional/nuplan-devkit | 128 | 11148483 |
import logging
from pathlib import Path
from typing import cast
import pytorch_lightning as pl
import pytorch_lightning.loggers
import pytorch_lightning.plugins
import torch
from omegaconf import DictConfig, OmegaConf
from nuplan.planning.script.builders.data_augmentation_builder import build_agent_augmentor
from nuplan.planning.script.builders.objectives_builder import build_objectives
from nuplan.planning.script.builders.scenario_builder import build_scenarios
from nuplan.planning.script.builders.splitter_builder import build_splitter
from nuplan.planning.script.builders.training_callback_builder import build_callbacks
from nuplan.planning.script.builders.training_metrics_builder import build_training_metrics
from nuplan.planning.script.builders.utils.utils_checkpoint import extract_last_checkpoint_from_experiment
from nuplan.planning.training.data_loader.datamodule import DataModule
from nuplan.planning.training.modeling.lightning_module_wrapper import LightningModuleWrapper
from nuplan.planning.training.modeling.torch_module_wrapper import TorchModuleWrapper
from nuplan.planning.training.preprocessing.feature_preprocessor import FeaturePreprocessor
from nuplan.planning.utils.multithreading.worker_pool import WorkerPool
logger = logging.getLogger(__name__)
def build_lightning_datamodule(
cfg: DictConfig, worker: WorkerPool, model: TorchModuleWrapper
) -> pl.LightningDataModule:
"""
Build the lightning datamodule from the config.
:param cfg: Omegaconf dictionary.
:param model: NN model used for training.
:param worker: Worker to submit tasks which can be executed in parallel.
:return: Instantiated datamodule object.
"""
# Build features and targets
feature_builders = model.get_list_of_required_feature()
target_builders = model.get_list_of_computed_target()
# Build splitter
splitter = build_splitter(cfg.splitter)
# Create feature preprocessor
feature_preprocessor = FeaturePreprocessor(
cache_path=cfg.cache.cache_path,
force_feature_computation=cfg.cache.force_feature_computation,
feature_builders=feature_builders,
target_builders=target_builders,
)
# Create data augmentation
augmentors = build_agent_augmentor(cfg.data_augmentation) if 'data_augmentation' in cfg else None
# Build dataset scenarios
scenarios = build_scenarios(cfg, worker, model)
# Create datamodule
datamodule: pl.LightningDataModule = DataModule(
feature_preprocessor=feature_preprocessor,
splitter=splitter,
all_scenarios=scenarios,
dataloader_params=cfg.data_loader.params,
augmentors=augmentors,
**cfg.data_loader.datamodule,
)
return datamodule
def build_lightning_module(cfg: DictConfig, torch_module_wrapper: TorchModuleWrapper) -> pl.LightningModule:
"""
Builds the lightning module from the config.
:param cfg: omegaconf dictionary
:param torch_module_wrapper: NN model used for training
:return: built object.
"""
# Build loss
objectives = build_objectives(cfg)
# Build metrics to evaluate the performance of predictions
metrics = build_training_metrics(cfg)
# Create the complete Module
model = LightningModuleWrapper(
model=torch_module_wrapper,
objectives=objectives,
metrics=metrics,
**cfg.lightning.hparams,
)
return cast(pl.LightningModule, model)
def build_trainer(cfg: DictConfig) -> pl.Trainer:
"""
Builds the lightning trainer from the config.
:param cfg: omegaconf dictionary
:return: built object.
"""
params = cfg.lightning.trainer.params
callbacks = build_callbacks(cfg.callbacks)
if params.gpus:
callbacks.append(pl.callbacks.GPUStatsMonitor(intra_step_time=True, inter_step_time=True))
plugins = [
pl.plugins.DDPPlugin(find_unused_parameters=False),
]
loggers = [
pl.loggers.TensorBoardLogger(
save_dir=cfg.group,
name=cfg.experiment,
log_graph=False,
version='',
prefix='',
),
]
if cfg.lightning.trainer.overfitting.enable:
OmegaConf.set_struct(cfg, False)
params = OmegaConf.merge(params, cfg.lightning.trainer.overfitting.params)
params.check_val_every_n_epoch = params.max_epochs + 1
OmegaConf.set_struct(cfg, True)
return pl.Trainer(plugins=plugins, **params)
if cfg.lightning.trainer.checkpoint.resume_training:
# Resume training from latest checkpoint
output_dir = Path(cfg.output_dir)
date_format = cfg.date_format
OmegaConf.set_struct(cfg, False)
last_checkpoint = extract_last_checkpoint_from_experiment(output_dir, date_format)
if not last_checkpoint:
raise ValueError('Resume Training is enabled but no checkpoint was found!')
params.resume_from_checkpoint = str(last_checkpoint)
latest_epoch = torch.load(last_checkpoint)['epoch']
params.max_epochs += latest_epoch
logger.info(f'Resuming at epoch {latest_epoch} from checkpoint {last_checkpoint}')
OmegaConf.set_struct(cfg, True)
trainer = pl.Trainer(
callbacks=callbacks,
plugins=plugins,
logger=loggers,
**params,
)
return trainer
|
base_models/layers.py | YangLiangwei/DGFraud | 447 | 11148490 | '''
This code is due to <NAME> (@yutongD), <NAME> (@YingtongDou) and UIC BDSC Lab
DGFraud (A Deep Graph-based Toolbox for Fraud Detection)
https://github.com/safe-graph/DGFraud
'''
from base_models.inits import *
import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
# global unique layer ID dictionary for layer name assignment
_LAYER_UIDS = {}
'''Code about GCN is adapted from tkipf/gcn.'''
def get_layer_uid(layer_name=''):
"""Helper function, assigns unique layer IDs."""
if layer_name not in _LAYER_UIDS:
_LAYER_UIDS[layer_name] = 1
return 1
else:
_LAYER_UIDS[layer_name] += 1
return _LAYER_UIDS[layer_name]
def sparse_dropout(x, keep_prob, noise_shape):
"""Dropout for sparse tensors."""
random_tensor = keep_prob
random_tensor += tf.random_uniform(noise_shape)
dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
pre_out = tf.sparse_retain(x, dropout_mask)
return pre_out * (1. / keep_prob)
def dot(x, y, sparse=False):
"""Wrapper for tf.matmul (sparse vs dense)."""
if sparse:
res = tf.sparse_tensor_dense_matmul(x, y)
else:
res = tf.matmul(x, y)
return res
class Layer(object):
"""Base layer class. Defines basic API for all layer objects.
Implementation inspired by keras (http://keras.io).
# Properties
name: String, defines the variable scope of the layer.
logging: Boolean, switches Tensorflow histogram logging on/off
# Methods
_call(inputs): Defines computation graph of layer
(i.e. takes input, returns output)
__call__(inputs): Wrapper for _call()
_log_vars(): Log all variables
"""
def __init__(self, **kwargs):
allowed_kwargs = {'name', 'logging'}
for kwarg in kwargs.keys():
assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
name = kwargs.get('name')
if not name:
layer = self.__class__.__name__.lower()
name = layer + '_' + str(get_layer_uid(layer))
self.name = name
self.vars = {}
logging = kwargs.get('logging', False)
self.logging = logging
self.sparse_inputs = False
def _call(self, inputs):
return inputs
def _call(self, inputs, adj_info):
return inputs
def __call__(self, inputs):
with tf.name_scope(self.name):
if self.logging and not self.sparse_inputs:
tf.summary.histogram(self.name + '/inputs', inputs)
outputs = self._call(inputs)
if self.logging:
tf.summary.histogram(self.name + '/outputs', outputs)
return outputs
def _log_vars(self):
for var in self.vars:
tf.summary.histogram(self.name + '/vars/' + var, self.vars[var])
class GraphConvolution(Layer):
"""Graph convolution layer."""
def __init__(self, input_dim, output_dim, placeholders, index=0, dropout=0.,
sparse_inputs=False, act=tf.nn.relu, bias=False,
featureless=False, norm=False, **kwargs):
super(GraphConvolution, self).__init__(**kwargs)
self.dropout = dropout
self.act = act
self.support = placeholders['a']
self.sparse_inputs = sparse_inputs
self.featureless = featureless
self.bias = bias
self.norm = norm
self.index = index
# helper variable for sparse dropout
self.num_features_nonzero = placeholders['num_features_nonzero']
with tf.variable_scope(self.name + '_vars'):
for i in range(1):
self.vars['weights_' + str(i)] = glorot([input_dim, output_dim],
name='weights_' + str(i))
if self.bias:
self.vars['bias'] = zeros([output_dim], name='bias')
if self.logging:
self._log_vars()
def _call(self, inputs):
x = inputs
# dropout
if self.sparse_inputs:
x = sparse_dropout(x, 1 - self.dropout, self.num_features_nonzero)
else:
x = tf.nn.dropout(x, 1 - self.dropout)
# convolve
supports = list()
for i in range(1):
if not self.featureless:
pre_sup = dot(x, self.vars['weights_' + str(i)],
sparse=self.sparse_inputs)
else:
pre_sup = self.vars['weights_' + str(i)]
support = dot(self.support[self.index], pre_sup, sparse=False)
supports.append(support)
output = tf.add_n(supports)
axis = list(range(len(output.get_shape()) - 1))
mean, variance = tf.nn.moments(output, axis)
scale = None
offset = None
variance_epsilon = 0.001
output = tf.nn.batch_normalization(output, mean, variance, offset, scale, variance_epsilon)
# bias
if self.bias:
output += self.vars['bias']
if self.norm:
# return self.act(output)/tf.reduce_sum(self.act(output))
return tf.nn.l2_normalize(self.act(output), axis=None, epsilon=1e-12)
return self.act(output)
class AttentionLayer(Layer):
""" AttentionLayer is a function f : hkey × Hval → hval which maps
a feature vector hkey and the set of candidates’ feature vectors
    Hval to a weighted sum of elements in Hval.
"""
def attention(inputs, attention_size, v_type=None, return_weights=False, bias=True, joint_type='weighted_sum',
multi_view=True):
if multi_view:
inputs = tf.expand_dims(inputs, 0)
hidden_size = inputs.shape[-1].value
# Trainable parameters
w_omega = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))
b_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
u_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
with tf.name_scope('v'):
v = tf.tensordot(inputs, w_omega, axes=1)
if bias is True:
v += b_omega
            if v_type == 'tanh':
v = tf.tanh(v)
            if v_type == 'relu':
v = tf.nn.relu(v)
vu = tf.tensordot(v, u_omega, axes=1, name='vu')
weights = tf.nn.softmax(vu, name='alphas')
        if joint_type == 'weighted_sum':
output = tf.reduce_sum(inputs * tf.expand_dims(weights, -1), 1)
        if joint_type == 'concatenation':
output = tf.concat(inputs * tf.expand_dims(weights, -1), 2)
if not return_weights:
return output
else:
return output, weights
def node_attention(inputs, adj, return_weights=False):
hidden_size = inputs.shape[-1].value
H_v = tf.Variable(tf.random_normal([hidden_size, 1], stddev=0.1))
# convert adj to sparse tensor
zero = tf.constant(0, dtype=tf.float32)
where = tf.not_equal(adj, zero)
indices = tf.where(where)
values = tf.gather_nd(adj, indices)
adj = tf.SparseTensor(indices=indices,
values=values,
dense_shape=adj.shape)
with tf.name_scope('v'):
v = adj * tf.squeeze(tf.tensordot(inputs, H_v, axes=1))
weights = tf.sparse_softmax(v, name='alphas') # [nodes,nodes]
output = tf.sparse_tensor_dense_matmul(weights, inputs)
if not return_weights:
return output
else:
return output, weights
# view-level attention (equation (4) in SemiGNN)
def view_attention(inputs, encoding1, encoding2, layer_size, meta, return_weights=False):
h = inputs
encoding = [encoding1, encoding2]
for l in range(layer_size):
v = []
for i in range(meta):
input = h[i]
v_i = tf.layers.dense(inputs=input, units=encoding[l], activation=tf.nn.relu)
v.append(v_i)
h = v
h = tf.concat(h, 0)
h = tf.reshape(h, [meta, inputs[0].shape[0].value, encoding2])
phi = tf.Variable(tf.random_normal([encoding2, ], stddev=0.1))
weights = tf.nn.softmax(h * phi, name='alphas')
output = tf.reshape(h * weights, [1, inputs[0].shape[0] * encoding2 * meta])
if not return_weights:
return output
else:
return output, weights
def scaled_dot_product_attention(q, k, v, mask):
qk = tf.matmul(q, k, transpose_b=True)
dk = tf.cast(tf.shape(k)[-1], tf.float32)
scaled_attention = qk / tf.math.sqrt(dk)
if mask is not None:
scaled_attention += 1
weights = tf.nn.softmax(scaled_attention, axis=-1)
output = tf.matmul(weights, v)
return output, weights
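# Minimal NumPy sketch of the unmasked path of scaled_dot_product_attention above,
# i.e. softmax(Q K^T / sqrt(d_k)) V, with hypothetical shapes (illustration only).
if __name__ == '__main__':
    import numpy as np
    q = np.random.rand(2, 4)                    # 2 queries, d_k = 4
    k = np.random.rand(3, 4)                    # 3 keys
    v = np.random.rand(3, 5)                    # 3 values, d_v = 5
    scores = q @ k.T / np.sqrt(k.shape[-1])     # (2, 3)
    weights = np.exp(scores) / np.exp(scores).sum(axis=-1, keepdims=True)
    print((weights @ v).shape)                  # (2, 5)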
class ConcatenationAggregator(Layer):
"""This layer equals to the equation (3) in
paper 'Spam Review Detection with Graph Convolutional Networks.'
"""
def __init__(self, input_dim, output_dim, review_item_adj, review_user_adj,
review_vecs, user_vecs, item_vecs, dropout=0., act=tf.nn.relu,
name=None, concat=False, **kwargs):
super(ConcatenationAggregator, self).__init__(**kwargs)
self.review_item_adj = review_item_adj
self.review_user_adj = review_user_adj
self.review_vecs = review_vecs
self.user_vecs = user_vecs
self.item_vecs = item_vecs
self.dropout = dropout
self.act = act
self.concat = concat
if name is not None:
name = '/' + name
else:
name = ''
with tf.variable_scope(self.name + name + '_vars'):
self.vars['con_agg_weights'] = glorot([input_dim, output_dim],
name='con_agg_weights')
if self.logging:
self._log_vars()
self.input_dim = input_dim
self.output_dim = output_dim
def _call(self, inputs):
review_vecs = tf.nn.dropout(self.review_vecs, 1 - self.dropout)
user_vecs = tf.nn.dropout(self.user_vecs, 1 - self.dropout)
item_vecs = tf.nn.dropout(self.item_vecs, 1 - self.dropout)
# neighbor sample
ri = tf.nn.embedding_lookup(item_vecs,
tf.cast(self.review_item_adj, dtype=tf.int32))
ri = tf.transpose(tf.random_shuffle(tf.transpose(ri)))
ru = tf.nn.embedding_lookup(user_vecs, tf.cast(self.review_user_adj, dtype=tf.int32))
ru = tf.transpose(tf.random_shuffle(tf.transpose(ru)))
concate_vecs = tf.concat([review_vecs, ru, ri], axis=1)
# [nodes] x [out_dim]
output = tf.matmul(concate_vecs, self.vars['con_agg_weights'])
return self.act(output)
class AttentionAggregator(Layer):
"""This layer equals to equation (5) and equation (8) in
paper 'Spam Review Detection with Graph Convolutional Networks.'
"""
def __init__(self, input_dim1, input_dim2, output_dim, hid_dim, user_review_adj, user_item_adj, item_review_adj,
item_user_adj,
review_vecs, user_vecs, item_vecs, dropout=0., bias=False, act=tf.nn.relu,
name=None, concat=False, **kwargs):
super(AttentionAggregator, self).__init__(**kwargs)
self.dropout = dropout
self.bias = bias
self.act = act
self.concat = concat
self.user_review_adj = user_review_adj
self.user_item_adj = user_item_adj
self.item_review_adj = item_review_adj
self.item_user_adj = item_user_adj
self.review_vecs = review_vecs
self.user_vecs = user_vecs
self.item_vecs = item_vecs
if name is not None:
name = '/' + name
else:
name = ''
with tf.variable_scope(self.name + name + '_vars'):
self.vars['user_weights'] = glorot([input_dim1, hid_dim],
name='user_weights')
self.vars['item_weights'] = glorot([input_dim2, hid_dim],
name='item_weights')
self.vars['concate_user_weights'] = glorot([hid_dim, output_dim],
name='user_weights')
self.vars['concate_item_weights'] = glorot([hid_dim, output_dim],
name='item_weights')
if self.bias:
                self.vars['bias'] = zeros([output_dim], name='bias')  # use the argument; self.output_dim is assigned later
if self.logging:
self._log_vars()
self.input_dim1 = input_dim1
self.input_dim2 = input_dim2
self.output_dim = output_dim
def _call(self, inputs):
review_vecs = tf.nn.dropout(self.review_vecs, 1 - self.dropout)
user_vecs = tf.nn.dropout(self.user_vecs, 1 - self.dropout)
item_vecs = tf.nn.dropout(self.item_vecs, 1 - self.dropout)
# num_samples = self.adj_info[4]
# neighbor sample
ur = tf.nn.embedding_lookup(review_vecs, tf.cast(self.user_review_adj, dtype=tf.int32))
ur = tf.transpose(tf.random_shuffle(tf.transpose(ur)))
# ur = tf.slice(ur, [0, 0], [-1, num_samples])
ri = tf.nn.embedding_lookup(item_vecs, tf.cast(self.user_item_adj, dtype=tf.int32))
ri = tf.transpose(tf.random_shuffle(tf.transpose(ri)))
# ri = tf.slice(ri, [0, 0], [-1, num_samples])
ir = tf.nn.embedding_lookup(review_vecs, tf.cast(self.item_review_adj, dtype=tf.int32))
ir = tf.transpose(tf.random_shuffle(tf.transpose(ir)))
# ir = tf.slice(ir, [0, 0], [-1, num_samples])
ru = tf.nn.embedding_lookup(user_vecs, tf.cast(self.item_user_adj, dtype=tf.int32))
ru = tf.transpose(tf.random_shuffle(tf.transpose(ru)))
# ru = tf.slice(ru, [0, 0], [-1, num_samples])
concate_user_vecs = tf.concat([ur, ri], axis=2)
concate_item_vecs = tf.concat([ir, ru], axis=2)
# concate neighbor's embedding
s1 = tf.shape(concate_user_vecs)
s2 = tf.shape(concate_item_vecs)
concate_user_vecs = tf.reshape(concate_user_vecs, [s1[0], s1[1] * s1[2]])
concate_item_vecs = tf.reshape(concate_item_vecs, [s2[0], s2[1] * s2[2]])
# attention
concate_user_vecs, _ = AttentionLayer.scaled_dot_product_attention(q=user_vecs, k=user_vecs,
v=concate_user_vecs,
mask=None)
concate_item_vecs, _ = AttentionLayer.scaled_dot_product_attention(q=item_vecs, k=item_vecs,
v=concate_item_vecs,
mask=None)
# [nodes] x [out_dim]
user_output = tf.matmul(concate_user_vecs, self.vars['user_weights'])
item_output = tf.matmul(concate_item_vecs, self.vars['item_weights'])
# bias
if self.bias:
user_output += self.vars['bias']
item_output += self.vars['bias']
user_output = self.act(user_output)
item_output = self.act(item_output)
# Combination
if self.concat:
user_output = tf.matmul(user_output, self.vars['concate_user_weights'])
item_output = tf.matmul(item_output, self.vars['concate_item_weights'])
user_output = tf.concat([user_vecs, user_output], axis=1)
item_output = tf.concat([item_vecs, item_output], axis=1)
return user_output, item_output
class GASConcatenation(Layer):
"""GCN-based Anti-Spam(GAS) layer for concatenation of comment embedding learned by GCN from the Comment Graph
and other embeddings learned in previous operations.
"""
def __init__(self, review_item_adj, review_user_adj,
review_vecs, item_vecs, user_vecs, homo_vecs, name=None, **kwargs):
super(GASConcatenation, self).__init__(**kwargs)
self.review_item_adj = review_item_adj
self.review_user_adj = review_user_adj
self.review_vecs = review_vecs
self.user_vecs = user_vecs
self.item_vecs = item_vecs
self.homo_vecs = homo_vecs
if name is not None:
name = '/' + name
else:
name = ''
if self.logging:
self._log_vars()
def _call(self, inputs):
# neighbor sample
ri = tf.nn.embedding_lookup(self.item_vecs, tf.cast(self.review_item_adj, dtype=tf.int32))
# ri = tf.transpose(tf.random_shuffle(tf.transpose(ri)))
# ir = tf.slice(ir, [0, 0], [-1, num_samples])
ru = tf.nn.embedding_lookup(self.user_vecs, tf.cast(self.review_user_adj, dtype=tf.int32))
# ru = tf.transpose(tf.random_shuffle(tf.transpose(ru)))
# ru = tf.slice(ru, [0, 0], [-1, num_samples])
concate_vecs = tf.concat([ri, self.review_vecs, ru, self.homo_vecs], axis=1)
return concate_vecs
class GEMLayer(Layer):
"""This layer equals to the equation (8) in
paper 'Heterogeneous Graph Neural Networks for Malicious Account Detection.'
"""
def __init__(self, placeholders, nodes, device_num, embedding, encoding, name=None, **kwargs):
super(GEMLayer, self).__init__(**kwargs)
self.nodes = nodes
self.devices_num = device_num
self.encoding = encoding
self.embedding = embedding
self.placeholders = placeholders
if name is not None:
name = '/' + name
else:
name = ''
with tf.variable_scope(self.name + name + '_vars'):
self.vars['W'] = glorot([embedding, encoding], name='W')
self.vars['V'] = glorot([encoding, encoding], name='V')
self.vars['alpha'] = glorot([self.devices_num, 1], name='V')
if self.logging:
self._log_vars()
def _call(self, inputs):
h1 = tf.matmul(self.placeholders['x'], self.vars['W'])
h2 = []
for d in range(self.devices_num):
ahv = tf.matmul(tf.matmul(self.placeholders['a'][d], inputs), self.vars['V'])
h2.append(ahv)
h2 = tf.concat(h2, 0)
h2 = tf.reshape(h2, [self.devices_num, self.nodes * self.encoding])
h2 = tf.transpose(h2, [1, 0])
h2 = tf.reshape(tf.matmul(h2, tf.nn.softmax(self.vars['alpha'])), [self.nodes, self.encoding])
h = tf.nn.sigmoid(h1 + h2)
return h
class GAT(Layer):
"""This layer is adapted from PetarV-/GAT.'
"""
def __init__(self, dim, attn_drop, ffd_drop, bias_mat, n_heads, name=None, **kwargs):
super(GAT, self).__init__(**kwargs)
self.dim = dim
self.attn_drop = attn_drop
self.ffd_drop = ffd_drop
self.bias_mat = bias_mat
self.n_heads = n_heads
if name is not None:
name = '/' + name
else:
name = ''
if self.logging:
self._log_vars()
def attn_head(self, seq, out_sz, bias_mat, activation, in_drop=0.0, coef_drop=0.0, residual=False):
conv1d = tf.layers.conv1d
with tf.name_scope('my_attn'):
if in_drop != 0.0:
seq = tf.nn.dropout(seq, 1.0 - in_drop)
seq_fts = tf.layers.conv1d(seq, out_sz, 1, use_bias=False)
# simplest self-attention possible
f_1 = tf.layers.conv1d(seq_fts, 1, 1)
f_2 = tf.layers.conv1d(seq_fts, 1, 1)
logits = f_1 + tf.transpose(f_2, [0, 2, 1])
coefs = tf.nn.softmax(tf.nn.leaky_relu(logits) + bias_mat)
if coef_drop != 0.0:
coefs = tf.nn.dropout(coefs, 1.0 - coef_drop)
if in_drop != 0.0:
seq_fts = tf.nn.dropout(seq_fts, 1.0 - in_drop)
vals = tf.matmul(coefs, seq_fts)
ret = tf.contrib.layers.bias_add(vals)
# residual connection
if residual:
if seq.shape[-1] != ret.shape[-1]:
ret = ret + conv1d(seq, ret.shape[-1], 1)
else:
ret = ret + seq
return activation(ret)
def inference(self, inputs):
out = []
# for i in range(n_heads[-1]):
for i in range(self.n_heads):
out.append(self.attn_head(inputs, bias_mat=self.bias_mat, out_sz=self.dim, activation=tf.nn.elu,
in_drop=self.ffd_drop, coef_drop=self.attn_drop, residual=False))
logits = tf.add_n(out) / self.n_heads
return logits
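# Minimal usage sketch, assuming TF1 graph mode; the node count, feature size and the
# all-zero (fully visible) bias matrix are assumptions chosen only to illustrate the call.
def _gat_inference_sketch():
    import numpy as np
    feats = tf.constant(np.random.rand(1, 10, 32), dtype=tf.float32)  # [batch, nodes, dim] (assumed)
    bias_mat = tf.zeros([1, 10, 10], dtype=tf.float32)  # zero bias lets every node attend to every node
    gat = GAT(dim=16, attn_drop=0.0, ffd_drop=0.0, bias_mat=bias_mat, n_heads=4)
    return gat.inference(feats)  # head-averaged outputs, shape [1, 10, 16]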
class GeniePathLayer(Layer):
"""This layer equals to the Adaptive Path Layer in
paper 'GeniePath: Graph Neural Networks with Adaptive Receptive Paths.'
The code is adapted from shawnwang-tech/GeniePath-pytorch
"""
def __init__(self, placeholders, nodes, in_dim, dim, heads=1, name=None, **kwargs):
super(GeniePathLayer, self).__init__(**kwargs)
self.nodes = nodes
self.in_dim = in_dim
self.dim = dim
self.heads = heads
self.placeholders = placeholders
if name is not None:
name = '/' + name
else:
name = ''
if self.logging:
self._log_vars()
def depth_forward(self, x, h, c):
with tf.variable_scope('lstm', reuse=tf.AUTO_REUSE):
cell = tf.nn.rnn_cell.LSTMCell(num_units=h, state_is_tuple=True)
x, (c, h) = tf.nn.dynamic_rnn(cell, x, dtype=tf.float32)
return x, (c, h)
def breadth_forward(self, x, bias_in):
x = tf.tanh(GAT(self.dim, attn_drop=0, ffd_drop=0, bias_mat=bias_in, n_heads=self.heads).inference(x))
return x
def forward(self, x, bias_in, h, c):
x = self.breadth_forward(x, bias_in)
x, (h, c) = self.depth_forward(x, h, c)
x = x[0]
return x, (h, c)
# def lazy_forward(self, x, bias_in, h, c):
# x = self.breadth_forward(x, bias_in)
# x, (h, c) = self.depth_forward(x, h, c)
# x = x[0]
# return x, (h, c)
|
plenum/test/watermarks/test_watermarks_after_view_change.py | IDunion/indy-plenum | 148 | 11148506 | import pytest
from plenum.test import waits
from plenum.test.delayers import cDelay, chk_delay, icDelay, nv_delay
from plenum.test.helper import sdk_send_random_and_check, waitForViewChange
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
from plenum.test.stasher import delay_rules
from plenum.test.view_change_service.helper import trigger_view_change
CHK_FREQ = 2
LOG_SIZE = 2 * CHK_FREQ
Max3PCBatchSize = 1
@pytest.fixture(scope='module')
def tconf(tconf):
old_max_3pc_batch_size = tconf.Max3PCBatchSize
old_log_size = tconf.LOG_SIZE
old_chk_freq = tconf.CHK_FREQ
tconf.Max3PCBatchSize = Max3PCBatchSize
tconf.LOG_SIZE = LOG_SIZE
tconf.CHK_FREQ = CHK_FREQ
yield tconf
tconf.Max3PCBatchSize = old_max_3pc_batch_size
tconf.LOG_SIZE = old_log_size
tconf.CHK_FREQ = old_chk_freq
def test_watermarks_after_view_change(tdir, tconf,
looper,
txnPoolNodeSet,
sdk_pool_handle,
sdk_wallet_client):
"""
Delay commit, checkpoint, InstanceChange and ViewChangeDone messages for lagging_node.
Start ViewChange.
Check that ViewChange finished.
Reset delays.
Check that lagging_node can order transactions and has same data with other nodes.
"""
lagging_node = txnPoolNodeSet[-1]
lagging_node.master_replica.config.LOG_SIZE = LOG_SIZE
start_view_no = lagging_node.viewNo
with delay_rules(lagging_node.nodeIbStasher, cDelay(), chk_delay(), icDelay(), nv_delay()):
trigger_view_change(txnPoolNodeSet)
waitForViewChange(looper,
txnPoolNodeSet[:-1],
expectedViewNo=start_view_no + 1,
customTimeout=waits.expectedPoolViewChangeStartedTimeout(len(txnPoolNodeSet)))
ensure_all_nodes_have_same_data(looper, txnPoolNodeSet[:-1])
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_client, 6)
ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_client, 1)
ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
|
utils/usergrid-util-python/samples/beacon-event-example.py | snoopdave/incubator-usergrid | 788 | 11148511 |
# */
# * Licensed to the Apache Software Foundation (ASF) under one
# * or more contributor license agreements. See the NOTICE file
# * distributed with this work for additional information
# * regarding copyright ownership. The ASF licenses this file
# * to you under the Apache License, Version 2.0 (the
# * "License"); you may not use this file except in compliance
# * with the License. You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing,
# * software distributed under the License is distributed on an
# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# * KIND, either express or implied. See the License for the
# * specific language governing permissions and limitations
# * under the License.
# */
# URL Templates for Usergrid
#
# Get all events for a user:
# https://usergrid.net/beacon-sample/event-example/users/jeff/events
#
# Get only enterStore events:
# https://usergrid.net/beacon-sample/event-example/users/jeff/events?ql=select * where eventtype=‘enterStore'
#
# Get/filter beacon events for a user:
# https://usergrid.net/beacon-sample/event-example/users/jeff/events?ql=select * where eventtype=‘beacon'
#
# Get latest beacon event for user:
# https://usergrid.net/beacon-sample/event-example/users/jeff/events?ql=select * where eventtype=‘beacon’&limit=1
#
# Beacon events for store:
# https://usergrid.net/beacon-sample/event-example/users/jeff/events?ql=select * where eventtype=‘beacon'
#
# All events for store:
# https://usergrid.net/beacon-sample/event-example/stores/store_123/events
#
# All events for a beacon:
# https://usergrid.net/beacon-sample/event-example/beacons/store_456-b2/events
#
# Get Users who passed a specific beacon:
# https://usergrid.net/beacon-sample/event-example/beacons/3fd4fccb-d43b-11e5-978a-123320acb31f/events;ql=select%20* where profile=1/connecting/events/users
__author__ = '<EMAIL>'
import json
import random
import requests
from multiprocessing import Process, Pool
import time
collection_url_template = "{api_url}/{org}/{app}/{collection}"
entity_url_template = "{api_url}/{org}/{app}/{collection}/{entity_id}"
connection_query_url_template = "{api_url}/{org}/{app}/{collection}/{uuid}/{verb}"
connection_create_url_template = "{api_url}/{org}/{app}/{collection}/{uuid}/{verb}/{target_uuid}"
url_data = {
'api_url': 'https://usergridhost/basepath',
'org': 'samples',
'app': 'event-example'
}
session = requests.Session()
class EventGenerator(Process):
def __init__(self, store_id, event_count, user_array, beacons):
super(EventGenerator, self).__init__()
self.store_id = store_id
self.user_array = user_array
self.event_count = event_count
self.beacons = beacons
self.session = requests.Session()
self.create_store(self.store_id)
self.create_users(self.user_array)
def create_store(self, store_id):
url = entity_url_template.format(collection='stores', entity_id=store_id, **url_data)
r = self.session.put(url, data=json.dumps({"name": store_id}))
if r.status_code != 200:
print 'Error creating store [%s] at URL=[%s]: %s' % (store_id, url, r.text)
def create_event(self, user, event):
print 'creating event: %s' % json.dumps(event)
url = collection_url_template.format(collection='general-events', **url_data)
r = self.session.post(url, data=json.dumps(event))
if r.status_code == 200:
res = r.json()
entity = res.get('entities')[0]
event_uuid = entity.get('uuid')
# link to user
create_connection_url = connection_create_url_template.format(collection='users',
uuid=user,
verb='events',
target_uuid=event_uuid,
**url_data)
r_connect = self.session.post(create_connection_url)
if r_connect.status_code == 200:
print 'created connection: %s' % create_connection_url
# link to store
create_connection_url = connection_create_url_template.format(collection='stores',
uuid=event.get('storeId'),
verb='events',
target_uuid=event_uuid,
**url_data)
r_connect = self.session.post(create_connection_url)
if r_connect.status_code == 200:
print 'created connection: %s' % create_connection_url
if event.get('eventType') == 'beacon':
# link to beacon
create_connection_url = connection_create_url_template.format(collection='beacons',
uuid=event.get('beaconId'),
verb='events',
target_uuid=event_uuid,
**url_data)
r_connect = self.session.post(create_connection_url)
if r_connect.status_code == 200:
print 'created connection: %s' % create_connection_url
else:
print 'Error creating connection at URL=[%s]: %s' % (create_connection_url, r.text)
def run(self):
for user in self.user_array:
# store 123
self.create_event(user, {
'storeId': self.store_id,
'eventType': 'enterStore'
})
for x in xrange(0, self.event_count):
beacon_number = random.randint(0, len(self.beacons) - 1)
beacon_name = self.beacons[beacon_number]
event = {
'beaconId': '%s-%s' % (self.store_id, beacon_name),
'storeId': self.store_id,
'eventType': 'beacon'
}
self.create_event(user, event)
self.create_event(user, {
'storeId': self.store_id,
'eventType': 'exitStore'
})
def create_users(self, user_array):
for user in user_array:
self.create_user(user)
def create_user(self, user):
data = {
'username': user,
            'email': '<EMAIL>' % user
}
url = collection_url_template.format(collection='users', **url_data)
r = self.session.post(url, json.dumps(data))
if r.status_code != 200:
print 'Error creating user [%s] at URL=[%s]: %s' % (user, url, r.text)
def create_entity(entity_type, entity_name):
url = entity_url_template.format(collection=entity_type, entity_id=entity_name, **url_data)
r = session.put(url, data=json.dumps({'name': entity_name}))
if r.status_code != 200:
print 'Error creating %s [%s] at URL=[%s]: %s' % (entity_type, entity_name, url, r.text)
def create_beacon(beacon_name):
create_entity('beacons', beacon_name)
def create_store(store_name):
create_entity('stores', store_name)
def main():
beacons = ["b1", "b2", "b3", "b4", "b5", "b6"]
stores = ['store_123', 'store_456', 'store_789', 'store_901']
beacon_names = []
for store in stores:
for beacon in beacons:
beacon_names.append('%s-%s' % (store, beacon))
pool = Pool(16)
pool.map(create_beacon, beacon_names)
pool.map(create_store, stores)
processes = [
EventGenerator(stores[0], 100, ['jeff', 'julie'], beacons=beacons),
EventGenerator(stores[0], 100, ['russo', 'dunker'], beacons=beacons),
EventGenerator(stores[2], 100, ['jeff', 'julie'], beacons=beacons),
EventGenerator(stores[2], 100, ['russo', 'dunker'], beacons=beacons),
EventGenerator(stores[3], 100, ['jeff', 'julie'], beacons=beacons),
EventGenerator(stores[3], 100, ['russo', 'dunker'], beacons=beacons),
EventGenerator(stores[1], 100, ['bala', 'shankar'], beacons=beacons),
EventGenerator(stores[1], 100, ['chet', 'anant'], beacons=beacons)
]
[p.start() for p in processes]
while len([p for p in processes if p.is_alive()]) > 0:
print 'Processors active, waiting'
time.sleep(1)
main()
|
docx/oxml/document.py | mooosee/python-docx | 169 | 11148524 | # encoding: utf-8
"""
Custom element classes that correspond to the document part, e.g.
<w:document>.
"""
from .xmlchemy import BaseOxmlElement, ZeroOrOne, ZeroOrMore
class CT_Document(BaseOxmlElement):
"""
``<w:document>`` element, the root element of a document.xml file.
"""
body = ZeroOrOne('w:body')
@property
def sectPr_lst(self):
"""
Return a list containing a reference to each ``<w:sectPr>`` element
in the document, in the order encountered.
"""
return self.xpath('.//w:sectPr')
class CT_Body(BaseOxmlElement):
"""
``<w:body>``, the container element for the main document story in
``document.xml``.
"""
p = ZeroOrMore('w:p', successors=('w:sectPr',))
tbl = ZeroOrMore('w:tbl', successors=('w:sectPr',))
sectPr = ZeroOrOne('w:sectPr', successors=())
def add_section_break(self):
"""
Return the current ``<w:sectPr>`` element after adding a clone of it
in a new ``<w:p>`` element appended to the block content elements.
Note that the "current" ``<w:sectPr>`` will always be the sentinel
sectPr in this case since we're always working at the end of the
block content.
"""
sentinel_sectPr = self.get_or_add_sectPr()
cloned_sectPr = sentinel_sectPr.clone()
p = self.add_p()
p.set_sectPr(cloned_sectPr)
return sentinel_sectPr
def clear_content(self):
"""
Remove all content child elements from this <w:body> element. Leave
the <w:sectPr> element if it is present.
"""
if self.sectPr is not None:
content_elms = self[:-1]
else:
content_elms = self[:]
for content_elm in content_elms:
self.remove(content_elm)
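# Minimal usage sketch, assuming the public python-docx API; the in-memory default
# document below is only for illustration.
def _body_usage_sketch():
    from docx import Document
    document = Document()            # new document from the bundled default template
    body = document.element.body     # the CT_Body element wrapped by this module
    body.clear_content()             # drop block content, keep the sentinel <w:sectPr>
    return body.add_section_break()  # append a <w:p> carrying a cloned <w:sectPr>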
|
testing/tests/001-main/003-self/020-fixup-review-via-push.py | fekblom/critic | 216 | 11148546 |
import os
def to(name):
return testing.mailbox.ToRecipient("<EMAIL>" % name)
def about(subject):
return testing.mailbox.WithSubject(subject)
FILENAME = "020-fixup-review-via-push.txt"
SETTINGS = { "review.createViaPush": True }
with testing.utils.settings("alice", SETTINGS), frontend.signin("alice"):
with repository.workcopy() as work:
REMOTE_URL = instance.repository_url("alice")
with open(os.path.join(work.path, FILENAME), "w") as text_file:
print >>text_file, "Some content."
work.run(["add", FILENAME])
work.run(["commit", "-m", """\
fixup! Commit reference
Relevant summary
"""],
GIT_AUTHOR_NAME="<NAME>",
GIT_AUTHOR_EMAIL="<EMAIL>",
GIT_COMMITTER_NAME="<NAME>",
GIT_COMMITTER_EMAIL="<EMAIL>")
work.run(["push", "-q", REMOTE_URL,
"HEAD:refs/heads/r/020-fixup-review-via-push"])
mailbox.pop(accept=[to("alice"), about("New Review: Relevant summary")])
|
apps/oauth/forms.py | sbybfai/izone | 1,009 | 11148568 | # -*- coding: utf-8 -*-
from django import forms
from .models import Ouser
class ProfileForm(forms.ModelForm):
class Meta:
model = Ouser
fields = ['link','avatar']
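# Minimal usage sketch, assuming a logged-in request inside a Django view; the view
# name and request handling are illustrative only.
def _update_profile_sketch(request):
    form = ProfileForm(request.POST or None, request.FILES or None, instance=request.user)
    if form.is_valid():
        form.save()
    return form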
|
Section 6 - NLP Core/NLP Part 18 - Latent Semantic Analysis Part 2.py | kungfumas/bahasa-alami | 169 | 11148573 | # Latent Semantic Analysis using Python
# Importing the Libraries
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
import nltk
# Sample Data
dataset = ["The amount of polution is increasing day by day",
"The concert was just great",
"I love to see <NAME> cook",
"Google is introducing a new technology",
"AI Robots are examples of great technology present today",
"All of us were singing in the concert",
"We have launch campaigns to stop pollution and global warming"]
dataset = [line.lower() for line in dataset]
# Creating Tfidf Model
vectorizer = TfidfVectorizer()
X = vectorizer.fit_transform(dataset)
# Visualizing the Tfidf Model
print(X[0])
# Creating the SVD
lsa = TruncatedSVD(n_components = 4, n_iter = 100)
lsa.fit(X)
# Inspect one concept vector (a row of V^T); index 3 picks the fourth component
row1 = lsa.components_[3]
# Word Concept Dictionary Creation
concept_words = {}
# Visualizing the concepts
terms = vectorizer.get_feature_names()
for i,comp in enumerate(lsa.components_):
componentTerms = zip(terms,comp)
sortedTerms = sorted(componentTerms,key=lambda x:x[1],reverse=True)
sortedTerms = sortedTerms[:10]
concept_words["Concept "+str(i)] = sortedTerms
# Sentence Concepts
for key in concept_words.keys():
sentence_scores = []
for sentence in dataset:
words = nltk.word_tokenize(sentence)
score = 0
for word in words:
for word_with_score in concept_words[key]:
if word == word_with_score[0]:
score += word_with_score[1]
sentence_scores.append(score)
print("\n"+key+":")
for sentence_score in sentence_scores:
print(sentence_score) |
docs/user_guides/simple_case/word2vec/train.py | shiyutang/docs | 104 | 11148574 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle as paddle
import paddle.fluid as fluid
import six
import numpy
import sys
import math
import argparse
EMBED_SIZE = 32
HIDDEN_SIZE = 256
N = 5
BATCH_SIZE = 100
word_dict = paddle.dataset.imikolov.build_dict()
dict_size = len(word_dict)
def parse_args():
parser = argparse.ArgumentParser("word2vec")
parser.add_argument(
'--enable_ce',
action='store_true',
help='If set, run the task with continuous evaluation logs.')
parser.add_argument(
'--use_gpu', type=int, default=0, help='whether to use gpu')
parser.add_argument(
'--num_epochs', type=int, default=100, help='number of epoch')
args = parser.parse_args()
return args
def inference_program(words, is_sparse):
embed_first = fluid.embedding(
input=words[0],
size=[dict_size, EMBED_SIZE],
dtype='float32',
is_sparse=is_sparse,
param_attr='shared_w')
embed_second = fluid.embedding(
input=words[1],
size=[dict_size, EMBED_SIZE],
dtype='float32',
is_sparse=is_sparse,
param_attr='shared_w')
embed_third = fluid.embedding(
input=words[2],
size=[dict_size, EMBED_SIZE],
dtype='float32',
is_sparse=is_sparse,
param_attr='shared_w')
embed_fourth = fluid.embedding(
input=words[3],
size=[dict_size, EMBED_SIZE],
dtype='float32',
is_sparse=is_sparse,
param_attr='shared_w')
concat_embed = fluid.layers.concat(
input=[embed_first, embed_second, embed_third, embed_fourth], axis=1)
hidden1 = fluid.layers.fc(
input=concat_embed, size=HIDDEN_SIZE, act='sigmoid')
predict_word = fluid.layers.fc(
input=hidden1, size=dict_size, act='softmax')
return predict_word
def train_program(predict_word):
# The declaration of 'next_word' must be after the invoking of inference_program,
# or the data input order of train program would be [next_word, firstw, secondw,
# thirdw, fourthw], which is not correct.
next_word = fluid.data(name='nextw', shape=[None, 1], dtype='int64')
cost = fluid.layers.cross_entropy(input=predict_word, label=next_word)
avg_cost = fluid.layers.mean(cost)
return avg_cost
def optimizer_func():
return fluid.optimizer.AdagradOptimizer(
learning_rate=3e-3,
regularization=fluid.regularizer.L2DecayRegularizer(8e-4))
def train(if_use_cuda, params_dirname, is_sparse=True):
place = fluid.CUDAPlace(0) if if_use_cuda else fluid.CPUPlace()
train_reader = fluid.io.batch(
paddle.dataset.imikolov.train(word_dict, N), BATCH_SIZE)
test_reader = fluid.io.batch(
paddle.dataset.imikolov.test(word_dict, N), BATCH_SIZE)
first_word = fluid.data(name='firstw', shape=[None, 1], dtype='int64')
second_word = fluid.data(name='secondw', shape=[None, 1], dtype='int64')
third_word = fluid.data(name='thirdw', shape=[None, 1], dtype='int64')
forth_word = fluid.data(name='fourthw', shape=[None, 1], dtype='int64')
next_word = fluid.data(name='nextw', shape=[None, 1], dtype='int64')
word_list = [first_word, second_word, third_word, forth_word, next_word]
feed_order = ['firstw', 'secondw', 'thirdw', 'fourthw', 'nextw']
main_program = fluid.default_main_program()
star_program = fluid.default_startup_program()
if args.enable_ce:
main_program.random_seed = 90
star_program.random_seed = 90
predict_word = inference_program(word_list, is_sparse)
avg_cost = train_program(predict_word)
test_program = main_program.clone(for_test=True)
optimizer = optimizer_func()
optimizer.minimize(avg_cost)
exe = fluid.Executor(place)
def train_test(program, reader):
count = 0
feed_var_list = [
program.global_block().var(var_name) for var_name in feed_order
]
feeder_test = fluid.DataFeeder(feed_list=feed_var_list, place=place)
test_exe = fluid.Executor(place)
accumulated = len([avg_cost]) * [0]
for test_data in reader():
avg_cost_np = test_exe.run(
program=program,
feed=feeder_test.feed(test_data),
fetch_list=[avg_cost])
accumulated = [
x[0] + x[1][0] for x in zip(accumulated, avg_cost_np)
]
count += 1
return [x / count for x in accumulated]
def train_loop():
step = 0
feed_var_list_loop = [
main_program.global_block().var(var_name)
for var_name in feed_order
]
feeder = fluid.DataFeeder(feed_list=feed_var_list_loop, place=place)
exe.run(star_program)
for pass_id in range(PASS_NUM):
for data in train_reader():
avg_cost_np = exe.run(
main_program,
feed=feeder.feed(data),
fetch_list=[avg_cost])
if step % 10 == 0:
outs = train_test(test_program, test_reader)
# print("Step %d: Average Cost %f" % (step, avg_cost_np[0]))
print("Step %d: Average Cost %f" % (step, outs[0]))
# print(outs)
# it will take a few hours.
# If average cost is lower than 5.8, we consider the model good enough to stop.
# Note 5.8 is a relatively high value. In order to get a better model, one should
# aim for avg_cost lower than 3.5. But the training could take longer time.
if outs[0] < 5.8:
if args.enable_ce:
print("kpis\ttrain_cost\t%f" % outs[0])
if params_dirname is not None:
fluid.io.save_inference_model(params_dirname, [
'firstw', 'secondw', 'thirdw', 'fourthw'
], [predict_word], exe)
return
step += 1
if math.isnan(float(avg_cost_np[0])):
sys.exit("got NaN loss, training failed.")
raise AssertionError(
"Cost is too large {0:2.2}".format(avg_cost_np[0]))
train_loop()
def infer(use_cuda, params_dirname=None):
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
inference_scope = fluid.core.Scope()
with fluid.scope_guard(inference_scope):
# Use fluid.io.load_inference_model to obtain the inference program desc,
# the feed_target_names (the names of variables that will be feeded
# data using feed operators), and the fetch_targets (variables that
# we want to obtain data from using fetch operators).
[inferencer, feed_target_names,
fetch_targets] = fluid.io.load_inference_model(params_dirname, exe)
# Setup inputs by creating 4 LoDTensors representing 4 words. Here each word
# is simply an index to look up for the corresponding word vector and hence
# the shape of word (base_shape) should be [1]. The recursive_sequence_lengths,
# which is length-based level of detail (lod) of each LoDTensor, should be [[1]]
# meaning there is only one level of detail and there is only one sequence of
# one word on this level.
# Note that recursive_sequence_lengths should be a list of lists.
data1 = numpy.asarray([[211]], dtype=numpy.int64) # 'among'
data2 = numpy.asarray([[6]], dtype=numpy.int64) # 'a'
data3 = numpy.asarray([[96]], dtype=numpy.int64) # 'group'
data4 = numpy.asarray([[4]], dtype=numpy.int64) # 'of'
lod = numpy.asarray([[1]], dtype=numpy.int64)
first_word = fluid.create_lod_tensor(data1, lod, place)
second_word = fluid.create_lod_tensor(data2, lod, place)
third_word = fluid.create_lod_tensor(data3, lod, place)
fourth_word = fluid.create_lod_tensor(data4, lod, place)
assert feed_target_names[0] == 'firstw'
assert feed_target_names[1] == 'secondw'
assert feed_target_names[2] == 'thirdw'
assert feed_target_names[3] == 'fourthw'
# Construct feed as a dictionary of {feed_target_name: feed_target_data}
# and results will contain a list of data corresponding to fetch_targets.
results = exe.run(
inferencer,
feed={
feed_target_names[0]: first_word,
feed_target_names[1]: second_word,
feed_target_names[2]: third_word,
feed_target_names[3]: fourth_word
},
fetch_list=fetch_targets,
return_numpy=False)
print(numpy.array(results[0]))
most_possible_word_index = numpy.argmax(results[0])
print(most_possible_word_index)
print([
key for key, value in six.iteritems(word_dict)
if value == most_possible_word_index
][0])
print(results[0].recursive_sequence_lengths())
np_data = numpy.array(results[0])
print("Inference Shape: ", np_data.shape)
def main(use_cuda, is_sparse):
if use_cuda and not fluid.core.is_compiled_with_cuda():
return
params_dirname = "word2vec.inference.model"
train(
if_use_cuda=use_cuda,
params_dirname=params_dirname,
is_sparse=is_sparse)
infer(use_cuda=use_cuda, params_dirname=params_dirname)
if __name__ == '__main__':
args = parse_args()
PASS_NUM = args.num_epochs
use_cuda = args.use_gpu # set to True if training with GPU
main(use_cuda=use_cuda, is_sparse=True)
|
omnizart/cli/common_options.py | nicolasanjoran/omnizart | 1,145 | 11148608 | import click
def add_common_options(options):
def add_options(func):
for option in reversed(options):
func = option(func)
return func
return add_options
COMMON_TRANSCRIBE_OPTIONS = [
click.argument("input_audio", type=click.Path(exists=True)),
click.option(
"-m",
"--model-path",
help="Path to the pre-trained model or the supported transcription mode."
),
click.option(
"-o",
"--output",
help="Path to output the prediction file (could be MIDI, CSV, ..., etc.)",
default="./",
show_default=True,
type=click.Path(writable=True)
)
]
COMMON_GEN_FEATURE_OPTIONS = [
click.option(
"-d",
"--dataset-path",
help="Path to the downloaded dataset",
type=click.Path(exists=True),
required=True
),
click.option(
"-o",
"--output-path",
help="Path for saving the extracted feature. Default to the folder under the dataset.",
type=click.Path(writable=True)
),
click.option(
"-n",
"--num-threads",
help="Number of threads used for parallel feature extraction.",
type=int,
default=4,
show_default=True
)
]
COMMON_TRAIN_MODEL_OPTIONS = [
click.option(
"-d",
"--feature-path",
help="Path to the folder of extracted feature",
type=click.Path(exists=True),
required=True,
),
click.option(
"-m",
"--model-name",
help="Name for the output model (can be a path)",
type=click.Path(writable=True)
),
click.option(
"-i",
"--input-model",
help="If given, the training will continue to fine-tune the pre-trained model.",
type=click.Path(exists=True, writable=True),
),
click.option("-e", "--epochs", help="Number of training epochs", type=int),
click.option("-s", "--steps", help="Number of training steps of each epoch", type=int),
click.option("-vs", "--val-steps", help="Number of validation steps of each epoch", type=int),
click.option("-b", "--batch-size", help="Batch size of each training step", type=int),
click.option("-vb", "--val-batch-size", help="Batch size of each validation step", type=int),
click.option(
"--early-stop",
help="Stop the training if validation accuracy does not improve over the given number of epochs.",
type=int
)
]
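# Minimal usage sketch, assuming a click CLI entry point; the command name and body are
# illustrative only.
@click.command("transcribe-sketch")
@add_common_options(COMMON_TRANSCRIBE_OPTIONS)
def _transcribe_sketch(input_audio, model_path, output):
    """Echo the resolved options instead of transcribing."""
    click.echo("audio={} model={} output={}".format(input_audio, model_path, output))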
|
tests/test_ec2/test_account_attributes.py | gtourkas/moto | 5,460 | 11148621 |
import boto3
from moto import mock_ec2
import sure # pylint: disable=unused-import
@mock_ec2
def test_describe_account_attributes():
conn = boto3.client("ec2", region_name="us-east-1")
response = conn.describe_account_attributes()
expected_attribute_values = [
{
"AttributeValues": [{"AttributeValue": "5"}],
"AttributeName": "vpc-max-security-groups-per-interface",
},
{
"AttributeValues": [{"AttributeValue": "20"}],
"AttributeName": "max-instances",
},
{
"AttributeValues": [{"AttributeValue": "EC2"}, {"AttributeValue": "VPC"}],
"AttributeName": "supported-platforms",
},
{
"AttributeValues": [{"AttributeValue": "none"}],
"AttributeName": "default-vpc",
},
{
"AttributeValues": [{"AttributeValue": "5"}],
"AttributeName": "max-elastic-ips",
},
{
"AttributeValues": [{"AttributeValue": "5"}],
"AttributeName": "vpc-max-elastic-ips",
},
]
response["AccountAttributes"].should.equal(expected_attribute_values)
|
IOMC/EventVertexGenerators/python/GaussianZBeamSpotFilter_cfi.py | ckamtsikis/cmssw | 852 | 11148626 |
import FWCore.ParameterSet.Config as cms
from IOMC.EventVertexGenerators.BeamSpotFilterParameters_cfi import baseVtx,newVtx
simBeamSpotFilter = cms.EDFilter("GaussianZBeamSpotFilter",
src = cms.InputTag("generatorSmeared"),
baseSZ = baseVtx.SigmaZ,
baseZ0 = baseVtx.Z0,
newSZ = newVtx.SigmaZ,
newZ0 = newVtx.Z0
)
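# Typical wiring in a full configuration (illustrative; the path name is an assumption):
#   process.load("IOMC.EventVertexGenerators.GaussianZBeamSpotFilter_cfi")
#   process.beamSpotFilterPath = cms.Path(process.simBeamSpotFilter)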
|
applications/DelaunayMeshingApplication/python_scripts/post_refining_mesher.py | lkusch/Kratos | 778 | 11148638 |
from __future__ import print_function, absolute_import, division # makes KratosMultiphysics backward compatible with python 2.6 and 2.7
#import kratos core and applications
import KratosMultiphysics
import KratosMultiphysics.DelaunayMeshingApplication as KratosDelaunay
# Import the mesher (the base class for the mesher derivation)
from KratosMultiphysics.DelaunayMeshingApplication import mesher
def CreateMesher(main_model_part, meshing_parameters):
return PostRefiningMesher(main_model_part, meshing_parameters)
class PostRefiningMesher(mesher.Mesher):
#
def __init__(self, main_model_part, meshing_parameters):
mesher.Mesher.__init__(self, main_model_part, meshing_parameters)
#
def InitializeMeshing(self):
# set mesher flags: to set options for the mesher (triangle 2D, tetgen 3D)
# REFINE
refining_parameters = self.MeshingParameters.GetRefiningParameters()
refining_options = refining_parameters.GetRefiningOptions()
mesher_flags = ""
mesher_info = "Refine the domain"
meshing_options = self.MeshingParameters.GetOptions()
if( self.dimension == 2 ):
if( refining_options.Is(KratosDelaunay.MesherUtilities.REFINE_ADD_NODES) ):
#"YYJaqrn" "YJq1.4arn" "Jq1.4arn"
if( meshing_options.Is(KratosDelaunay.MesherUtilities.CONSTRAINED) ):
mesher_flags = "pYJq1.4arnCQ"
else:
mesher_flags = "YJq1.4arnQ"
if( refining_options.Is(KratosDelaunay.MesherUtilities.REFINE_INSERT_NODES) ):
#"riYYJQ" "riYYJQ" "riJQ" "riQ"
if( meshing_options.Is(KratosDelaunay.MesherUtilities.CONSTRAINED) ):
mesher_flags = "rinYYJQ"
else:
mesher_flags = "rinJQ"
elif( self.dimension == 3 ):
if( refining_options.Is(KratosDelaunay.MesherUtilities.REFINE_ADD_NODES) ):
if( meshing_options.Is(KratosDelaunay.MesherUtilities.CONSTRAINED) ):
mesher_flags = "pMYJq1.4arnCBQF"
else:
mesher_flags = "YJq1.4arnBQF"
if( refining_options.Is(KratosDelaunay.MesherUtilities.REFINE_INSERT_NODES) ):
if( meshing_options.Is(KratosDelaunay.MesherUtilities.CONSTRAINED) ):
mesher_flags = "rinYYJBQF"
else:
mesher_flags = "rinJBQF"
self.MeshingParameters.SetTessellationFlags(mesher_flags)
self.MeshingParameters.SetTessellationInfo(mesher_info)
#
def SetPreMeshingProcesses(self):
# no process to start
pass
#
@classmethod
def _class_prefix(self):
header = "::[---Post Refining---]::"
return header
|
isserviceup/services/pusher.py | EvgeshaGars/is-service-up | 182 | 11148653 |
from isserviceup.services.models.statuspage import StatusPagePlugin
class Pusher(StatusPagePlugin):
name = 'Pusher'
status_url = 'https://status.pusher.com/'
icon_url = '/images/icons/pusher.png'
|
examples/example_mnist_ae.py | julesmuhizi/qkeras | 388 | 11148672 |
# Copyright 2019 Google LLC
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""uses po2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from collections import defaultdict
import tensorflow.keras.backend as K
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import *
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.utils import to_categorical
from qkeras import *
from qkeras.utils import model_save_quantized_weights
import numpy as np
import tensorflow.compat.v1 as tf
np.random.seed(42)
NB_EPOCH = 100
BATCH_SIZE = 64
VERBOSE = 1
NB_CLASSES = 10
OPTIMIZER = Adam(lr=0.0001, decay=0.000025)
VALIDATION_SPLIT = 0.1
train = 1
(x_train, y_train), (x_test, y_test) = mnist.load_data()
RESHAPED = 784
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
x_train = x_train[..., np.newaxis]
x_test = x_test[..., np.newaxis]
x_train /= 256.0
x_test /= 256.0
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
print(y_train[0:10])
y_train = to_categorical(y_train, NB_CLASSES)
y_test = to_categorical(y_test, NB_CLASSES)
x = x_in = Input(
x_train.shape[1:-1] + (1,))
x = QConv2D(
32,
kernel_size=(3, 3),
kernel_quantizer=quantized_bits(4,0,1),
bias_quantizer=quantized_bits(4,0,1))(x)
x = QActivation("quantized_relu(4,0)")(x)
x = QConv2D(
16,
kernel_size=(3, 3),
kernel_quantizer=quantized_bits(4,0,1),
bias_quantizer=quantized_bits(4,0,1))(x)
x = QActivation("quantized_relu(4,0)")(x)
x = QConv2D(
8,
kernel_size=(3, 3),
kernel_quantizer=quantized_bits(4,0,1),
bias_quantizer=quantized_bits(4,0,1))(x)
x = QActivation("quantized_relu(4,0)")(x)
x = QConv2DTranspose(
8,
kernel_size=(3, 3),
kernel_quantizer=quantized_bits(4,0,1),
bias_quantizer=quantized_bits(4,0,1))(x)
x = QActivation("quantized_relu(4,0)")(x)
x = QConv2DTranspose(
16,
kernel_size=(3, 3),
kernel_quantizer=quantized_bits(4,0,1),
bias_quantizer=quantized_bits(4,0,1))(x)
x = QActivation("quantized_relu(4,0)")(x)
x = QConv2DTranspose(
32,
kernel_size=(3, 3),
kernel_quantizer=quantized_bits(4,0,1),
bias_quantizer=quantized_bits(4,0,1))(x)
x = QActivation("quantized_relu(4,0)")(x)
x = QConv2D(
1,
kernel_size=(3, 3),
padding="same",
kernel_quantizer=quantized_bits(4,0,1),
bias_quantizer=quantized_bits(4,0,1))(x)
x_out = x
x = Activation("sigmoid")(x)
model = Model(inputs=[x_in], outputs=[x])
mo = Model(inputs=[x_in], outputs=[x_out])
model.summary()
model.compile(
loss="binary_crossentropy", optimizer=OPTIMIZER, metrics=["accuracy"])
if train:
history = model.fit(
x_train, x_train, batch_size=BATCH_SIZE,
epochs=NB_EPOCH, initial_epoch=1, verbose=VERBOSE,
validation_split=VALIDATION_SPLIT)
# Generate reconstructions
num_reco = 8
samples = x_test[:num_reco]
targets = y_test[:num_reco]
reconstructions = model.predict(samples)
for layer in model.layers:
for w, weight in enumerate(layer.get_weights()):
print(layer.name, w, weight.shape)
print_qstats(model)
|
Stock/Select/Engine/Regression/DyStockSelectRegressionEngineProcess.py | Leonardo-YXH/DevilYuan | 135 | 11148702 | import queue
from DyCommon.DyCommon import *
from EventEngine.DyEvent import *
from EventEngine.DyEventEngine import *
from ..DyStockSelectSelectEngine import *
from ....Common.DyStockCommon import DyStockCommon
def dyStockSelectRegressionEngineProcess(outQueue, inQueue, tradeDays, strategy, codes, histDaysDataSource):
strategyCls = strategy['class']
parameters = strategy['param']
DyStockCommon.defaultHistDaysDataSource = histDaysDataSource
dummyEventEngine = DyDummyEventEngine()
queueInfo = DyQueueInfo(outQueue)
selectEngine = DyStockSelectSelectEngine(dummyEventEngine, queueInfo, False)
selectEngine.setTestedStocks(codes)
for day in tradeDays:
try:
event = inQueue.get_nowait()
except queue.Empty:
pass
parameters['基准日期'] = day
if selectEngine.runStrategy(strategyCls, parameters):
event = DyEvent(DyEventType.stockSelectStrategyRegressionAck)
event.data['class'] = strategyCls
event.data['period'] = [tradeDays[0], tradeDays[-1]]
event.data['day'] = day
event.data['result'] = selectEngine.result
outQueue.put(event)
else:
queueInfo.print('回归选股策略失败:{0}, 周期[{1}, {2}], 基准日期{3}'.format(strategyCls.chName, tradeDays[0], tradeDays[-1], day), DyLogData.error)
|
base/site-packages/django_qbe/exports.py | edisonlz/fastor | 285 | 11148709 |
# -*- coding: utf-8 -*-
import codecs
import csv
from StringIO import StringIO
from django.http import HttpResponse
from django.utils.datastructures import SortedDict
__all__ = ("formats", )
class FormatsException(Exception):
pass
class Formats(SortedDict):
def add(self, format):
parent = self
def decorator(func):
if callable(func):
parent.update({format: func})
else:
raise FormatsException("func is not a function.")
return decorator
formats = Formats()
# Taken from http://docs.python.org/library/csv.html#csv-examples
class UnicodeWriter(object):
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel_tab, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
def writerow(self, row):
self.writer.writerow([unicode(s).encode("utf-8") for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
for row in rows:
self.writerow(row)
def base_export(labels, results):
output = StringIO()
w = UnicodeWriter(output)
w.writerow(labels)
for row in results:
w.writerow(row)
output.seek(0)
return output.read()
@formats.add("csv")
def csv_format(labels, results):
output = base_export(labels, results)
mimetype = "text/csv"
return HttpResponse(output, mimetype=mimetype)
@formats.add("ods")
def ods_format(labels, results):
output = base_export(labels, results)
mimetype = "application/vnd.oasis.opendocument.spreadsheet"
return HttpResponse(output, mimetype=mimetype)
@formats.add("xls")
def xls_format(labels, results):
output = base_export(labels, results)
mimetype = "application/vnd.ms-excel"
return HttpResponse(output, mimetype=mimetype)
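# Minimal usage sketch, assuming a Django view; the labels and rows are illustrative only.
def _export_sketch(request, format="csv"):
    labels = ["id", "name"]
    results = [(1, u"Alice"), (2, u"Bob")]
    exporter = formats.get(format, csv_format)
    return exporter(labels, results)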
|
tests/functional/sample/child_sample/__init__.py | szabopeter/interrogate | 354 | 11148761 |
# Copyright 2020 <NAME>
# intentionally no docstrings here
|
python/examples/include/example.py | bwoodhouse322/package | 512 | 11148798 | #!/usr/bin/python
from metaparticle_pkg import Containerize, PackageFile
import os
import time
import logging
# all metaparticle output is accessible through the stdlib logger (debug level)
logging.basicConfig(level=logging.INFO)
logging.getLogger('metaparticle_pkg.runner').setLevel(logging.DEBUG)
logging.getLogger('metaparticle_pkg.builder').setLevel(logging.DEBUG)
DATA_FILE = '/opt/some/random/spot/data1.json'
SCRIPT = '/opt/another/random/place/get_the_data.sh'
@Containerize(
package={
'name': 'file-example',
'repository': 'docker.io/brendanburns',
'publish': False,
'additionalFiles': [
PackageFile(src='./data.json', dest=DATA_FILE, mode='0400'),
PackageFile(src='./get_data.sh', dest=SCRIPT),
]
}
)
def main():
os.system(SCRIPT)
for i in range(5):
print('Sleeping ... {} sec'.format(i))
time.sleep(1)
if __name__ == '__main__':
main()
|
aw_nas/objective/fault_injection.py | Harald-R/aw_nas | 195 | 11148799 |
# -*- coding: utf-8 -*-
"""
Fault injection objective.
* Clean accuracy and fault-injected accuracy weighted for reward (for discrete controller search)
* Clean loss and fault-injected loss weighted for loss
(for differentiable controller search or fault-injection training).
"""
import threading
from collections import defaultdict
import six
import numpy as np
import torch
from scipy.stats import binom
from torch import nn
from aw_nas import utils
from aw_nas.utils.torch_utils import accuracy
from aw_nas.objective.base import BaseObjective
from aw_nas.utils.exception import expect, ConfigException
def _get_average_meter_defaultdict():
return defaultdict(utils.AverageMeter)
class FaultInjector(object):
def __init__(self, gaussian_std=1., mode="fixed", tile_size=None, max_value_mode=True,
macwise_inputdep_rate=1.0):
self.tile_size = tile_size
self.random_inject = 0.001
self.gaussian_std = gaussian_std
self.mode = mode
self.max_value_mode = max_value_mode
self.m_i_rate = macwise_inputdep_rate
self.fault_bit_list = np.array([2**x for x in range(8)] + [-2**x for x in range(8)],
dtype=np.float32)
def set_random_inject(self, value):
self.random_inject = value
def set_gaussian_std(self, value):
self.gaussian_std = value
def inject_gaussian(self, out):
gaussian = torch.randn(out.shape, dtype=out.dtype, device=out.device) * self.gaussian_std
out = out + gaussian
return out
def inject_saltandpepper(self, out):
random_tensor = out.new(out.size()).random_(0, 2*int(1./self.random_inject))
salt_ind = (random_tensor == 0)
pepper_ind = (random_tensor == 1)
max_ = torch.max(torch.abs(out)).cpu().data
out[salt_ind] = 0
out[pepper_ind] = max_
return out
def inject_bitflip(self, out):
tile_size = self.tile_size
if tile_size is None:
tile_size = tuple(out.size())
repeat = None
else:
repeat = np.ceil(np.array(out.size()) / np.array(tile_size)).astype(np.int)
random_tensor = out.new(torch.Size(tile_size)).random_(0, int(1. / self.random_inject))
if repeat is not None:
random_tensor = random_tensor.repeat(*repeat)
# using bitflip, must have been quantized!
# FIXME: nics_fix_pytorch should keep this the same as out.device, isn't it...
scale = out.data_cfg["scale"].to(out.device)
bitwidth = out.data_cfg["bitwidth"].to(out.device)
step = torch.pow(torch.autograd.Variable(torch.FloatTensor([2.]).to(out.device),
requires_grad=False),
(scale.float() - bitwidth.float()))
fault_ind = (random_tensor < 1)
fault_mask = np.round(np.exp(np.random.randint(0, 8, size=fault_ind.sum().cpu().data)\
* np.log(2))).astype(np.int32)
random_tensor.zero_()
random_tensor[fault_ind] = torch.tensor(fault_mask).to(out.device).float()
random_tensor = random_tensor.to(torch.int32)
# FIXME: negative correct? no... if assume random bit-flip in complement representation
# can realize flip sign-bit by -256(Q=8, complement = 1000 000)
# but only support Q=8,16,32,64
# sure we can implement this flip ourself by bias operation,
# but i'don't think this would be more reliable than the bias model, actually...
ori_type = out.dtype
out = (out.div_(step).to(torch.int32) ^ random_tensor).to(ori_type).mul_(step)
return out
def inject_fixed(self, out, n_mac=1):
if not hasattr(out, "data_cfg"):
# skip connections that are added together should share the same
# fixed quantization, but now, this is not supported by nics_fix_pt.
# so, estimate the quantization config here
scale = torch.ceil(torch.log(
torch.max(torch.max(torch.abs(out)),
torch.tensor(1e-5).float().to(out.device))) / np.log(2.))
bitwidth = torch.tensor([8]).to(int).to(out.device) # default 8
max_ = float(torch.max(torch.abs(out)).cpu().data.numpy())
else:
scale = out.data_cfg["scale"].to(out.device)
bitwidth = out.data_cfg["bitwidth"].to(out.device)
max_ = float((2**scale.float()).cpu().data.numpy())
step = torch.pow(torch.autograd.Variable(torch.FloatTensor([2.]).to(out.device),
requires_grad=False),
(scale.float() - (bitwidth.float() - 1)))
# ---- handle tile ----
# Currently, only the fault position is tiled,
# the correlation between the biases at tiling positions is not considered
# However, easy to modify
tile_size = self.tile_size
if tile_size is None:
tile_size = list(out.shape)
repeat = None
else:
repeat = np.ceil(np.array(out.size())[1:] / np.array(tile_size)).astype(np.int)
tile_size = [out.shape[0]] + list(tile_size)
random_inject = self.random_inject
bitwidth_data = int(bitwidth.cpu().data.numpy())
# ---- handle n_mac ----
# if n_mac != 1:
n_addi_affect_bits = int(np.floor(np.log2(n_mac)))
# if n_mac is very large, define the fraction length as F_w = (Q_w-1) - S_w
# n_addi_affect_bits <= F_w + F_i - F_o; S_w + S_i - S_o >= 0
# so n_addi_affect_bits <= (Q_w-1) + (Q_i-1) - (Q_o-1)
# currently, we assume Q_w == Q_i == Q_o == bitwidth_data
n_addi_affect_bits = min(bitwidth_data - 1, n_addi_affect_bits)
random_inject = random_inject * \
(float(n_addi_affect_bits + bitwidth_data) / bitwidth_data)
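        # Worked example of the scaling above (illustrative numbers): with an 8-bit
        # datapath and n_mac = 9 (a 3x3 kernel), floor(log2(9)) = 3 extra low-order
        # bits can be hit, so the per-value fault rate is scaled by (3 + 8) / 8.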
# ---- generate fault position mask ----
random_tensor = out.new(torch.Size(tile_size)).random_(0, int(1. / random_inject))
if repeat is not None:
random_tensor = random_tensor.repeat(1, *repeat)\
[:, :out.shape[1], :out.shape[2], :out.shape[3]]
fault_ind = (random_tensor < 1)
random_tensor.zero_()
# ---- generate bias ----
if self.max_value_mode:
fault_bias = step * 128.
random_tensor[fault_ind] = fault_bias
# elif n_mac != 1:
else:
_n_err = bitwidth_data + n_addi_affect_bits
fault_bit_list = np.array([2**x for x in range(-n_addi_affect_bits, bitwidth_data)] + \
[-2**x for x in range(-n_addi_affect_bits, bitwidth_data)],
dtype=np.float32)
size_ = fault_ind.sum().cpu().numpy()
n_bias = n_mac if self.m_i_rate == 1.0 else\
torch.tensor(binom.rvs(int(n_mac), self.m_i_rate, size=size_).astype(np.float32)).to(out.device)
random_tensor[fault_ind] = step * \
(n_bias * torch.tensor(fault_bit_list[
np.random.randint(
0, 2 * _n_err,
size=size_)])\
.to(out.device)).floor()
# else:
# random_tensor[fault_ind] = step * \
# (torch.tensor(
# self.fault_bit_list[np.random.randint(
# 0, 16, size=fault_ind.sum().cpu().data)]
# ).to(out.device).floor())
out = out + random_tensor
# TODO: tile + cin
# clip
out.clamp_(min=-max_, max=max_)
# # for masked bp
# normal_mask = torch.ones_like(out)
# normal_mask[fault_ind] = 0
# masked = normal_mask * out
# out = (out - masked).detach() + masked
return out
def inject(self, out, **kwargs):
return eval("self.inject_" + self.mode)(out, **kwargs) #pylint: disable=eval-used
class FaultInjectionObjective(BaseObjective):
NAME = "fault_injection"
SCHEDULABLE_ATTRS = ["fault_reward_coeff", "fault_loss_coeff", "latency_reward_coeff", "inject_prob", "gaussian_std"]
def __init__(self, search_space,
fault_modes="gaussian", gaussian_std=1., inject_prob=0.001, max_value_mode=True,
inject_macwise_inputdep_rate=1.0,
inject_n_cin=None,
inject_tile_size=None, # c_o, h_o, w_o
inject_propto_flops=False,
activation_fixed_bitwidth=None,
# loss
fault_loss_coeff=0.,
as_controller_regularization=False,
as_evaluator_regularization=False,
# reward
fault_reward_coeff=0.2,
latency_reward_coeff=0.,
calc_latency=True,
schedule_cfg=None):
super(FaultInjectionObjective, self).__init__(search_space, schedule_cfg)
assert 0. <= fault_reward_coeff <= 1.
self.injector = FaultInjector(gaussian_std, fault_modes, inject_tile_size,
max_value_mode=max_value_mode,
macwise_inputdep_rate=inject_macwise_inputdep_rate)
self.inject_n_cin = inject_n_cin
self.injector.set_random_inject(inject_prob)
self.fault_loss_coeff = fault_loss_coeff
self.as_controller_regularization = as_controller_regularization
self.as_evaluator_regularization = as_evaluator_regularization
if self.fault_loss_coeff > 0:
expect(self.as_controller_regularization or self.as_evaluator_regularization,
"When `fault_loss_coeff` > 0, you should either use this fault-injected loss"
" as controller regularization or as evaluator regularization, or both. "
"By setting `as_controller_regularization` and `as_evaluator_regularization`.",
ConfigException)
self.fault_reward_coeff = fault_reward_coeff
self.latency_reward_coeff = latency_reward_coeff
self.calc_latency = calc_latency
if not self.calc_latency:
expect(latency_reward_coeff == 0,
"`latency_reward_coeff` must equal 0 when latency is not calculated",
ConfigException)
self.inject_propto_flops = inject_propto_flops
#if self.inject_propto_flops:
# expect(fault_modes == "fixed",
# "When `inject_propto_flops` is True, must use the bit-flip fault mode `fixed`",
# ConfigException)
self.inject_prob_avg_meters = defaultdict(utils.AverageMeter)
self.cls_inject_prob_avg_meters = defaultdict(lambda: defaultdict(utils.AverageMeter))
self.activation_fixed_bitwidth = activation_fixed_bitwidth
self._init_thread_local()
@classmethod
def supported_data_types(cls):
return ["image"]
def perf_names(cls):
return ["acc_clean", "acc_fault", "flops"]
def get_reward(self, inputs, outputs, targets, cand_net):
perfs = self.get_perfs(inputs, outputs, targets, cand_net)
if not self.calc_latency:
return perfs[0] * (1 - self.fault_reward_coeff) + perfs[1] * self.fault_reward_coeff
return perfs[0] * (1 - self.fault_reward_coeff) + \
perfs[1] * self.fault_reward_coeff + perfs[2] * self.latency_reward_coeff
def get_perfs(self, inputs, outputs, targets, cand_net):
"""
Get top-1 acc.
"""
outputs_f = cand_net.forward_one_step_callback(inputs, callback=self.inject)
if hasattr(cand_net, "super_net"):
cand_net.super_net.reset_flops()
if self.calc_latency:
cand_net.forward(inputs)
if isinstance(cand_net, nn.DataParallel):
flops = cand_net.module.total_flops
else:
flops = cand_net.super_net.total_flops if hasattr(cand_net, "super_net") else \
cand_net.total_flops
if hasattr(cand_net, "super_net"):
cand_net.super_net._flops_calculated = True
return float(accuracy(outputs, targets)[0]) / 100, \
float(accuracy(outputs_f, targets)[0]) / 100, \
1 / max(flops * 1e-6 - 180, 20)
        return float(accuracy(outputs, targets)[0]) / 100, \
            float(accuracy(outputs_f, targets)[0]) / 100
def get_loss(self, inputs, outputs, targets, cand_net,
add_controller_regularization=True, add_evaluator_regularization=True):
"""
        Get the cross entropy loss *tensor*, optionally add regularization loss.
Args:
inputs: data inputs
outputs: logits
targets: labels
"""
loss = nn.CrossEntropyLoss()(outputs, targets)
if self.fault_loss_coeff > 0 and \
((add_controller_regularization and self.as_controller_regularization) or \
(add_evaluator_regularization and self.as_evaluator_regularization)):
# only forward and random inject once, this might not be of high variance
# for differentiable controller training?
outputs_f = cand_net.forward_one_step_callback(inputs, callback=self.inject)
ce_loss_f = nn.CrossEntropyLoss()(outputs_f, targets)
loss = (1 - self.fault_loss_coeff) * loss + self.fault_loss_coeff * ce_loss_f
return loss
def inject(self, state, context):
        # This method can be called concurrently when using `DataParallel.forward_one_step_callback`.
        # Add a lock to protect the critical section.
if self.activation_fixed_bitwidth:
# quantize the activation
# NOTE: the quantization of the weights is done in nfp patch,
# see `examples/fixed_point_patch.py`
# import this manually before creating operations, or soft-link this script under
# plugin dir to enable quantization for the weights
state = context.last_state = self.thread_local.fix(state)
if context.is_last_concat_op or not context.is_last_inject:
return
assert state is context.last_state
with self.thread_lock:
if self.inject_propto_flops:
mod = context.last_conv_module
backup_inject_prob = self.inject_prob
if mod is None:
return # last op is not conv op
if self.inject_n_cin is not None:
if mod.groups != 1:
# FIXME: currently, assume depthwise, (other group-conv not supported)
# each OFM value is calculated without adder tree, just MAC
n_mac = mod.kernel_size[0] * mod.kernel_size[1]
else:
inject_prob = 1 - (1 - backup_inject_prob) ** self.inject_n_cin
n_mac = np.ceil(float(mod.in_channels / self.inject_n_cin)) * \
mod.kernel_size[0] * mod.kernel_size[1]
self.inject_prob = inject_prob
else:
mul_per_loc = mod.in_channels / mod.groups * \
mod.kernel_size[0] * mod.kernel_size[1]
inject_prob = 1 - (1 - backup_inject_prob) ** mul_per_loc
n_mac = 1
self.inject_prob = inject_prob
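                # Worked example (illustrative): for a dense 3x3 convolution with
                # 64 input channels, mul_per_loc = 64 * 9 = 576, so a per-MAC
                # inject prob of 0.001 becomes 1 - 0.999 ** 576, roughly 0.44
                # per output value.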
self.inject_prob_avg_meters[context.index].update(self.inject_prob)
if mod.groups > 1:
# sep conv
cls_name = "conv_{}x{}".format(mod.kernel_size[0], mod.kernel_size[1])
else:
# normal conv
cls_name = "conv_Cx{}x{}".format(mod.kernel_size[0], mod.kernel_size[1])
self.cls_inject_prob_avg_meters[cls_name][context.index].update(self.inject_prob)
context.last_state = self.injector.inject(state, n_mac=n_mac)
if self.inject_propto_flops:
self.inject_prob = backup_inject_prob
def on_epoch_end(self, epoch):
super(FaultInjectionObjective, self).on_epoch_end(epoch)
if self.inject_prob_avg_meters:
            # in final training, if the base inject prob does not vary, the inject prob of the same
            # position/feature map should always be the same.
stats = [(ind, meter.avg) for ind, meter in six.iteritems(self.inject_prob_avg_meters)]
num_pos = len(stats) # number of inject position
stats = sorted(stats, key=lambda stat: stat[1])
mean_prob = np.mean([stat[1] for stat in stats])
geomean_prob = np.prod([stat[1] for stat in stats])**(1.0/num_pos)
self.logger.info("[NOTE: not meaningful in search, as every pass the same index "
"corresponds to different op] Num feature map injected: %3d; "
"Inject prob range: [%.4f (%s), %.4f (%s)]; "
"Mean: %.4f ; Geometric mean: %.4f",
num_pos, stats[0][1], stats[0][0], stats[-1][1], stats[-1][0],
mean_prob, geomean_prob)
self.inject_prob_avg_meters = defaultdict(utils.AverageMeter) # reset
# mean according to operation types
for cls_name, avg_meters in sorted(self.cls_inject_prob_avg_meters.items(),
key=lambda item: item[0]):
stats = [(ind, meter.avg) for ind, meter in six.iteritems(avg_meters)]
num_pos = len(stats) # number of inject position
stats = sorted(stats, key=lambda stat: stat[1])
mean_prob = np.mean([stat[1] for stat in stats])
geomean_prob = np.prod([stat[1] for stat in stats])**(1.0/num_pos)
self.logger.info("Type: %s: Num feature map injected: %3d; "
"Inject prob range: [%.4f (%s), %.4f (%s)]; "
"Mean: %.4f ; Geometric mean: %.4f", cls_name,
num_pos, stats[0][1], stats[0][0], stats[-1][1], stats[-1][0],
mean_prob, geomean_prob)
self.cls_inject_prob_avg_meters = defaultdict(_get_average_meter_defaultdict)
@property
def inject_tile_size(self):
return self.injector.tile_size
@inject_tile_size.setter
def inject_tile_size(self, tile_size):
self.injector.tile_size = tile_size
@property
def inject_prob(self):
return self.injector.random_inject
@inject_prob.setter
def inject_prob(self, value):
self.injector.set_random_inject(value)
@property
def gaussian_std(self):
return self.injector.gaussian_std
@gaussian_std.setter
def gaussian_std(self, value):
self.injector.set_gaussian_std(value)
def __getstate__(self):
state = super(FaultInjectionObjective, self).__getstate__()
del state["thread_lock"]
if "thread_local" in state:
del state["thread_local"]
return state
def __setstate__(self, state):
super(FaultInjectionObjective, self).__setstate__(state)
self._init_thread_local()
def _init_thread_local(self):
if self.activation_fixed_bitwidth:
import nics_fix_pt.nn_fix as nfp
self.thread_local = utils.LazyThreadLocal(creator_map={
"fix": lambda: nfp.Activation_fix(nf_fix_params={
"activation": {
# auto fix
"method": torch.autograd.Variable(torch.IntTensor(np.array([1])),
requires_grad=False),
# not meaningful
"scale": torch.autograd.Variable(torch.IntTensor(np.array([0])),
requires_grad=False),
"bitwidth": torch.autograd.Variable(torch.IntTensor(
np.array([self.activation_fixed_bitwidth])),
requires_grad=False)
}
})
})
self.thread_lock = threading.Lock()
|
mayan/apps/checkouts/dashboard_widgets.py | eshbeata/open-paperless | 2,743 | 11148803 | <filename>mayan/apps/checkouts/dashboard_widgets.py<gh_stars>1000+
from __future__ import absolute_import, unicode_literals
from django.apps import apps
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from common.classes import DashboardWidget
def checkedout_documents_queryset():
DocumentCheckout = apps.get_model(
app_label='checkouts', model_name='DocumentCheckout'
)
return DocumentCheckout.objects.all()
widget_checkouts = DashboardWidget(
label=_('Checkedout documents'),
link=reverse_lazy('checkouts:checkout_list'),
icon='fa fa-shopping-cart', queryset=checkedout_documents_queryset
)
|
dislash/__init__.py | Bakersbakebread/dislash.py | 371 | 11148845 | __version__ = "1.5.0"
from .interactions import *
from .application_commands import *
slash_commands = application_commands
|
pyNastran/gui/menus/groups_modify/interface.py | ACea15/pyNastran | 293 | 11148849 | <filename>pyNastran/gui/menus/groups_modify/interface.py
from pyNastran.gui.menus.groups_modify.groups_modify import GroupsModify
def on_set_modify_groups(self):
"""
Opens a dialog box to set:
+--------+----------+
| Name | String |
+--------+----------+
| Min | Float |
+--------+----------+
| Max | Float |
+--------+----------+
| Format | pyString |
+--------+----------+
"""
if not len(self.groups): # no 'main' group
self.log_error('No main group to create.')
return
#print('groups.keys() = %s' % list(self.groups.keys()))
group_active = self.group_active
assert isinstance(group_active, str), group_active
data = {
'font_size' : self.settings.font_size,
0 : self.groups['main'],
'clicked_ok' : False,
'close' : False,
}
i = 1
for name, group in sorted(self.groups.items()):
if name == 'main':
continue
data[i] = group
i += 1
if not self._modify_groups_window_shown:
self._modify_groups_window = GroupsModify(
data, win_parent=self, group_active=group_active)
self._modify_groups_window.show()
self._modify_groups_window_shown = True
self._modify_groups_window.exec_()
else:
self._modify_groups_window.activateWindow()
if data['close']:
if not self._modify_groups_window._updated_groups:
self._apply_modify_groups(data)
self._modify_groups_window_shown = False
del self._modify_groups_window
else:
self._modify_groups_window.activateWindow()
|
OpenCLGA/simple_chromosome.py | czarnobylu/OpenCLGA | 112 | 11148857 | #!/usr/bin/python3
import numpy
import pyopencl as cl
from .simple_gene import SimpleGene
class SimpleChromosome:
# SimpleChromosome - a chromosome contains a list of Genes.
# __genes - a list of Genes
# __name - name of the chromosome
    # __improving_func - a function name in kernel to guarantee a better mutation result.
    # dna - a list of each Gene's dna
    # dna_total_length - sum of the lengths of all genes' dna
def __init__(self, genes, name = ''):
assert all(isinstance(gene, SimpleGene) for gene in genes)
assert type(genes) == list
self.__genes = genes
self.__name = name
self.__improving_func = None
@property
def num_of_genes(self):
# The number of genes inside this SimpleChromosome.
return len(self.__genes)
@property
def name(self):
return self.__name
@property
def dna_total_length(self):
        # Sum of the dna length of each gene.
return sum([gene.length for gene in self.__genes])
@property
def dna(self):
return [gene.dna for gene in self.__genes]
@dna.setter
def dna(self, dna_sequence):
assert self.num_of_genes == len(dna_sequence)
for i, gene in enumerate(self.__genes):
gene.dna = dna_sequence[i]
@property
def genes(self):
return self.__genes
@property
def gene_elements(self):
return [] if len(self.__genes) == 0 else self.__genes[0].elements
@property
def gene_elements_in_kernel(self):
return [] if len(self.__genes) == 0 else self.__genes[0].elements_in_kernel
@property
def kernel_file(self):
return 'simple_chromosome.cl'
@property
def struct_name(self):
        return '__SimpleChromosome'
@property
def chromosome_size_define(self):
return 'SIMPLE_CHROMOSOME_GENE_SIZE'
    def early_terminated(self, best, worst):
# If the difference between the best and the worst is negligible,
# terminate the program to save time.
return abs(worst - best) < 0.0001
def from_kernel_value(self, data):
# Construct a SimpleChromosome object on system memory according to
# the calculated 'data' on opencl(device) memory.
assert len(data) == self.num_of_genes
genes = [self.__genes[idx].from_kernel_value(v) for idx, v in enumerate(data)]
return SimpleChromosome(genes, self.__name)
def use_improving_only_mutation(self, helper_func_name):
# Set a helper function to make sure a better mutation result.
self.__improving_func = helper_func_name
def kernelize(self):
# - Build a str which contains c99-like codes. This str will be written
# into a final kernel document called 'final.cl' for execution.
# - Gene elements, size, mutation function is pre-defined as MACRO for
# easier usage.
elements_size_list = [str(gene.elements_length) for gene in self.__genes]
candidates = '#define SIMPLE_CHROMOSOME_GENE_ELEMENTS_SIZE {' +\
', '.join(elements_size_list) + '}\n'
defines = '#define SIMPLE_CHROMOSOME_GENE_SIZE ' + str(self.num_of_genes) + '\n' +\
'#define SIMPLE_CHROMOSOME_GENE_MUTATE_FUNC ' +\
self.__genes[0].mutate_func_name + '\n'
return candidates + defines
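    # Illustrative result of kernelize() for two genes with 3 and 5 elements and
    # a mutate helper named "simple_gene_mutate" (the names here are examples,
    # not taken from the library):
    #   #define SIMPLE_CHROMOSOME_GENE_ELEMENTS_SIZE {3, 5}
    #   #define SIMPLE_CHROMOSOME_GENE_SIZE 2
    #   #define SIMPLE_CHROMOSOME_GENE_MUTATE_FUNC simple_gene_mutate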
def save(self, data, ctx, queue, population):
total_dna_size = population * self.dna_total_length
# prepare memory
other_chromosomes = numpy.zeros(total_dna_size, dtype=numpy.int32)
ratios = numpy.zeros(population, dtype=numpy.float32)
# read data from cl
cl.enqueue_read_buffer(queue, self.__dev_ratios, ratios)
cl.enqueue_read_buffer(queue, self.__dev_other_chromosomes, other_chromosomes).wait()
# save all of them
data['other_chromosomes'] = other_chromosomes
data['ratios'] = ratios
def restore(self, data, ctx, queue, population):
other_chromosomes = data['other_chromosomes']
ratios = data['ratios']
# prepare CL memory
mf = cl.mem_flags
self.__dev_ratios = cl.Buffer(ctx, mf.WRITE_ONLY, ratios.nbytes)
self.__dev_other_chromosomes = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR,
hostbuf=other_chromosomes)
# Copy data from main memory to GPU memory
cl.enqueue_copy(queue, self.__dev_ratios, ratios)
cl.enqueue_copy(queue, self.__dev_other_chromosomes, other_chromosomes)
def preexecute_kernels(self, ctx, queue, population):
# initialize global variables for kernel execution
total_dna_size = population * self.dna_total_length
other_chromosomes = numpy.zeros(total_dna_size, dtype=numpy.int32)
ratios = numpy.zeros(population, dtype=numpy.float32)
mf = cl.mem_flags
# prepare device memory for usage.
self.__dev_ratios = cl.Buffer(ctx, mf.WRITE_ONLY, ratios.nbytes)
self.__dev_other_chromosomes = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR,
hostbuf=other_chromosomes)
def get_populate_kernel_names(self):
return ['simple_chromosome_populate']
def get_crossover_kernel_names(self):
return ['simple_chromosome_calc_ratio',\
'simple_chromosome_pick_chromosomes',\
'simple_chromosome_do_crossover']
def get_mutation_kernel_names(self):
return ['simple_chromosome_mutate_all']
def execute_populate(self, prg, queue, population, dev_chromosomes, dev_rnum):
prg.simple_chromosome_populate(queue,
(population,),
(1,),
dev_chromosomes,
dev_rnum).wait()
def selection_preparation(self, prg, queue, dev_fitnesses):
prg.simple_chromosome_calc_ratio(queue,
(1,),
(1,),
dev_fitnesses,
self.__dev_ratios).wait()
def execute_get_current_elites(self, prg, queue, top,
dev_chromosomes, dev_current_elites,
dev_best_indices):
prg.simple_chromosome_get_the_elites(queue, (1,), (1,),
dev_best_indices,
dev_chromosomes,
dev_current_elites,
numpy.int32(top)).wait()
def execute_update_current_elites(self, prg, queue, top, dev_worst_indices,
dev_chromosomes, dev_updated_elites,
dev_fitnesses, dev_updated_elite_fitness):
prg.simple_chromosome_update_the_elites(queue, (1,), (1,),
numpy.int32(top),
dev_worst_indices,
dev_chromosomes,
dev_updated_elites,
dev_fitnesses,
dev_updated_elite_fitness).wait()
def execute_crossover(self, prg, queue, population, generation_idx, prob_crossover,
dev_chromosomes, dev_fitnesses, dev_rnum, best_fitness):
prg.simple_chromosome_pick_chromosomes(queue,
(population,),
(1,),
dev_chromosomes,
dev_fitnesses,
self.__dev_other_chromosomes,
self.__dev_ratios,
dev_rnum).wait()
prg.simple_chromosome_do_crossover(queue,
(population,),
(1,),
dev_chromosomes,
dev_fitnesses,
self.__dev_other_chromosomes,
dev_rnum,
numpy.float32(best_fitness),
numpy.float32(prob_crossover)).wait()
def execute_mutation(self, prg, queue, population, generation_idx, prob_mutate,
dev_chromosomes, dev_fitnesses, dev_rnum, extra_list):
prg.simple_chromosome_mutate_all(queue,
(population,),
(1,),
dev_chromosomes,
dev_rnum,
numpy.float32(prob_mutate)).wait()
|
optimus/engines/vaex/io/save.py | ironmussa/Optimus | 1,045 | 11148860 | import os
from optimus.helpers.functions import prepare_path_local, path_is_local
from optimus.helpers.logger import logger
from optimus.helpers.types import *
from optimus.engines.base.io.save import BaseSave
class Save(BaseSave):
def __init__(self, root: 'DataFrameType'):
self.root = root
def hdf5(self, path, conn, *args, **kwargs):
df = self.root.data
if conn is not None:
path = conn.path(path)
storage_options = conn.storage_options
try:
os.makedirs(path, exist_ok=True)
df.export_hdf5(path, *args, **kwargs)
except (OSError, IOError) as error:
logger.print(error)
raise
def json(self, path, storage_options=None, conn=None, *args, **kwargs):
df = self.root.data
if conn is not None:
path = conn.path(path)
storage_options = conn.storage_options
try:
os.makedirs(path, exist_ok=True)
df.to_json(filename=path, storage_options=storage_options, *args, **kwargs)
except (OSError, IOError) as error:
logger.print(error)
raise
# print("Creation of the directory %s failed" % path)
# else:
# print("Successfully created the directory %s" % path)
def csv(self, path, mode="wt", index=False, single_file=True, storage_options=None, conn=None, **kwargs):
df = self.root.data
if conn is not None:
path = conn.path(path)
storage_options = conn.storage_options
try:
if path_is_local(path):
prepare_path_local(path)
df.to_csv(filename=path, mode=mode, index=index, single_file=single_file, storage_options=storage_options,
**kwargs)
except IOError as error:
logger.print(error)
raise
def parquet(self, path, mode="overwrite", num_partitions=1, engine="pyarrow", storage_options=None, conn=None,
**kwargs):
# This character are invalid as column names by parquet
invalid_character = [" ", ",", ";", "{", "}", "(", ")", "\n", "\t", "="]
def func(col_name):
for i in invalid_character:
col_name = col_name.replace(i, "_")
return col_name
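        # e.g. (illustrative) func("col name,1") -> "col_name_1": every character
        # parquet rejects is replaced with an underscore before renaming.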
df = self.root.cols.rename(func)
if conn is not None:
path = conn.path(path)
storage_options = conn.storage_options
dfd = df.data
try:
if engine == 'pyarrow':
dfd.to_parquet(path, engine='pyarrow', mode=mode, storage_options=storage_options, **kwargs)
elif engine == "fastparquet":
dfd.to_parquet(path, engine='fastparquet', mode=mode, storage_options=storage_options, **kwargs)
except IOError as e:
logger.print(e)
raise
@staticmethod
def avro(path):
raise NotImplementedError('Not implemented yet')
|
tardis/io/tests/test_config_reader.py | ahmedo42/tardis | 176 | 11148866 | # tests for the config reader module
import os
from attr import validate
import pytest
import pandas as pd
from numpy.testing import assert_almost_equal
from jsonschema.exceptions import ValidationError
from tardis.io import config_reader
from tardis.io.config_reader import Configuration
def data_path(filename):
data_dir = os.path.dirname(__file__)
return os.path.abspath(os.path.join(data_dir, "data", filename))
def test_convergence_section_parser():
test_convergence_section = {
"type": "damped",
"lock_t_inner_cyles": 1,
"t_inner_update_exponent": -0.5,
"damping_constant": 0.5,
"threshold": 0.05,
"fraction": 0.8,
"hold_iterations": 3,
"t_rad": {"damping_constant": 1.0},
}
parsed_convergence_section = config_reader.parse_convergence_section(
test_convergence_section
)
assert_almost_equal(
parsed_convergence_section["t_rad"]["damping_constant"], 1.0
)
assert_almost_equal(
parsed_convergence_section["w"]["damping_constant"], 0.5
)
def test_from_config_dict(tardis_config_verysimple):
conf = Configuration.from_config_dict(
tardis_config_verysimple, validate=True, config_dirname="test"
)
assert conf.config_dirname == "test"
assert_almost_equal(
conf.spectrum.start.value,
tardis_config_verysimple["spectrum"]["start"].value,
)
assert_almost_equal(
conf.spectrum.stop.value,
tardis_config_verysimple["spectrum"]["stop"].value,
)
tardis_config_verysimple["spectrum"]["start"] = "Invalid"
with pytest.raises(ValidationError):
conf = Configuration.from_config_dict(
tardis_config_verysimple, validate=True, config_dirname="test"
)
def test_config_hdf(hdf_file_path, tardis_config_verysimple):
expected = Configuration.from_config_dict(
tardis_config_verysimple, validate=True, config_dirname="test"
)
expected.to_hdf(hdf_file_path, overwrite=True)
actual = pd.read_hdf(hdf_file_path, key="/simulation/config")
expected = expected.get_properties()["config"]
assert actual[0] == expected[0]
def test_model_section_config(tardis_config_verysimple):
"""
Configuration Validation Test for Model Section of the Tardis Config YAML File
Validates:
Density: branch85_w7
Velocity (Start < End)
Parameter
---------
`tardis_config_verysimple` : YAML File
Result
------
Assertion based on validation for specified values
"""
conf = Configuration.from_config_dict(
tardis_config_verysimple, validate=True, config_dirname="test"
)
assert conf.model.structure.density.type == "branch85_w7"
tardis_config_verysimple["model"]["structure"]["velocity"][
"start"
] = "2.0e4 km/s"
tardis_config_verysimple["model"]["structure"]["velocity"][
"stop"
] = "1.1e4 km/s"
with pytest.raises(ValueError) as ve:
if (
conf.model.structure.velocity.start
< conf.model.structure.velocity.stop
):
raise ValueError("Stop Value must be greater than Start Value")
assert ve.type is ValueError
def test_supernova_section_config(tardis_config_verysimple):
"""
Configuration Validation Test for Supernova Section of the Tardis Config YAML File
Validates:
Time of Explosion (Must always be positive)
Luminosity Wavelength Limits (Start < End)
Parameter
---------
`tardis_config_verysimple` : YAML File
Result
------
Assertion based on validation for specified values
"""
conf = Configuration.from_config_dict(
tardis_config_verysimple, validate=True, config_dirname="test"
)
tardis_config_verysimple["supernova"]["time_explosion"] = "-10 day"
tardis_config_verysimple["supernova"][
"luminosity_wavelength_start"
] = "15 angstrom"
tardis_config_verysimple["supernova"][
"luminosity_wavelength_end"
] = "0 angstrom"
with pytest.raises(ValueError) as ve:
if conf.supernova.time_explosion.value > 0:
raise ValueError("Time of Explosion cannot be negative")
assert ve.type is ValueError
with pytest.raises(ValueError) as ve:
if (
conf.supernova.luminosity_wavelength_start.value
< conf.supernova.luminosity_wavelength_end.value
):
raise ValueError(
"End Limit must be greater than Start Limit for Luminosity"
)
assert ve.type is ValueError
def test_plasma_section_config(tardis_config_verysimple):
"""
Configuration Validation Test for Plasma Section of the Tardis Config YAML File
Validates:
Initial temperature inner (must be greater than -1K)
Initial radiative temperature (must be greater than -1K)
Parameter
---------
`tardis_config_verysimple` : YAML File
Result
------
Assertion based on validation for specified values
"""
conf = Configuration.from_config_dict(
tardis_config_verysimple, validate=True, config_dirname="test"
)
tardis_config_verysimple["plasma"]["initial_t_inner"] = "-100 K"
tardis_config_verysimple["plasma"]["initial_t_rad"] = "-100 K"
with pytest.raises(ValueError) as ve:
if (conf.plasma.initial_t_inner.value >= -1) and (
conf.plasma.initial_t_rad.value >= -1
):
raise ValueError("Initial Temperatures are Invalid")
assert ve.type is ValueError
def test_spectrum_section_config(tardis_config_verysimple):
"""
Configuration Validation Test for Plasma Section of the Tardis Config YAML File
Validates:
Spectrum Start & End Limits (Start < End)
Parameter
---------
`tardis_config_verysimple` : YAML File
Result
------
Assertion based on validation for specified values
"""
conf = Configuration.from_config_dict(
tardis_config_verysimple, validate=True, config_dirname="test"
)
tardis_config_verysimple["spectrum"]["start"] = "2500 angstrom"
tardis_config_verysimple["spectrum"]["stop"] = "500 angstrom"
with pytest.raises(ValueError) as ve:
if not conf.spectrum.stop.value < conf.spectrum.start.value:
raise ValueError("Start Value must be less than Stop Value")
assert ve.type is ValueError
|
alipay/aop/api/response/AlipayEbppInstserviceTokenCreateResponse.py | antopen/alipay-sdk-python-all | 213 | 11148877 | <reponame>antopen/alipay-sdk-python-all
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayEbppInstserviceTokenCreateResponse(AlipayResponse):
def __init__(self):
super(AlipayEbppInstserviceTokenCreateResponse, self).__init__()
self._sign_token = None
@property
def sign_token(self):
return self._sign_token
@sign_token.setter
def sign_token(self, value):
self._sign_token = value
def parse_response_content(self, response_content):
response = super(AlipayEbppInstserviceTokenCreateResponse, self).parse_response_content(response_content)
if 'sign_token' in response:
self.sign_token = response['sign_token']
|
deepfigures/settings.py | mdcatapult/deepfigures-open | 103 | 11148965 | <gh_stars>100-1000
"""Constants and settings for deepfigures."""
import logging
import os
logger = logging.getLogger(__name__)
# path to the deepfigures project root
BASE_DIR = os.path.dirname(
os.path.dirname(os.path.realpath(__file__)))
# version number for the current release
VERSION = '0.0.1'
# descriptions of the docker images deepfigures builds
DEEPFIGURES_IMAGES = {
'cpu': {
'tag': 'deepfigures-cpu',
'dockerfile_path': os.path.join(BASE_DIR, 'dockerfiles/cpu/Dockerfile')
},
'gpu': {
'tag': 'deepfigures-gpu',
'dockerfile_path': os.path.join(BASE_DIR, 'dockerfiles/gpu/Dockerfile')
}
}
# path to the directory containing all the project-level test data.
TEST_DATA_DIR = os.path.join(BASE_DIR, 'tests/data')
# settings for PDFRenderers
DEFAULT_INFERENCE_DPI = 100
DEFAULT_CROPPED_IMG_DPI = 200
BACKGROUND_COLOR = 255
# weights for the model
TENSORBOX_MODEL = {
'save_dir': os.path.join(BASE_DIR, 'weights/'),
'iteration': 500000
}
# paths to binary dependencies
PDFFIGURES_JAR_NAME = 'pdffigures2-assembly-0.0.12-SNAPSHOT.jar'
PDFFIGURES_JAR_PATH = os.path.join(
BASE_DIR,
'bin/',
PDFFIGURES_JAR_NAME)
# PDF Rendering backend settings
DEEPFIGURES_PDF_RENDERER = 'deepfigures.extraction.renderers.GhostScriptRenderer'
# settings for data generation
# The location to temporarily store arxiv source data
ARXIV_DATA_TMP_DIR = ''
# The location to store the final output labels
ARXIV_DATA_OUTPUT_DIR = ''
# The location of the PMC open access data
PUBMED_INPUT_DIR = ''
# A directory for storing intermediate results
PUBMED_INTERMEDIATE_DIR = ''
# A directory for storing the output pubmed data
PUBMED_DISTANT_DATA_DIR = ''
# a local directory for storing the output data
LOCAL_PUBMED_DISTANT_DATA_DIR = ''
|
core/__init__.py | azurlane-doujin/AzurLanePaintingExtract-v1.0 | 144 | 11148973 | __all__=["assets", "src"] |
setup.py | alicanb/probtorch | 876 | 11148986 | import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
import setuptools.command.build_py
def get_version():
try:
import subprocess
CWD = os.path.dirname(os.path.abspath(__file__))
rev = subprocess.check_output("git rev-parse --short HEAD".split(), cwd=CWD)
version = "0.0+" + str(rev.strip().decode('utf-8'))
return version
except Exception:
return "0.0"
# Package meta-data.
NAME = 'probtorch'
DESCRIPTION = 'Probabilistic Torch is library for deep generative models that extends PyTorch'
URL = 'https://github.com/probtorch/probtorch'
VERSION = get_version()
REQUIRED = [
'torch',
]
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
class build_py(setuptools.command.build_py.build_py):
def run(self):
self.create_version_file()
setuptools.command.build_py.build_py.run(self)
@staticmethod
def create_version_file():
print('-- Building version ' + VERSION)
version_path = os.path.join(here, 'probtorch', 'version.py')
with open(version_path, 'w') as f:
f.write("__version__ = '{}'\n".format(VERSION))
class UploadCommand(Command):
"""Support setup.py upload."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds...')
rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution...')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPi via Twine...')
os.system('twine upload dist/*')
sys.exit()
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=long_description,
url=URL,
packages=find_packages(exclude=('tests',)),
install_requires=REQUIRED,
include_package_data=True,
license='MIT',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
],
# $ setup.py publish support.
cmdclass={
'build_py': build_py,
'upload': UploadCommand,
},
)
|
spacy/tests/lang/test_initialize.py | snosrap/spaCy | 22,040 | 11149051 | <gh_stars>1000+
import pytest
from spacy.util import get_lang_class
# fmt: off
# Only include languages with no external dependencies
# excluded: ja, ko, th, vi, zh
LANGUAGES = ["af", "am", "ar", "az", "bg", "bn", "ca", "cs", "da", "de", "el",
"en", "es", "et", "eu", "fa", "fi", "fr", "ga", "gu", "he", "hi",
"hr", "hu", "hy", "id", "is", "it", "kn", "ky", "lb", "lt", "lv",
"mk", "ml", "mr", "nb", "ne", "nl", "pl", "pt", "ro", "ru", "sa",
"si", "sk", "sl", "sq", "sr", "sv", "ta", "te", "ti", "tl", "tn",
"tr", "tt", "uk", "ur", "xx", "yo"]
# fmt: on
@pytest.mark.parametrize("lang", LANGUAGES)
def test_lang_initialize(lang, capfd):
"""Test that languages can be initialized."""
nlp = get_lang_class(lang)()
# Check for stray print statements (see #3342)
doc = nlp("test") # noqa: F841
captured = capfd.readouterr()
assert not captured.out
|
haproxystats/__init__.py | unixsurfer/haproxystats | 104 | 11149074 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
"""A collection of Python tools to process HAProxy statistics."""
__title__ = 'haproxystats'
__author__ = '<NAME>'
__license__ = 'Apache 2.0'
__version__ = '0.5.2'
__copyright__ = 'Copyright 2016 <NAME> <<EMAIL>'
DEFAULT_OPTIONS = {
'DEFAULT': {
'retries': 2,
'timeout': 1,
'interval': 2,
'loglevel': 'info',
},
'paths': {
'base-dir': '/var/lib/haproxystats',
},
'pull': {
'retries': 1,
'timeout': 0.1,
'interval': 0.5,
'pull-timeout': 2,
'pull-interval': 10,
'buffer-limit': 6291456,
'dst-dir': '/var/lib/haproxystats/incoming',
'tmp-dst-dir': '/var/lib/haproxystats/incoming.tmp',
'workers': 8,
'queue-size': 360,
},
'process': {
'workers': '4',
'src-dir': '/var/lib/haproxystats/incoming',
'aggr-server-metrics': 'false',
'per-process-metrics': 'false',
'calculate-percentages': 'false',
'liveness-check-interval': 10,
},
'graphite': {
'server': '127.0.0.1',
'port': 3002,
'retries': 3,
'interval': 1.8,
'connect-timeout': 1.0,
'write-timeout': 1.0,
'delay': 10,
'backoff': 2,
'namespace': 'loadbalancers',
'prefix-hostname': 'true',
'fqdn': 'true',
'queue-size': 1000000
},
}
|
tools/simnet/train/tf/tools/tf_record_reader.py | comeonfox/AnyQ | 2,414 | 11149109 | <filename>tools/simnet/train/tf/tools/tf_record_reader.py
#coding=utf-8
# Copyright (c) 2018 Baidu, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
import logging
import numpy
import json
import time
import sys
import os
import tensorflow as tf
_WORK_DIR = os.path.split(os.path.realpath(__file__))[0]
_UPPER_DIR = os.path.split(_WORK_DIR)[0]
sys.path.append(_UPPER_DIR)
from utils import datafeeds
def load_config(config_file):
"""
load config
"""
with open(config_file, "r") as f:
try:
conf = json.load(f)
except Exception:
logging.error("load json file %s error" % config_file)
conf_dict = {}
unused = [conf_dict.update(conf[k]) for k in conf]
logging.debug("\n".join(["%s=%s" % (u, conf_dict[u]) for u in conf_dict]))
return conf_dict
def read_tfrecords_pointwise(config):
"""
read tf records
"""
datafeed = datafeeds.TFPointwisePaddingData(config)
input_l, input_r, label_y = datafeed.ops()
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
start_time = time.time()
sess = tf.InteractiveSession()
sess.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
step = 0
while not coord.should_stop():
step += 1
try:
left_, right_, label_ = sess.run([input_l, input_r, label_y])
print "pointwise data read is good"
except tf.errors.OutOfRangeError:
print("read %d steps" % step)
coord.request_stop()
coord.join(threads)
duration = time.time() - start_time
print("duration: %ds, step: %d" % (duration, step))
sess.close()
def read_tfrecords_pairwise(config):
"""
read tf records
"""
datafeed = datafeeds.TFPairwisePaddingData(config)
query, pos, neg = datafeed.ops()
init_op = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer())
start_time = time.time()
sess = tf.InteractiveSession()
sess.run(init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
step = 0
while not coord.should_stop():
step += 1
try:
query_, pos_, neg_ = sess.run([query, pos, neg])
print "pairwise data read is good"
except tf.errors.OutOfRangeError:
print("read %d steps" % step)
coord.request_stop()
coord.join(threads)
duration = time.time() - start_time
print("duration: %ds, step: %d" % (duration, step))
sess.close()
def usage():
"""
usage
"""
print sys.argv[0], "options"
print "options"
print "\tconfig_path: configure file path"
if __name__ == "__main__":
if len(sys.argv) != 2:
usage()
sys.exit(1)
config_path = sys.argv[1]
config = load_config(config_path)
data_format_func = {"pointwise": read_tfrecords_pointwise,
"pairwise": read_tfrecords_pairwise}
if config["training_mode"] in data_format_func:
using_func = data_format_func[config["training_mode"]]
else:
logging.error("data_format not supported")
sys.exit(1)
using_func(config)
|
tracing/tracing/mre/threaded_work_queue.py | tingshao/catapult | 1,894 | 11149122 | <reponame>tingshao/catapult<gh_stars>1000+
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import traceback
from six.moves import range # pylint: disable=redefined-builtin
try:
import queue
except ImportError:
import six.moves.queue as queue # pylint: disable=import-error
class ThreadedWorkQueue(object):
def __init__(self, num_threads):
self._num_threads = num_threads
self._main_thread_tasks = None
self._any_thread_tasks = None
self._running = False
self._stop = False
self._stop_result = None
self.Reset()
@property
def is_running(self):
return self._running
def Run(self):
if self.is_running:
raise Exception('Already running')
self._running = True
self._stop = False
self._stop_result = None
if self._num_threads == 1:
self._RunSingleThreaded()
else:
self._RunMultiThreaded()
self._main_thread_tasks = queue.Queue()
self._any_thread_tasks = queue.Queue()
r = self._stop_result
self._stop_result = None
self._running = False
return r
def Stop(self, stop_result=None):
if not self.is_running:
raise Exception('Not running')
if self._stop:
return False
self._stop_result = stop_result
self._stop = True
return True
def Reset(self):
assert not self.is_running
self._main_thread_tasks = queue.Queue()
self._any_thread_tasks = queue.Queue()
def PostMainThreadTask(self, cb, *args, **kwargs):
def RunTask():
cb(*args, **kwargs)
self._main_thread_tasks.put(RunTask)
def PostAnyThreadTask(self, cb, *args, **kwargs):
def RunTask():
cb(*args, **kwargs)
self._any_thread_tasks.put(RunTask)
def _TryToRunOneTask(self, task_queue, block=False):
if block:
try:
task = task_queue.get(True, 0.1)
except queue.Empty:
return
else:
if task_queue.empty():
return
task = task_queue.get()
try:
task()
except KeyboardInterrupt as ex:
raise ex
except Exception: # pylint: disable=broad-except
traceback.print_exc()
finally:
task_queue.task_done()
def _RunSingleThreaded(self):
while True:
if self._stop:
break
# Since this is single-threaded, if both task-lists are empty, then
# nothing will be able to add any more tasks to either task-queue.
if self._any_thread_tasks.empty() and self._main_thread_tasks.empty():
self.Stop()
break
self._TryToRunOneTask(self._any_thread_tasks)
self._TryToRunOneTask(self._main_thread_tasks)
def _RunMultiThreaded(self):
threads = []
for _ in range(self._num_threads):
t = threading.Thread(target=self._ThreadMain)
t.setDaemon(True)
t.start()
threads.append(t)
while True:
if self._stop:
break
self._TryToRunOneTask(self._main_thread_tasks)
for t in threads:
t.join()
def _ThreadMain(self):
while True:
if self._stop:
break
self._TryToRunOneTask(self._any_thread_tasks, block=True)
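# Usage sketch (added for illustration; not part of the original file): post a
# few any-thread tasks, stop the queue from the main thread once they have all
# run, and read back the value handed to Stop() as the return value of Run().
if __name__ == '__main__':
  work_queue = ThreadedWorkQueue(num_threads=2)
  finished = []
  def Work(value):
    # Runs on a worker thread; list.append is atomic under CPython's GIL.
    finished.append(value)
    if len(finished) == 3:
      work_queue.PostMainThreadTask(work_queue.Stop, 'done')
  for value in range(3):
    work_queue.PostAnyThreadTask(Work, value)
  print(work_queue.Run(), sorted(finished))  # expected: done [0, 1, 2]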
|
ikalog/ui/events.py | fetus-hina/IkaLog | 285 | 11149138 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 <NAME>
# Copyright (C) 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import wx
import wx.lib.newevent
# New input file is entered. (see: PreviewPanel.on_input_file_button_click)
# Args: input_file (string)
(InputFileAddedEvent, EVT_INPUT_FILE_ADDED) = wx.lib.newevent.NewEvent()
# New input source is initialized. (see: VideoCapture.initialize_input)
# Args: source (string)
(InputInitializedEvent, EVT_INPUT_INITIALIZED) = wx.lib.newevent.NewEvent()
# Pause or play of IkaLog is intended. (see: PreviewPanel.on_ikalog_pause)
# Args: pause (bool)
(IkalogPauseEvent, EVT_IKALOG_PAUSE) = wx.lib.newevent.NewEvent()
|
yagmail/password.py | york-schlabrendorff-liqid/yagmail | 2,431 | 11149156 | try:
import keyring
except (ImportError, NameError, RuntimeError):
pass
def handle_password(user, password): # pragma: no cover
""" Handles getting the password"""
if password is None:
try:
password = keyring.get_password("y<PASSWORD>", user)
except NameError as e:
print(
"'keyring' cannot be loaded. Try 'pip install keyring' or continue without. See https://github.com/kootenpv/yagmail"
)
raise e
if password is None:
import getpass
password = getpass.getpass("Password for <{0}>: ".format(user))
answer = ""
# Python 2 fix
while answer != "y" and answer != "n":
prompt_string = "Save username and password in keyring? [y/n]: "
# pylint: disable=undefined-variable
try:
answer = raw_input(prompt_string).strip()
except NameError:
answer = input(prompt_string).strip()
if answer == "y":
register(user, password)
return password
def register(username, password):
""" Use this to add a new gmail account to your OS' keyring so it can be used in yagmail """
keyring.set_password("<PASSWORD>", username, password)
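# Usage sketch (illustrative; not part of the original module):
#   password = handle_password("user@example.com", None)
# checks the OS keyring first, falls back to an interactive getpass prompt,
# and offers to store the entered password for next time via register().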
|
data/transcoder_evaluation_gfg/python/COUNT_FREQUENCY_K_MATRIX_SIZE_N_MATRIXI_J_IJ.py | mxl1n/CodeGen | 241 | 11149177 | <filename>data/transcoder_evaluation_gfg/python/COUNT_FREQUENCY_K_MATRIX_SIZE_N_MATRIXI_J_IJ.py
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold(n, k):
if (n + 1 >= k):
return (k - 1)
else:
return (2 * n + 1 - k)
#TOFILL
if __name__ == '__main__':
param = [
(90, 74,),
(86, 36,),
(92, 38,),
(72, 71,),
(25, 57,),
(11, 53,),
(94, 80,),
(91, 75,),
(66, 58,),
(34, 88,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success += 1
print("#Results: %i, %i" % (n_success, len(param)))
|
search/linear_search/python/bm.py | CarbonDDR/al-go-rithms | 1,253 | 11149179 | '''
Boyer–Moore string-search algorithm is an efficient string-searching algorithm
that is the standard benchmark for practical string-search literature. The
algorithm preprocesses the string being searched for (the pattern), but not
the string being searched in (the text). The Boyer-Moore algorithm uses
information gathered during the preprocessing step to skip sections of the text,
resulting in a lower constant factor than many other string search algorithms.
The key features of the algorithm are to match on the tail of the pattern
rather than the head, and to skip along the text in jumps of multiple
characters rather than searching every single character in the text.
'''
def boyer_moore(text,pattern):
n = len(text) # lenght of text
m = len(pattern) # length of pattern
if m == 0:
return None
last = {} # build 'last' dictionary
for k in range(m):
last[pattern[k]] = max(1,m-k-1) # bad match table
t = m-1 # an index into text
p = m-1 # an index into pattern
while t < n:
if text[t] == pattern[p]:
if p == 0:
return t
else:
t -= 1
p -= 1
else:
j = last.get(text[t],-1)
t += m-min(p,j+1)
p = m-1
return None
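# Usage sketch (added for illustration; not part of the original file): the
# search returns the 0-based start index of the match, or None when the
# pattern does not occur in the text.
if __name__ == '__main__':
    text = "HERE IS A SIMPLE EXAMPLE"
    print(boyer_moore(text, "EXAMPLE"))  # expected: 17
    print(boyer_moore(text, "MISSING"))  # expected: None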
|
akshare/futures/futures_rule.py | lisong996/akshare | 4,202 | 11149206 | # -*- coding:utf-8 -*-
#!/usr/bin/env python
"""
Date: 2020/7/12 21:51
Desc: Guotai Junan Futures - trading calendar data table
https://www.gtjaqh.com/pc/calendar.html
"""
import pandas as pd
import requests
def futures_rule(trade_date: str = "20200712") -> pd.DataFrame:
"""
    Guotai Junan Futures - trading calendar data table
https://www.gtjaqh.com/pc/calendar.html
    :return: trading calendar data
:rtype: pandas.DataFrame
"""
url = "https://www.gtjaqh.com/fn/128"
params = {"base_date": f"{trade_date}"}
r = requests.post(url, json=params)
temp_df = pd.DataFrame(r.json()["data"])
temp_df = temp_df[temp_df["tradingday"] == trade_date]
if not temp_df["events"].values[0]:
return f"{trade_date} 查询时间过早或者不是交易日"
else:
table_df = pd.read_html(temp_df["events"].values[0][0]["content"], header=1)[0]
table_df.dropna(axis=1, how="all", inplace=True)
return table_df
if __name__ == '__main__':
futures_rule_df = futures_rule(trade_date="20210923")
print(futures_rule_df)
|
examples/Script/script_forward_frames.py | 4ndr3aR/depthai-python | 182 | 11149219 | #!/usr/bin/env python3
import cv2
import depthai as dai
# Start defining a pipeline
pipeline = dai.Pipeline()
cam = pipeline.create(dai.node.ColorCamera)
cam.initialControl.setManualFocus(130)
# Not needed, you can display 1080P frames as well
cam.setIspScale(1,2)
# Script node
script = pipeline.create(dai.node.Script)
script.setScript("""
ctrl = CameraControl()
ctrl.setCaptureStill(True)
# Initially send still event
node.io['ctrl'].send(ctrl)
normal = True
while True:
frame = node.io['frames'].get()
if normal:
ctrl.setAutoExposureCompensation(3)
node.io['stream1'].send(frame)
normal = False
else:
ctrl.setAutoExposureCompensation(-3)
node.io['stream2'].send(frame)
normal = True
node.io['ctrl'].send(ctrl)
""")
cam.still.link(script.inputs['frames'])
# XLinkOut
xout1 = pipeline.create(dai.node.XLinkOut)
xout1.setStreamName('stream1')
script.outputs['stream1'].link(xout1.input)
xout2 = pipeline.create(dai.node.XLinkOut)
xout2.setStreamName('stream2')
script.outputs['stream2'].link(xout2.input)
script.outputs['ctrl'].link(cam.inputControl)
# Connect to device with pipeline
with dai.Device(pipeline) as device:
qStream1 = device.getOutputQueue("stream1")
qStream2 = device.getOutputQueue("stream2")
while True:
cv2.imshow('stream1', qStream1.get().getCvFrame())
cv2.imshow('stream2', qStream2.get().getCvFrame())
if cv2.waitKey(1) == ord('q'):
break |
tests/spot/futures/test_futures_loan_adjust_collateral.py | Banging12/binance-connector-python | 512 | 11149221 | import responses
import pytest
from urllib.parse import urlencode
from tests.util import random_str
from tests.util import mock_http_response
from binance.spot import Spot as Client
from binance.error import ParameterRequiredError
mock_item = {"key_1": "value_1", "key_2": "value_2"}
key = random_str()
secret = random_str()
complete_params = {
"loanCoin": "BTC",
"collateralCoin": "BNB",
"amount": "1",
"direction": "ADDITIONAL",
}
parameterized_test_data = [
({"loanCoin": None, "collateralCoin": None, "amount": None, "direction": None}),
(
{
"loanCoin": "",
"collateralCoin": "BTC",
"amount": "1",
"direction": "ADDITIONAL",
}
),
(
{
"loanCoin": "BNB",
"collateralCoin": "",
"amount": "1",
"direction": "ADDITIONAL",
}
),
(
{
"loanCoin": "BNB",
"collateralCoin": "BTC",
"amount": "",
"direction": "ADDITIONAL",
}
),
({"loanCoin": "BNB", "collateralCoin": "BTC", "amount": "1", "direction": ""}),
]
@pytest.mark.parametrize("params", parameterized_test_data)
def test_futures_loan_adjust_collateral_with_missing_field(params):
"""Tests the API endpoint to Adjust Cross-Collateral LTV with missing field"""
client = Client(key, secret)
client.futures_loan_adjust_collateral.when.called_with(**params).should.throw(
ParameterRequiredError
)
@mock_http_response(
responses.POST,
"/sapi/v2/futures/loan/adjustCollateral\\?" + urlencode(complete_params),
mock_item,
200,
)
def test_futures_loan_adjust_collateral():
"""Tests the API endpoint to Adjust Cross-Collateral LTV"""
client = Client(key, secret)
response = client.futures_loan_adjust_collateral(**complete_params)
response.should.equal(mock_item)
|
url_filter/backends/base.py | peopleticker/django-url-filter-py3 | 303 | 11149238 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import abc
import six
from cached_property import cached_property
class BaseFilterBackend(six.with_metaclass(abc.ABCMeta, object)):
"""
Base filter backend from which all other backends must subclass.
Parameters
----------
queryset
Iterable which this filter backend will eventually filter.
The type of the iterable depends on the filter backend.
For example for :class:`.DjangoFilterBackend`, Django's
``QuerySet`` needs to be passed.
context: dict
Context dictionary. It could contain any information
which potentially could be useful to filter given
queryset. That can include, request, view, view kwargs, etc.
The idea is similar to DRF serializers. By passing the context,
it allows custom filters to reference all the information
they need to be able to effectively filter data.
"""
name = None
"""
Name of the filter backend.
This is used by custom callable filters to define callables
for each supported backend. More at :class:`.CallableFilter`
"""
supported_lookups = set()
"""
Set of supported lookups this filter backend supports.
This is used by leaf :class:`.Filter` to determine whether
it should construct :class:`.FilterSpec` for a particular
    key-value pair from the querystring, since if it constructs
    a specification that the filter backend cannot actually
    filter, things will blow up. By explicitly checking
    whether the filter backend supports a particular lookup it can
short-circuit the logic and avoid errors down the road.
This is pretty much the only coupling between filters
and filter backends.
"""
enforce_same_models = True
"""
Whether same models should be enforced when trying to use
this filter backend.
More can be found in :meth:`BaseFilterBackend.model`
"""
def __init__(self, queryset, context=None):
self.queryset = queryset
self.context = context or {}
self.specs = []
@cached_property
def model(self):
"""
Property for getting model on which this filter backend operates.
This is meant to be used by the integrations directly shipped with
django-url-filter which need to be able to validate that the filterset
        will be able to filter the given queryset. They can do that by checking
        that the model they are trying to filter matches the model the filter
        backend got. This primarily catches misconfigurations such as using a
        SQLAlchemy filterset to filter Django's ``QuerySet``.
"""
return self.get_model()
def bind(self, specs):
"""
Bind the given specs to the filter backend.
This allows the filter backend to be instantiated first before
        filter specs are constructed and later, specs can be bound
to the backend.
Parameters
----------
specs : list
            List of :class:`.FilterSpec` to be bound to the filter
backend for filtering
"""
self.specs = specs
@cached_property
def regular_specs(self):
"""
Property for getting standard filter specifications
which can be used directly by the filter backend
to filter queryset.
See Also
--------
callable_specs
"""
return [i for i in self.specs if not i.is_callable]
@cached_property
def callable_specs(self):
"""
Property for getting custom filter specifications
which have a filter callable for filtering querysets.
These specifications cannot be directly used by filter
backend and have to be called manually to filter data.
See Also
--------
regular_specs
"""
return [i for i in self.specs if i.is_callable]
@abc.abstractmethod
def get_model(self):
"""
Get the queryset model.
.. note:: **MUST** be implemented by subclasses.
:meth:`.model` property uses this method to get the model.
See Also
--------
model
"""
def filter(self):
"""
Main public method for filtering querysets.
"""
qs = self.filter_by_specs(self.queryset)
qs = self.filter_by_callables(qs)
return qs
@abc.abstractmethod
def filter_by_specs(self, queryset):
"""
Method for filtering queryset by using standard filter specs.
.. note:: **MUST** be implemented by subclasses
"""
@abc.abstractmethod
def empty(self):
"""
Method for returning empty queryset when any validations failed.
"""
def filter_by_callables(self, queryset):
"""
Method for filtering queryset by using custom filter callables
as given in the :class:`.Filter` definition.
This is really meant to accommodate filtering with simple
filter keys having complex filtering logic behind them.
More about custom callables can be found at :class:`.CallableFilter`
"""
if not self.callable_specs:
return queryset
for spec in self.callable_specs:
queryset = spec.filter_callable(queryset=queryset, spec=spec)
return queryset
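# Minimal sketch (illustrative only, not part of django-url-filter): the
# smallest concrete backend the ABC accepts, filtering a plain list of dicts.
# The spec attributes used below (``components`` and ``value``) are assumptions
# about :class:`.FilterSpec`, named here purely for illustration.
class ListFilterBackend(BaseFilterBackend):
    name = 'list'
    supported_lookups = {'exact'}
    def get_model(self):
        # a plain list of dicts has no ORM model; ``dict`` stands in for one
        return dict
    def empty(self):
        # returned when validation fails and nothing should match
        return []
    def filter_by_specs(self, queryset):
        # apply each regular spec as a simple equality check
        for spec in self.regular_specs:
            key = '.'.join(spec.components)
            queryset = [item for item in queryset if item.get(key) == spec.value]
        return queryset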
|
inquirer/render/__init__.py | SteinRobert/python-inquirer | 640 | 11149246 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
from .console import ConsoleRender
try:
from .ncourses import CoursesRender # noqa
except ImportError:
# ncourses will not be available
pass
class Render(object):
def __init__(self, impl=ConsoleRender):
self._impl = impl
def render(self, question, answers):
return self._impl.render(question, answers)
|
ranking/management/modules/dl_gsu.py | horacexd/clist | 166 | 11149259 | <reponame>horacexd/clist<gh_stars>100-1000
# -*- coding: utf-8 -*-
import re
from collections import OrderedDict, defaultdict
from datetime import timedelta
from pprint import pprint # noqa
from urllib.parse import urljoin
from ranking.management.modules.common import REQ, BaseModule, FailOnGetResponse, parsed_table
from ranking.management.modules.excepts import ExceptionParseStandings, InitModuleException
class Statistic(BaseModule):
def __init__(self, **kwargs):
super(Statistic, self).__init__(**kwargs)
if not self.name or not self.start_time or not self.url:
raise InitModuleException()
def get_standings(self, users=None, statistics=None):
standings_data = None
if not self.standings_url:
page = REQ.get(urljoin(self.url, '/'))
for name in (
'Соревнования',
'Тренировочные олимпиады',
):
match = re.search('<a[^>]*href="(?P<url>[^"]*)"[^>]*>{}<'.format(name), page)
url = match.group('url')
page = REQ.get(url)
regex = '''
<a[^>]*href=["']?[^<"']*cid=(?P<cid>[0-9]+)[^>]*>[^>]*{}[^>]*</a>.*?
<a[^>]*href="(?P<url>[^"]*)"[^>]*>{}<
'''.format(
re.escape(self.name),
re.escape('Результаты прошедших тренировок'),
)
match = re.search(regex, page, re.DOTALL | re.IGNORECASE | re.VERBOSE)
if not match:
raise ExceptionParseStandings('Not found standings urls list')
url = match.group('url')
cid = match.group('cid')
last_standings_data = self.resource.info['parse']['last_standings_data'].get(cid, {})
page = REQ.get(url)
dates = [self.start_time, self.start_time - timedelta(days=1)]
dates = [d.strftime('%Y-%m-%d') for d in dates]
re_dates = '|'.join(dates)
regex = r'''
<tr[^>]*>[^<]*<td[^>]*>\s*(?P<date>{})\s*</td>[^<]*
<td[^>]*>(?P<title>[^<]*)</td>[^<]*
<td[^>]*>[^<]*<a[^>]*href\s*=["\s]*(?P<url>[^">]*)["\s]*[^>]*>
'''.format(re_dates)
matches = re.findall(regex, page, re.MULTILINE | re.VERBOSE)
datas = [
{'date': date.strip(), 'title': title.strip(), 'url': urljoin(url, u)}
for date, title, u in matches
]
if len(datas) > 1:
regex = r'[0-9]\s*-\s*[0-9].*(?:[0-9]\s*-\s*[0-9].*\bкл\b|школа)'
datas = [d for d in datas if not re.search(regex, d['title'], re.I)]
if last_standings_data:
datas = [d for d in datas if d['date'] > last_standings_data['date']]
if not datas:
raise ExceptionParseStandings('Not found standings url')
if len(datas) > 1:
_datas = [d for d in datas if d['date'] == dates[0]]
if _datas:
datas = _datas
if len(datas) > 1:
ok = True
urls_map = {}
for d in datas:
url = d['url']
page = REQ.get(url)
path = re.findall('<td[^>]*nowrap><a[^>]*href="(?P<href>[^"]*)"', page)
if len(path) < 2:
ok = False
parent = urljoin(url, path[-2])
urls_map.setdefault(parent, d)
if len(urls_map) > 1:
standings_data = datas[0]
elif not ok:
raise ExceptionParseStandings('Too much standing url')
else:
standings_data = list(urls_map.values())[0]
else:
standings_data = datas[0]
page = REQ.get(standings_data['url'])
self.standings_url = REQ.last_url
try:
page = REQ.get(self.standings_url)
except FailOnGetResponse as e:
if e.code == 404:
raise ExceptionParseStandings('Not found response from standings url')
raise e
def get_table(page):
html_table = re.search('<table[^>]*bgcolor="silver"[^>]*>.*?</table>',
page,
re.MULTILINE | re.DOTALL).group(0)
table = parsed_table.ParsedTable(html_table)
return table
table = get_table(page)
problems_info = OrderedDict()
max_score = defaultdict(float)
scoring = False
result = {}
for r in table:
row = OrderedDict()
problems = row.setdefault('problems', {})
for k, v in list(r.items()):
if k == 'Имя':
href = v.column.node.xpath('a/@href')
if not href:
continue
uid = re.search('[0-9]+$', href[0]).group(0)
row['member'] = uid
row['name'] = v.value
elif k == 'Место':
row['place'] = v.value
elif k == 'Время':
row['penalty'] = int(v.value)
elif k in ['Сумма', 'Задачи']:
row['solving'] = float(v.value)
elif re.match('^[a-zA-Z0-9]+$', k):
problems_info[k] = {'short': k}
if v.value:
p = problems.setdefault(k, {})
p['result'] = v.value
if v.value and v.value[0] not in ['-', '+']:
scoring = True
try:
max_score[k] = max(max_score[k], float(v.value))
except ValueError:
pass
elif k:
row[k.strip()] = v.value.strip()
elif v.value.strip().lower() == 'log':
href = v.column.node.xpath('.//a/@href')
if href:
row['url'] = urljoin(self.standings_url, href[0])
result[row['member']] = row
if scoring:
match = re.search(r'<b[^>]*>\s*<a[^>]*href="(?P<url>[^"]*)"[^>]*>ACM</a>\s*</b>', page)
if match:
page = REQ.get(match.group('url'))
table = get_table(page)
for r in table:
uid = None
for k, v in list(r.items()):
if k == 'Имя':
href = v.column.node.xpath('a/@href')
if not href:
continue
uid = re.search('[0-9]+$', href[0]).group(0)
elif re.match('^[a-zA-Z0-9]+$', k) and uid and v.value:
if v.value[0] == '-':
result[uid]['problems'][k]['partial'] = True
elif v.value[0] == '+':
result[uid]['problems'][k]['partial'] = False
problems_info[k]['full_score'] = result[uid]['problems'][k]['result']
for r in result.values():
solved = 0
for k, p in r['problems'].items():
if p.get('partial'):
continue
score = p['result']
if score.startswith('+') or 'partial' in p and not p['partial']:
solved += 1
else:
try:
score = float(score)
except ValueError:
continue
if abs(max_score[k] - score) < 1e-9 and score > 0:
solved += 1
r['solved'] = {'solving': solved}
standings = {
'result': result,
'url': self.standings_url,
'problems': list(problems_info.values()),
'info_fields': ['_standings_data'],
}
if result and standings_data:
standings['_standings_data'] = standings_data
self.resource.info['parse']['last_standings_data'][cid] = standings_data
self.resource.save()
return standings
if __name__ == '__main__':
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../')))
os.environ['DJANGO_SETTINGS_MODULE'] = 'pyclist.settings'
from django import setup
setup()
from django.utils import timezone
from clist.models import Contest
qs = Contest.objects \
.filter(host='dl.gsu.by', end_time__lt=timezone.now()) \
.order_by('-start_time')
for contest in qs[:1]:
contest.standings_url = None
statistic = Statistic(
name=contest.title,
url=contest.url,
key=contest.key,
standings_url=contest.standings_url,
start_time=contest.start_time,
)
try:
pprint(statistic.get_standings())
except Exception:
pass
|
tortoise/utils.py | blazing-gig/tortoise-orm | 2,847 | 11149295 | <gh_stars>1000+
from typing import TYPE_CHECKING, Any, Iterable, Optional
from tortoise.log import logger
if TYPE_CHECKING: # pragma: nocoverage
from tortoise.backends.base.client import BaseDBAsyncClient
def get_schema_sql(client: "BaseDBAsyncClient", safe: bool) -> str:
"""
Generates the SQL schema for the given client.
:param client: The DB client to generate Schema SQL for
:param safe: When set to true, creates the table only when it does not already exist.
"""
generator = client.schema_generator(client)
return generator.get_create_schema_sql(safe)
async def generate_schema_for_client(client: "BaseDBAsyncClient", safe: bool) -> None:
"""
Generates and applies the SQL schema directly to the given client.
:param client: The DB client to generate Schema SQL for
:param safe: When set to true, creates the table only when it does not already exist.
"""
generator = client.schema_generator(client)
schema = get_schema_sql(client, safe)
logger.debug("Creating schema: %s", schema)
if schema: # pragma: nobranch
await generator.generate_from_string(schema)
def chunk(instances: Iterable[Any], batch_size: Optional[int] = None) -> Iterable[Iterable[Any]]:
"""
Generate iterable chunk by batch_size
# noqa: DAR301
"""
if not batch_size:
yield instances
else:
instances = list(instances)
for i in range(0, len(instances), batch_size):
yield instances[i : i + batch_size] # noqa:E203
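# A minimal usage sketch of the pure-Python `chunk` helper above; it needs no
# database client or Tortoise initialisation, so it can run standalone.
if __name__ == "__main__":  # pragma: nocoverage
    assert list(chunk(range(7), batch_size=3)) == [[0, 1, 2], [3, 4, 5], [6]]
    # Without a batch_size, the iterable is yielded back as a single chunk.
    assert list(chunk([1, 2, 3])) == [[1, 2, 3]]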
|
timesketch/models/annotations.py | rushattac/timesketch | 1,810 | 11149304 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module implements annotations that can be use on other database models.
"""
from __future__ import unicode_literals
import json
import six
from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import Unicode
from sqlalchemy import UnicodeText
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import relationship
from timesketch.models import BaseModel
from timesketch.models import db_session
class BaseAnnotation(object):
"""Base class with common attributes."""
@declared_attr
def user_id(self):
"""Foreign key to a user model.
Returns:
A column (instance of sqlalchemy.Column)
"""
return Column(Integer, ForeignKey('user.id'))
@declared_attr
def user(self):
"""A relationship to a user object.
Returns:
A relationship (instance of sqlalchemy.orm.relationship)
"""
return relationship('User')
class Label(BaseAnnotation):
"""A label annotation."""
label = Column(Unicode(255))
def __init__(self, user, label):
"""Initialize the model.
Args:
user: A user (instance of timesketch.models.user.User)
            label: Name of the label
"""
super(Label, self).__init__()
self.user = user
self.label = label
class Comment(BaseAnnotation):
"""A comment annotation."""
comment = Column(UnicodeText())
def __init__(self, user, comment):
"""Initialize the model.
Args:
user: A user (instance of timesketch.models.user.User)
            comment: The body of the comment
"""
super(Comment, self).__init__()
self.user = user
self.comment = comment
class Status(BaseAnnotation):
"""A status annotation."""
status = Column(Unicode(255))
def __init__(self, user, status):
"""Initialize the model.
Args:
user: A user (instance of timesketch.models.user.User)
status: The type of status (string, e.g. open)
"""
super(Status, self).__init__()
self.user = user
self.status = status
class LabelMixin(object):
"""
A MixIn for generating the necessary tables in the database and to make
it accessible from the parent model object (the model object that uses this
MixIn, i.e. the object that the label is added to).
"""
@declared_attr
def labels(self):
"""
Generates the label tables and adds the attribute to the parent model
object.
Returns:
A relationship to an label (timesketch.models.annotation.Label)
"""
if six.PY2:
class_name = b'{0:s}Label'.format(self.__name__)
else:
class_name = '{0:s}Label'.format(self.__name__)
self.Label = type(class_name, (
Label,
BaseModel,),
dict(
__tablename__='{0:s}_label'.format(
self.__tablename__),
parent_id=Column(
Integer,
ForeignKey('{0:s}.id'.format(
self.__tablename__))),
parent=relationship(self)))
return relationship(self.Label)
def add_label(self, label, user=None):
"""Add a label to an object.
        Each entry can have multiple labels.
Args:
label: Name of the label.
user: Optional user that adds the label (sketch.User).
"""
if self.has_label(label):
return
self.labels.append(self.Label(user=user, label=label))
db_session.commit()
def remove_label(self, label):
"""Remove a label from an object.
Args:
label: Name of the label.
"""
for label_obj in self.labels:
if label_obj.label.lower() != label.lower():
continue
self.labels.remove(label_obj)
db_session.commit()
def has_label(self, label):
"""Returns a boolean whether a label is applied.
Args:
label: Name of the label.
Returns:
True if the label is set, False otherwise.
"""
for label_obj in self.labels:
if label_obj.label.lower() == label.lower():
return True
return False
@property
def get_labels(self):
"""Returns a list of all applied labels.
Returns:
A list of strings with all the applied labels.
"""
if not self.labels:
return []
return [x.label for x in self.labels]
@property
def label_string(self):
"""Returns a JSON encoded string with a list of the labels.
Returns:
A JSON encoded string with the list of labels.
"""
if not self.labels:
return ''
return json.dumps([x.label for x in self.labels])
class CommentMixin(object):
"""
A MixIn for generating the necessary tables in the database and to make
it accessible from the parent model object (the model object that uses this
MixIn, i.e. the object that the comment is added to).
"""
@declared_attr
def comments(self):
"""
Generates the comment tables and adds the attribute to the parent model
object.
Returns:
A relationship to a comment (timesketch.models.annotation.Comment)
"""
if six.PY2:
class_name = b'{0:s}Comment'.format(self.__name__)
else:
class_name = '{0:s}Comment'.format(self.__name__)
self.Comment = type(
class_name, (
Comment,
BaseModel, ),
dict(
__tablename__='{0:s}_comment'.format(self.__tablename__),
parent_id=Column(
Integer,
ForeignKey('{0:s}.id'.format(self.__tablename__))),
parent=relationship(self), ))
return relationship(self.Comment)
class StatusMixin(object):
"""
A MixIn for generating the necessary tables in the database and to make
it accessible from the parent model object (the model object that uses this
MixIn, i.e. the object that the status is added to).
"""
@declared_attr
def status(self):
"""
Generates the status tables and adds the attribute to the parent model
object.
Returns:
A relationship to a status (timesketch.models.annotation.Status)
"""
if six.PY2:
class_name = b'{0:s}Status'.format(self.__name__)
else:
class_name = '{0:s}Status'.format(self.__name__)
self.Status = type(
class_name, (
Status,
BaseModel, ),
dict(
__tablename__='{0:s}_status'.format(self.__tablename__),
parent_id=Column(
Integer,
ForeignKey('{0:s}.id'.format(self.__tablename__))),
parent=relationship(self), ))
return relationship(self.Status)
def set_status(self, status):
"""
Set status on object. Although this is a many-to-many relationship
this makes sure that the parent object only has one status set.
Args:
status: Name of the status
"""
for _status in self.status:
self.status.remove(_status)
self.status.append(self.Status(user=None, status=status))
db_session.commit()
@property
def get_status(self):
"""Get the current status.
Returns:
The status as a string
"""
if not self.status:
self.status.append(self.Status(user=None, status='new'))
return self.status[0]
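# A minimal sketch of how a model adopts these mixins. "Event" below is a
# hypothetical model used purely for illustration (it is not one of the real
# Timesketch models), and it assumes the usual BaseModel/db_session setup:
#
#     class Event(LabelMixin, CommentMixin, StatusMixin, BaseModel):
#         """Annotatable model; the mixins generate the event_label,
#         event_comment and event_status tables."""
#
#     event.add_label('reviewed', user=current_user)  # no-op if already set
#     event.set_status('open')  # replaces any previously set status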
|
autoimpute/utils/__init__.py | gjdv/autoimpute | 191 | 11149319 | <filename>autoimpute/utils/__init__.py
"""Manage the utils lib from the autoimpute package.
This module handles imports from the utils directory that should be accessible
whenever someone imports autoimpute.utils. The imports include methods for
checks & validations as well as functions to explore patterns in missing data.
This module handles `from autoimpute.utils import *` with the __all__ variable
below. This command imports the main public methods from autoimpute.utils.
"""
from .checks import check_data_structure, check_missingness
from .checks import check_nan_columns, check_strategy_allowed
from .checks import check_strategy_fit, check_predictors_fit
from .patterns import md_pairs, md_pattern, md_locations
from .patterns import inbound, outbound, influx, outflux, flux
from .patterns import proportions, nullility_cov, nullility_corr
__all__ = [
"check_data_structure",
"check_missingness",
"check_nan_columns",
"check_strategy_allowed",
"check_strategy_fit",
"check_predictors_fit",
"md_pairs",
"md_pattern",
"md_locations",
"inbound",
"outbound",
"influx",
"outflux",
"flux",
"proportions",
"nullility_cov",
"nullility_corr"
]
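# A short usage sketch; it assumes, as the module docstring implies, that the
# pattern helpers accept a pandas DataFrame with missing values. The exact
# return formats are not asserted here:
#
#     import numpy as np
#     import pandas as pd
#     from autoimpute.utils import md_locations, md_pattern, proportions
#
#     df = pd.DataFrame({"a": [1, np.nan, 3], "b": [np.nan, 2, 3]})
#     md_locations(df)  # cell-level missingness indicators
#     md_pattern(df)    # distinct missingness patterns and their counts
#     proportions(df)   # proportion of missing/observed values per column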
|
goodtables/cli.py | davidpeckham/goodtables-py | 243 | 11149352 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from sys import exit
import click
import goodtables
import json as json_module
from pprint import pformat
from click_default_group import DefaultGroup
from .helpers import init_datapackage
from . import config
click.disable_unicode_literals_warning = True
# Module API
@click.group(cls=DefaultGroup, default='validate', default_if_no_args=True, help='')
@click.version_option(config.VERSION, message='%(version)s')
def cli():
"""Command-line interface
```
Usage: cli.py [OPTIONS] COMMAND [ARGS]...
Options:
--version Show the version and exit.
--help Show this message and exit.
Commands:
validate* Validate tabular files (default).
init Init data package from list of files.
```
"""
pass
@cli.command(
short_help='Validate tabular files (default).',
)
@click.argument('paths', type=click.Path(), nargs=-1, required=True)
@click.option('--quiet', '-q', is_flag=True, help='Don\'t output anything.')
@click.option('--json', is_flag=True, help='Output report as JSON.')
@click.option(
'--output',
'-o',
type=click.File('w'),
default='-',
help='Redirect output to a file.'
)
@click.option('--preset')
@click.option('--schema', type=click.Path(), help='Path to a Table Schema.')
@click.option(
'--infer-schema/--no-infer-schema',
default=False,
help='Infer schema. If an explicit schema is defined, infer missing columns only.'
)
@click.option('--checks', '-c', multiple=True, help='Checks to enable.')
@click.option(
'--skip-checks',
'-C',
multiple=True,
help='Checks to disable.'
)
@click.option(
'--order-fields',
is_flag=True,
help='Don\'t validate the columns order.'
)
@click.option(
'--row-limit',
type=int,
default=-1,
help='Maximum number of rows to validate (-1 for no limit)'
)
@click.option(
'--table-limit',
type=int,
default=-1,
help='Maximum number of tables to validate (-1 for no limit)'
)
@click.option(
'--error-limit',
type=int,
default=-1,
help='Stop validating if there are more than this number of errors (-1 for no limit).'
)
def validate(paths, json, **options):
# Remove blank values
options = {key: value for key, value in options.items() if value is not None}
if not options['checks']:
del options['checks']
if not options['skip_checks']:
del options['skip_checks']
options['infer_fields'] = options['infer_schema']
quiet = options.pop('quiet')
output = options.pop('output')
if options.get('preset') == 'datapackage':
sources = paths[0]
else:
sources = [{'source': path} for path in paths]
schema = options.pop('schema', None)
if schema:
for source in sources:
source['schema'] = schema
report = goodtables.validate(sources, **options)
if not quiet:
_print_report(report, output=output, json=json)
exit(int(not report['valid']))
@cli.command()
@click.argument('paths', type=click.Path(), nargs=-1, required=True)
@click.option(
'--output',
'-o',
type=click.File('w'),
default='-',
help='Redirect output to a file.'
)
def init(paths, output, **kwargs):
"""Init data package from list of files.
It will also infer tabular data's schemas from their contents.
"""
dp = init_datapackage(paths)
click.secho(
json_module.dumps(dp.descriptor, indent=4),
file=output
)
exit(int(not dp.valid)) # Just to be defensive, as it should always be valid.
# Internal
def _print_report(report, output=None, json=False):
def secho(*args, **kwargs):
click.secho(file=output, *args, **kwargs)
if json:
return secho(json_module.dumps(report, indent=4))
color = 'green' if report['valid'] else 'red'
tables = report.pop('tables')
warnings = report.pop('warnings')
secho('DATASET', bold=True)
secho('='*7, bold=True)
secho(pformat(report), fg=color, bold=True)
if warnings:
secho('-'*9, bold=True)
for warning in warnings:
secho('Warning: %s' % warning, fg='yellow')
for table_number, table in enumerate(tables, start=1):
secho('\nTABLE [%s]' % table_number, bold=True)
secho('='*9, bold=True)
color = 'green' if table['valid'] else 'red'
errors = table.pop('errors')
secho(pformat(table), fg=color, bold=True)
if errors:
secho('-'*9, bold=True)
for error in errors:
template = '[{row-number},{column-number}] [{code}] {message}'
substitutions = {
'row-number': error.get('row-number', '-'),
'column-number': error.get('column-number', '-'),
'code': error.get('code', '-'),
'message': error.get('message', '-'),
}
message = template.format(**substitutions)
secho(message)
# Main
if __name__ == "__main__":
cli()
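# A small programmatic sketch mirroring what the `validate` command above does;
# 'data.csv' is a placeholder path, and only report keys this module already
# reads ('valid', 'tables', 'errors') are accessed:
#
#     report = goodtables.validate([{'source': 'data.csv'}])
#     if not report['valid']:
#         for table in report['tables']:
#             print(table['errors'])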
|
MSDN_crawler/extract_til_constant_info.py | clayne/flare-ida | 1,471 | 11149374 | """
Obtain matchups between a constant name and the standard enum IDA Pro uses.
Authors: <NAME>, <NAME>
Copyright 2014 Mandiant, A FireEye Company
Mandiant licenses this file to you under the Apache License, Version
2.0 (the "License"); you may not use this file except in compliance with the
License. You may obtain a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License.
"""
import re
import sys
import os
import logging
import subprocess
g_logger = logging.getLogger("til_extractor")
def show_usage():
print 'Usage:',
print sys.argv[0] + ' <path to tilib> <til directory>'
def main(tilib_exe, til_dir):
logging.basicConfig(level=logging.WARN)
if not os.path.isfile(tilib_exe):
g_logger.warn(tilib_exe + ' is not a file')
return False
if not os.path.isdir(til_dir):
g_logger.warn(til_dir + ' is not a directory')
return False
const_pattern = re.compile("([0-9A-Fa-f]{8}) ([0-9A-Fa-f]{8}) +([A-Za-z0-9_]+) ([A-Za-z0-9_]+)")
ignored_enum_names = set(["int", "unsigned", "const", "UINT", "void", "struct", "__int16", "char"])
for til_file in os.listdir(til_dir):
til_file = os.path.join(til_dir, til_file)
g_logger.debug("Will process til file: %s", til_file)
if not os.path.isfile(til_file):
continue
try:
output = subprocess.check_output([tilib_exe, "-l", til_file],
shell=True,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
g_logger.warn("Error calling tilib.exe with %s -- %s", til_file, e)
# Not all files can be parsed correctly
continue
enums = {} # dict of (enum_name:string, enum_def:dict of (constant_name:string, constant_value:int))
for line in output.split("\n"):
if "__stdcall" in line:
continue
if "__cdecl" in line:
continue
if "__fastcall" in line:
continue
m = const_pattern.match(line)
if not m:
continue
constant_value = int(m.group(2), 0x10)
enum_name = m.group(3)
constant_name = m.group(4)
# our simple parsing of the text output isn't very smart, so we get
# some typedefs, too try to ignore those, on a best effort basis
if enum_name in ignored_enum_names:
continue
g_logger.debug("%s", line)
g_logger.debug(" value: %s", hex(constant_value))
g_logger.debug(" enum_name: %s", enum_name)
g_logger.debug(" constant_name: %s", constant_name)
enum = enums.get(enum_name, {})
if constant_name not in enum:
enum[constant_name] = constant_value
enums[enum_name] = enum
return_data = {} # dict of (constant_name:string, enum_name:string)
for enum_name, enum in enums.iteritems():
for constant_name, constant_value in enum.iteritems():
return_data[constant_name] = enum_name
return return_data
if __name__ == '__main__':
    if len(sys.argv) != 3:
        show_usage()
        sys.exit(1)
    main(sys.argv[1], sys.argv[2])
|
MuZero/MuCoach.py | morozig/muzero | 111 | 11149395 | <filename>MuZero/MuCoach.py
"""
Implements the abstract Coach class for defining the data sampling procedures for MuZero neural network training.
Notes:
- Base implementation done.
- Documentation 15/11/2020
"""
import typing
from datetime import datetime
import numpy as np
import tensorflow as tf
from Coach import Coach
from Agents import DefaultMuZeroPlayer
from MuZero.MuMCTS import MuZeroMCTS
from utils import DotDict
from utils.selfplay_utils import GameHistory, sample_batch
class MuZeroCoach(Coach):
"""
Implement base Coach class to define proper data-batch sampling procedures and logging objects.
"""
def __init__(self, game, neural_net, args: DotDict, run_name: typing.Optional[str] = None) -> None:
"""
Initialize the class for self-play. This inherited method initializes tensorboard logging and defines
helper variables for data batch sampling.
The super class is initialized with the proper search engine and agent-interface. (MuZeroMCTS, MuZeroPlayer)
:param game: Game Implementation of Game class for environment logic.
:param neural_net: MuNeuralNet Implementation of MuNeuralNet class for inference.
:param args: DotDict Data structure containing parameters for self-play.
:param run_name: str Optionally provide a run-name for the TensorBoard log-files. Default is current datetime.
"""
super().__init__(game, neural_net, args, MuZeroMCTS, DefaultMuZeroPlayer)
# Initialize tensorboard logging.
if run_name is None:
run_name = datetime.now().strftime("%Y%m%d-%H%M%S")
self.log_dir = f"out/logs/MuZero/{self.neural_net.architecture}/" + run_name
self.file_writer = tf.summary.create_file_writer(self.log_dir + "/metrics")
self.file_writer.set_as_default()
# Define helper variables.
self.return_forward_observations = (neural_net.net_args.dynamics_penalty > 0 or args.latent_decoder)
self.observation_stack_length = neural_net.net_args.observation_length
def buildHypotheticalSteps(self, history: GameHistory, t: int, k: int) -> \
typing.Tuple[np.ndarray, typing.Tuple[np.ndarray, np.ndarray, np.ndarray], np.ndarray]:
"""
Sample/ extrapolate a sequence of targets for unrolling/ fitting the MuZero neural network.
This sequence consists of the actions performed at time t until t + k - 1. These are used for unrolling the
dynamics model. For extrapolating beyond terminal states we adopt an uniform policy over the entire action
space to ensure that the model learns to generalize over the actions when encountering terminal states.
The move-probabilities, value, and reward predictions are sampled from t until t + k. Note that the reward
at the first index is not used for weight optimization as the initial call to the model does not predict
rewards. For extrapolating beyond terminal states we repeat a zero vector for the move-probabilities and
zeros for the reward and value targets seeing as a terminated environment does not provide rewards. The
zero vector for the move-probabilities is used to define an improper probability distribution. The loss
function can then infer that the episode ended, and distribute gradient accordingly.
Empirically we observed that extrapolating an uniform move-policy for the move-probability vector results
in slower and more unstable learning as we're feeding wrong data to the neural networks. We found that not
distributing any gradient at all to these extrapolated steps resulted in the best learning.
:param history: GameHistory Sampled data structure containing all statistics/ observations of a finished game.
:param t: int The sampled index to generate the targets at.
:param k: int The number of unrolling steps to perform/ length of the dynamics model target sequence.
:return: Tuple of (actions, targets, future_inputs) that the neural network needs for optimization
"""
# One hot encode actions.
actions = history.actions[t:t+k]
a_truncation = k - len(actions)
if a_truncation > 0: # Uniform policy when unrolling beyond terminal states.
actions += np.random.randint(self.game.getActionSize(), size=a_truncation).tolist()
enc_actions = np.zeros([k, self.game.getActionSize()])
enc_actions[np.arange(len(actions)), actions] = 1
# Value targets.
pis = history.probabilities[t:t+k+1]
vs = history.observed_returns[t:t+k+1]
rewards = history.rewards[t:t+k+1]
# Handle truncations > 0 due to terminal states. Treat last state as absorbing state
t_truncation = (k + 1) - len(pis) # Target truncation due to terminal state
if t_truncation > 0:
pis += [np.zeros_like(pis[-1])] * t_truncation # Zero vector
rewards += [0] * t_truncation # = 0
vs += [0] * t_truncation # = 0
# If specified, also sample/ extrapolate future observations. Otherwise return an empty array.
obs_trajectory = []
if self.return_forward_observations:
obs_trajectory = [history.stackObservations(self.observation_stack_length, t=t+i+1) for i in range(k)]
# (Actions, Targets, Observations)
return enc_actions, (np.asarray(vs), np.asarray(rewards), np.asarray(pis)), obs_trajectory
def sampleBatch(self, histories: typing.List[GameHistory]) -> typing.List:
"""
Construct a batch of data-targets for gradient optimization of the MuZero neural network.
The procedure samples a list of game and inside-game coordinates of length 'batch_size'. This is done either
uniformly or with prioritized sampling. Using this list of coordinates, we sample the according games, and
the according points of times within the game to generate neural network inputs, targets, and sample weights.
:param histories: List of GameHistory objects. Contains all game-trajectories in the replay-buffer.
:return: List of training examples: (observations, actions, targets, forward_observations, sample_weights)
"""
# Generate coordinates within the replay buffer to sample from. Also generate the loss scale of said samples.
sample_coordinates, sample_weight = sample_batch(
list_of_histories=histories, n=self.neural_net.net_args.batch_size, prioritize=self.args.prioritize,
alpha=self.args.prioritize_alpha, beta=self.args.prioritize_beta)
# Collect training examples for MuZero: (input, action, (targets), forward_observations, loss_scale)
examples = [(
histories[h_i].stackObservations(self.observation_stack_length, t=i),
*self.buildHypotheticalSteps(histories[h_i], t=i, k=self.args.K),
loss_scale
)
for (h_i, i), loss_scale in zip(sample_coordinates, sample_weight)
]
return examples
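# A standalone sketch of the action padding and one-hot encoding performed in
# buildHypotheticalSteps above: actions sampled beyond a terminal state come
# from a uniform policy, and the full length-k sequence is one-hot encoded.
# The helper name below is illustrative (not part of the original class) and
# assumes len(actions) <= k, as in the method above.
def _one_hot_action_padding_sketch(actions: list, k: int, action_size: int) -> np.ndarray:
    truncation = k - len(actions)
    if truncation > 0:  # Uniform policy when unrolling beyond terminal states.
        actions = actions + np.random.randint(action_size, size=truncation).tolist()
    encoded = np.zeros([k, action_size])
    encoded[np.arange(len(actions)), actions] = 1  # One-hot encode the actions.
    return encoded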
|
Python/Tests/TestData/TestDiscoverer/ConfigPythonFiles/test_pt.py | techkey/PTVS | 404 | 11149408 | def test_1():
assert True
|
src/collectors/sqs/sqs.py | hermdog/Diamond | 1,795 | 11149412 | # coding=utf-8
"""
The SQS collector collects metrics for one or more Amazon AWS SQS queues
#### Configuration
Below is an example configuration for the SQSCollector.
You can specify an arbitrary amount of regions
```
enabled = True
interval = 60
[regions]
[[region-code]]
queues = queue_name[,queue_name2[,..]]
# Optional - assumes IAM role with instance profile if not provided.
access_key_id = '...'
secret_access_key = '...'
```
Note: If you modify the SQSCollector configuration, you will need to
restart diamond.
#### Dependencies
* boto
"""
import diamond.collector
try:
from boto import sqs
except ImportError:
sqs = False
class SqsCollector(diamond.collector.Collector):
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(SqsCollector, self).get_default_config()
config.update({
'path': 'sqs',
})
return config
def collect(self):
attribs = ['ApproximateNumberOfMessages',
'ApproximateNumberOfMessagesNotVisible',
'ApproximateNumberOfMessagesDelayed',
'CreatedTimestamp',
'DelaySeconds',
'LastModifiedTimestamp',
'MaximumMessageSize',
'MessageRetentionPeriod',
'ReceiveMessageWaitTimeSeconds',
'VisibilityTimeout']
if not sqs:
self.log.error("boto module not found!")
return
for (region, region_cfg) in self.config['regions'].items():
assert 'queues' in region_cfg
auth_kwargs = _get_auth_kwargs(config=region_cfg)
queues = region_cfg['queues'].split(',')
for queue_name in queues:
conn = sqs.connect_to_region(region, **auth_kwargs)
queue = conn.get_queue(queue_name)
for attrib in attribs:
d = queue.get_attributes(attrib)
self.publish(
'%s.%s.%s' % (region, queue_name, attrib),
d[attrib]
)
def _get_auth_kwargs(config):
"""Generate the kwargs for the AWS keys from a configuration dictionary.
If credentials are not present in the config, then assume that
we're using IAM roles with instance profiles. :mod:`boto` will
automatically take care of using the credentials from the instance
metadata if not provided with kwargs.
:param config: The configuration to use when looking for explicitly
provided AWS credentials.
:type config: dict
:returns: The kwargs for use with :mod:`boto` connect functions.
:rtype: dict
"""
if not ('access_key_id' in config and 'secret_access_key' in config):
return {}
return {
'aws_access_key_id': config['access_key_id'],
'aws_secret_access_key': config['secret_access_key'],
}
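# A minimal sketch of the credential fallback implemented above; the key
# values are dummies used only for illustration.
if __name__ == "__main__":
    assert _get_auth_kwargs(config={}) == {}  # falls back to the IAM instance profile
    assert _get_auth_kwargs(config={
        'access_key_id': 'dummy-id',
        'secret_access_key': 'dummy-secret',
    }) == {
        'aws_access_key_id': 'dummy-id',
        'aws_secret_access_key': 'dummy-secret',
    }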
|
tests/test_smtp.py | IncognitoCoding/mailrise | 177 | 11149452 | from email.message import EmailMessage
from pathlib import Path
from mailrise.config import Key
from mailrise.smtp import RecipientError, parsemessage, parsercpt
import apprise
import pytest
def test_parsercpt() -> None:
"""Tests for recipient parsing."""
rcpt = parsercpt('<EMAIL>')
assert rcpt.key == Key(user='test')
assert rcpt.notify_type == apprise.NotifyType.INFO
rcpt = parsercpt('<EMAIL>')
assert rcpt.key == Key(user='test')
assert rcpt.notify_type == apprise.NotifyType.WARNING
rcpt = parsercpt('"with_quotes"@<EMAIL>')
assert rcpt.key == Key(user='with_quotes')
assert rcpt.notify_type == apprise.NotifyType.INFO
rcpt = parsercpt('"with_quotes.success"@mail<EMAIL>')
assert rcpt.key == Key('with_quotes')
assert rcpt.notify_type == apprise.NotifyType.SUCCESS
rcpt = parsercpt('"weird_quotes".<EMAIL>')
assert rcpt.key == Key('"weird_quotes"')
assert rcpt.notify_type == apprise.NotifyType.SUCCESS
rcpt = parsercpt('<NAME> <<EMAIL>>')
assert rcpt.key == Key('johndoe')
assert rcpt.notify_type == apprise.NotifyType.WARNING
with pytest.raises(RecipientError):
parsercpt("Invalid Email <bad@>")
def test_parsemessage() -> None:
"""Tests for email message parsing."""
msg = EmailMessage()
msg.set_content('Hello, World!')
msg['From'] = ''
msg['Subject'] = 'Test Message'
notification = parsemessage(msg)
assert notification.subject == 'Test Message'
assert notification.body == 'Hello, World!'
assert notification.body_format == apprise.NotifyFormat.TEXT
msg = EmailMessage()
msg.set_content('Hello, World!')
msg.add_alternative('Hello, <strong>World!</strong>', subtype='html')
notification = parsemessage(msg)
assert notification.subject == '[no subject]'
assert notification.from_ == '[no sender]'
assert notification.body == 'Hello, <strong>World!</strong>'
assert notification.body_format == apprise.NotifyFormat.HTML
def test_parseattachments() -> None:
"""Tests for email message parsing with attachments."""
img_name = 'bridge.jpg'
with open(Path(__file__).parent/img_name, 'rb') as fp:
img_data = fp.read()
msg = EmailMessage()
msg.set_content('Hello, World!')
msg['From'] = '<EMAIL>'
msg['Subject'] = 'Now With Images'
msg.add_attachment(
img_data,
maintype='image',
subtype='jpeg',
filename=img_name
)
notification = parsemessage(msg)
assert notification.subject == 'Now With Images'
assert notification.from_ == '<EMAIL>'
assert notification.body == 'Hello, World!'
assert notification.body_format == apprise.NotifyFormat.TEXT
assert len(notification.attachments) == 1
assert notification.attachments[0].data == img_data
assert notification.attachments[0].filename == img_name
msg = EmailMessage()
msg.set_content('Hello, World!')
msg['From'] = '<EMAIL>'
msg['Subject'] = 'Now With Images'
msg.add_attachment(
img_data,
maintype='image',
subtype='jpeg',
filename=f'1_{img_name}'
)
msg.add_attachment(
img_data,
maintype='image',
subtype='jpeg',
filename=f'2_{img_name}'
)
notification = parsemessage(msg)
assert notification.subject == 'Now With Images'
assert notification.from_ == '<EMAIL>'
assert notification.body == 'Hello, World!'
assert notification.body_format == apprise.NotifyFormat.TEXT
assert len(notification.attachments) == 2
for attach in notification.attachments:
assert attach.data == img_data
assert notification.attachments[0].filename == f'1_{img_name}'
assert notification.attachments[1].filename == f'2_{img_name}'
|
rclpy/executors/examples_rclpy_executors/callback_group.py | peterpolidoro/ros2_examples | 335 | 11149464 | <filename>rclpy/executors/examples_rclpy_executors/callback_group.py
# Copyright 2017 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from examples_rclpy_executors.listener import Listener
import rclpy
from rclpy.callback_groups import MutuallyExclusiveCallbackGroup
from rclpy.executors import MultiThreadedExecutor
from rclpy.node import Node
from std_msgs.msg import String
class DoubleTalker(Node):
"""Publish messages to a topic using two publishers at different rates."""
def __init__(self):
super().__init__('double_talker')
self.i = 0
self.pub = self.create_publisher(String, 'chatter', 10)
# This type of callback group only allows one callback to be executed at a time
self.group = MutuallyExclusiveCallbackGroup()
# Pass the group as a parameter to give it control over the execution of the timer callback
self.timer = self.create_timer(1.0, self.timer_callback, callback_group=self.group)
self.timer2 = self.create_timer(0.5, self.timer_callback, callback_group=self.group)
def timer_callback(self):
msg = String()
msg.data = 'Hello World: {0}'.format(self.i)
self.i += 1
self.get_logger().info('Publishing: "{0}"'.format(msg.data))
self.pub.publish(msg)
def main(args=None):
rclpy.init(args=args)
try:
talker = DoubleTalker()
listener = Listener()
# MultiThreadedExecutor executes callbacks with a thread pool. If num_threads is not
# specified then num_threads will be multiprocessing.cpu_count() if it is implemented.
# Otherwise it will use a single thread. This executor will allow callbacks to happen in
# parallel, however the MutuallyExclusiveCallbackGroup in DoubleTalker will only allow its
# callbacks to be executed one at a time. The callbacks in Listener are free to execute in
# parallel to the ones in DoubleTalker however.
executor = MultiThreadedExecutor(num_threads=4)
executor.add_node(talker)
executor.add_node(listener)
try:
executor.spin()
finally:
executor.shutdown()
listener.destroy_node()
talker.destroy_node()
finally:
rclpy.shutdown()
if __name__ == '__main__':
main()
|
droidlet/interpreter/tests/test_interpreter_utils.py | ali-senguel/fairo | 669 | 11149471 | <gh_stars>100-1000
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import re
import unittest
from copy import deepcopy
from droidlet.interpreter import process_spans_and_remove_fixed_value
from droidlet.perception.semantic_parsing.tests.test_y_print_parsing_report import (
common_functional_commands,
compare_full_dictionaries,
)
from .all_test_commands import INTERPRETER_POSSIBLE_ACTIONS, FILTERS, REFERENCE_OBJECTS
logical_form_before_processing = {
"turn right": common_functional_commands["turn right"],
"where are my keys": common_functional_commands["where are my keys"],
"go forward": common_functional_commands["go forward"],
}
# FIXME! put these in the main file
logical_form_post_processing = {
"turn right": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{"dance_type": {"body_turn": {"relative_yaw": "-90"}}, "action_type": "DANCE"}
],
},
"where are my keys": {
"dialogue_type": "GET_MEMORY",
"filters": {
"output": {"attribute": "LOCATION"},
"where_clause": {"AND": [{"pred_text": "has_name", "obj_text": "keys"}]},
},
},
"go forward": {
"dialogue_type": "HUMAN_GIVE_COMMAND",
"action_sequence": [
{
"location": {
"relative_direction": "FRONT",
"reference_object": {"special_reference": "AGENT"},
},
"action_type": "MOVE",
}
],
},
}
class TestInterpreterUtils(unittest.TestCase):
def test_process_spans(self):
for k, v in logical_form_before_processing.items():
processed = deepcopy(v)
original_words = re.split(r" +", k)
lemmatized_words = original_words
process_spans_and_remove_fixed_value(
processed, original_words, lemmatized_words
) # process spans and fixed_values. Implemented in: interpreter_utils.
assert compare_full_dictionaries(processed, logical_form_post_processing[k])
def test_location_reference_object(self):
def check_location_in_filters(action_dict):
for key, value in action_dict.items():
if key == "filters" and "location" in value:
return False
elif type(value) == dict:
return check_location_in_filters(value)
return True
all_dicts = INTERPRETER_POSSIBLE_ACTIONS
all_dicts.update(FILTERS)
all_dicts.update(REFERENCE_OBJECTS)
for key, action_dict in all_dicts.items():
self.assertTrue(check_location_in_filters(action_dict))
if __name__ == "__main__":
unittest.main()
|
pytype/tests/test_tracebacks2.py | Jrryy/pytype | 3,882 | 11149472 | <reponame>Jrryy/pytype<gh_stars>1000+
"""Tests for displaying tracebacks in error messages."""
from pytype.tests import test_base
class TracebackTest(test_base.BaseTest):
"""Tests for tracebacks in error messages."""
def test_build_class(self):
errors = self.CheckWithErrors("""
class Foo:
def f(self, x: Bar): # name-error[e]
pass
""")
self.assertErrorRegexes(errors, {"e": r"Bar.*not defined$"})
if __name__ == "__main__":
test_base.main()
|
kats/models/metalearner/__init__.py | iamxiaodong/Kats | 3,580 | 11149476 | <reponame>iamxiaodong/Kats<filename>kats/models/metalearner/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
try:
from . import get_metadata # noqa
except ImportError:
import logging
logging.warning(
"kats.models.metalearner.get_metadata requires ax-platform be installed"
)
try:
from . import metalearner_hpt # noqa
except ImportError:
import logging
logging.warning(
"kats.models.metalearner.metalearner_hpt requires torch be installed"
)
from . import metalearner_modelselect # noqa
from . import metalearner_predictability # noqa
|
tests/fixtures/envs/dummy/dummy_reward_box_env.py | blacksph3re/garage | 1,500 | 11149477 | <filename>tests/fixtures/envs/dummy/dummy_reward_box_env.py
from tests.fixtures.envs.dummy import DummyBoxEnv
class DummyRewardBoxEnv(DummyBoxEnv):
"""A dummy box environment."""
def __init__(self, random=True):
super().__init__(random)
def step(self, action):
"""Step the environment."""
if action == 0:
reward = 10
else:
reward = -10
return self.observation_space.sample(), reward, True, dict()
|
tests/helper.py | Jsn2win/pycoinnet | 114 | 11149496 | import hashlib
from pycoin import ecdsa
from pycoin.block import Block, BlockHeader
from pycoin.encoding import public_pair_to_sec
from pycoin.tx.Tx import Tx, TxIn, TxOut
GENESIS_TIME = 1390000000
DEFAULT_DIFFICULTY = 3000000
HASH_INITIAL_BLOCK = b'\0' * 32
def make_hash(i, s=b''):
return hashlib.sha256(("%d_%s" % (i, s)).encode()).digest()
def make_tx(i):
txs_in = [TxIn(make_hash(i*10000+idx), (i+idx) % 2) for idx in range(3)]
txs_out = [TxOut(i*40000, make_hash(i*20000+idx)) for idx in range(2)]
tx = Tx(1, txs_in, txs_out)
return tx
def make_headers(count, header=None):
if header is None:
last_hash = HASH_INITIAL_BLOCK
else:
last_hash = header.hash()
tweak = last_hash
headers = []
for i in range(count):
headers.append(
BlockHeader(version=1, previous_block_hash=last_hash, merkle_root=make_hash(i, tweak),
timestamp=GENESIS_TIME+i*600, difficulty=DEFAULT_DIFFICULTY, nonce=i*137))
last_hash = headers[-1].hash()
return headers
def make_block(index):
s = index*30000
txs = [make_tx(i) for i in range(s, s+8)]
block = Block(version=1, previous_block_hash=b'\0'*32, merkle_root=b'\0'*32,
timestamp=GENESIS_TIME+index, difficulty=s, nonce=s, txs=txs)
return block
def coinbase_tx(secret_exponent):
public_pair = ecdsa.public_pair_for_secret_exponent(
ecdsa.secp256k1.generator_secp256k1, secret_exponent)
public_key_sec = public_pair_to_sec(public_pair)
return Tx.coinbase_tx(public_key_sec, 2500000000)
COINBASE_TX = coinbase_tx(1)
def make_blocks(count, nonce_base=30000, previous_block_hash=HASH_INITIAL_BLOCK):
blocks = []
for i in range(count):
s = i * nonce_base
txs = [COINBASE_TX] # + [make_tx(i) for i in range(s, s+8)]
nonce = s
while True:
block = Block(version=1, previous_block_hash=previous_block_hash, merkle_root=b'\0'*32,
timestamp=GENESIS_TIME+i*600, difficulty=i, nonce=nonce, txs=txs)
if block.hash()[-1] == i & 0xff:
break
nonce += 1
blocks.append(block)
previous_block_hash = block.hash()
return blocks
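# A small sketch showing the property the fixtures above rely on: blocks from
# make_blocks() are chained by hash. Like the rest of this helper module, it
# needs pycoin installed to run.
if __name__ == "__main__":
    blocks = make_blocks(3)
    assert blocks[0].previous_block_hash == HASH_INITIAL_BLOCK
    assert blocks[1].previous_block_hash == blocks[0].hash()
    assert blocks[2].previous_block_hash == blocks[1].hash()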
|
test/unit/test_system_client.py | tomasfarias/dbt-core | 799 | 11149502 | import os
import shutil
import stat
import unittest
import tarfile
import io
from pathlib import Path
from tempfile import mkdtemp, NamedTemporaryFile
from dbt.exceptions import ExecutableError, WorkingDirectoryError
import dbt.clients.system
class SystemClient(unittest.TestCase):
def setUp(self):
super().setUp()
self.tmp_dir = mkdtemp()
self.profiles_path = '{}/profiles.yml'.format(self.tmp_dir)
def set_up_profile(self):
with open(self.profiles_path, 'w') as f:
f.write('ORIGINAL_TEXT')
def get_profile_text(self):
with open(self.profiles_path, 'r') as f:
return f.read()
def tearDown(self):
try:
shutil.rmtree(self.tmp_dir)
except:
pass
def test__make_file_when_exists(self):
self.set_up_profile()
written = dbt.clients.system.make_file(self.profiles_path, contents='NEW_TEXT')
self.assertFalse(written)
self.assertEqual(self.get_profile_text(), 'ORIGINAL_TEXT')
def test__make_file_when_not_exists(self):
written = dbt.clients.system.make_file(self.profiles_path, contents='NEW_TEXT')
self.assertTrue(written)
self.assertEqual(self.get_profile_text(), 'NEW_TEXT')
def test__make_file_with_overwrite(self):
self.set_up_profile()
written = dbt.clients.system.make_file(self.profiles_path, contents='NEW_TEXT', overwrite=True)
self.assertTrue(written)
self.assertEqual(self.get_profile_text(), 'NEW_TEXT')
class TestRunCmd(unittest.TestCase):
"""Test `run_cmd`.
Don't mock out subprocess, in order to expose any OS-level differences.
"""
not_a_file = 'zzzbbfasdfasdfsdaq'
def setUp(self):
self.tempdir = mkdtemp()
self.run_dir = os.path.join(self.tempdir, 'run_dir')
self.does_not_exist = os.path.join(self.tempdir, 'does_not_exist')
self.empty_file = os.path.join(self.tempdir, 'empty_file')
if os.name == 'nt':
self.exists_cmd = ['cmd', '/C', 'echo', 'hello']
else:
self.exists_cmd = ['echo', 'hello']
os.mkdir(self.run_dir)
with open(self.empty_file, 'w') as fp:
pass # "touch"
def tearDown(self):
shutil.rmtree(self.tempdir)
def test__executable_does_not_exist(self):
with self.assertRaises(ExecutableError) as exc:
dbt.clients.system.run_cmd(self.run_dir, [self.does_not_exist])
msg = str(exc.exception).lower()
self.assertIn('path', msg)
self.assertIn('could not find', msg)
self.assertIn(self.does_not_exist.lower(), msg)
def test__not_exe(self):
with self.assertRaises(ExecutableError) as exc:
dbt.clients.system.run_cmd(self.run_dir, [self.empty_file])
msg = str(exc.exception).lower()
if os.name == 'nt':
# on windows, this means it's not an executable at all!
self.assertIn('not executable', msg)
else:
# on linux, this means you don't have executable permissions on it
self.assertIn('permissions', msg)
self.assertIn(self.empty_file.lower(), msg)
def test__cwd_does_not_exist(self):
with self.assertRaises(WorkingDirectoryError) as exc:
dbt.clients.system.run_cmd(self.does_not_exist, self.exists_cmd)
msg = str(exc.exception).lower()
self.assertIn('does not exist', msg)
self.assertIn(self.does_not_exist.lower(), msg)
def test__cwd_not_directory(self):
with self.assertRaises(WorkingDirectoryError) as exc:
dbt.clients.system.run_cmd(self.empty_file, self.exists_cmd)
msg = str(exc.exception).lower()
self.assertIn('not a directory', msg)
self.assertIn(self.empty_file.lower(), msg)
def test__cwd_no_permissions(self):
# it would be nice to add a windows test. Possible path to that is via
# `psexec` (to get SYSTEM privs), use `icacls` to set permissions on
# the directory for the test user. I'm pretty sure windows users can't
# create files that they themselves cannot access.
if os.name == 'nt':
return
# read-only -> cannot cd to it
os.chmod(self.run_dir, stat.S_IRUSR)
with self.assertRaises(WorkingDirectoryError) as exc:
dbt.clients.system.run_cmd(self.run_dir, self.exists_cmd)
msg = str(exc.exception).lower()
self.assertIn('permissions', msg)
self.assertIn(self.run_dir.lower(), msg)
def test__ok(self):
out, err = dbt.clients.system.run_cmd(self.run_dir, self.exists_cmd)
self.assertEqual(out.strip(), b'hello')
self.assertEqual(err.strip(), b'')
class TestFindMatching(unittest.TestCase):
def setUp(self):
self.base_dir = mkdtemp()
self.tempdir = mkdtemp(dir=self.base_dir)
def test_find_matching_lowercase_file_pattern(self):
with NamedTemporaryFile(
prefix='sql-files', suffix='.sql', dir=self.tempdir
) as named_file:
file_path = os.path.dirname(named_file.name)
relative_path = os.path.basename(file_path)
out = dbt.clients.system.find_matching(
self.base_dir, [relative_path], '*.sql'
)
expected_output = [{
'searched_path': relative_path,
'absolute_path': named_file.name,
'relative_path': os.path.basename(named_file.name),
'modification_time': out[0]['modification_time'],
}]
self.assertEqual(out, expected_output)
def test_find_matching_uppercase_file_pattern(self):
with NamedTemporaryFile(prefix='sql-files', suffix='.SQL', dir=self.tempdir) as named_file:
file_path = os.path.dirname(named_file.name)
relative_path = os.path.basename(file_path)
out = dbt.clients.system.find_matching(
self.base_dir, [relative_path], '*.sql'
)
expected_output = [{
'searched_path': relative_path,
'absolute_path': named_file.name,
'relative_path': os.path.basename(named_file.name),
'modification_time': out[0]['modification_time'],
}]
self.assertEqual(out, expected_output)
def test_find_matching_file_pattern_not_found(self):
with NamedTemporaryFile(
prefix='sql-files', suffix='.SQLT', dir=self.tempdir
):
out = dbt.clients.system.find_matching(self.tempdir, [''], '*.sql')
self.assertEqual(out, [])
def tearDown(self):
try:
shutil.rmtree(self.base_dir)
except:
pass
class TestUntarPackage(unittest.TestCase):
def setUp(self):
self.base_dir = mkdtemp()
self.tempdir = mkdtemp(dir=self.base_dir)
self.tempdest = mkdtemp(dir=self.base_dir)
def tearDown(self):
try:
shutil.rmtree(self.base_dir)
except:
pass
def test_untar_package_success(self):
# set up a valid tarball to test against
with NamedTemporaryFile(
prefix='my-package.2', suffix='.tar.gz', dir=self.tempdir, delete=False
) as named_tar_file:
tar_file_full_path = named_tar_file.name
with NamedTemporaryFile(
prefix='a', suffix='.txt', dir=self.tempdir
) as file_a:
file_a.write(b'some text in the text file')
relative_file_a = os.path.basename(file_a.name)
with tarfile.open(fileobj=named_tar_file, mode='w:gz') as tar:
tar.addfile(tarfile.TarInfo(relative_file_a), open(file_a.name))
# now we test can test that we can untar the file successfully
assert tarfile.is_tarfile(tar.name)
dbt.clients.system.untar_package(tar_file_full_path, self.tempdest)
path = Path(os.path.join(self.tempdest, relative_file_a))
assert path.is_file()
def test_untar_package_failure(self):
# create a text file then rename it as a tar (so it's invalid)
with NamedTemporaryFile(
prefix='a', suffix='.txt', dir=self.tempdir, delete=False
) as file_a:
file_a.write(b'some text in the text file')
txt_file_name = file_a.name
            file_path = os.path.dirname(txt_file_name)
tar_file_path = os.path.join(file_path, 'mypackage.2.tar.gz')
os.rename(txt_file_name, tar_file_path)
# now that we're set up, test that untarring the file fails
with self.assertRaises(tarfile.ReadError) as exc:
dbt.clients.system.untar_package(tar_file_path, self.tempdest)
def test_untar_package_empty(self):
# create a tarball with nothing in it
with NamedTemporaryFile(
prefix='my-empty-package.2', suffix='.tar.gz', dir=self.tempdir
) as named_file:
# make sure we throw an error for the empty file
with self.assertRaises(tarfile.ReadError) as exc:
dbt.clients.system.untar_package(named_file.name, self.tempdest)
self.assertEqual("empty file", str(exc.exception))
|
src/sage/misc/sh.py | bopopescu/sage | 1,742 | 11149509 | "Evaluating shell scripts"
import os
class Sh:
r"""
Evaluates a shell script and returns the output.
To use this from the notebook type ``sh`` at the beginning of
the input cell. The working directory is then the (usually
temporary) directory where the Sage worksheet process is
executing.
"""
def eval(self, code, globals=None, locals=None):
r"""
This is difficult to test because the output goes to the
screen rather than being captured by the doctest program, so
the following really only tests that the command doesn't bomb,
not that it gives the right output::
sage: sh.eval('''echo "Hello there"\nif [ $? -eq 0 ]; then\necho "good"\nfi''') # random output
"""
# Print out the current absolute path, which is where the code
# will be evaluated. Evidently, users find this comforting,
# though I personally find it to be a bit much (<NAME>).
print(os.path.abspath('.'))
# Evaluate the input code block. Fortunately, os.system works
# fine with multiline input (in contrast to subprocess.Popen).
os.system(str(code))
# Return '' so nothing extra (for example an unsightly None)
# gets printed when doing %sh in the notebook.
return ''
# Create the sh object, so that %sh mode works in the notebook.
sh = Sh()
|
tests/context/test_policy.py | MolecularAI/aizynthfinder | 219 | 11149540 | <gh_stars>100-1000
import pytest
import numpy as np
from aizynthfinder.chem import (
TreeMolecule,
SmilesBasedRetroReaction,
TemplatedRetroReaction,
)
from aizynthfinder.context.policy import (
TemplateBasedExpansionStrategy,
QuickKerasFilter,
ReactantsCountFilter,
)
from aizynthfinder.utils.exceptions import RejectionException, PolicyException
def test_create_templated_expansion_strategy_wo_kwargs():
with pytest.raises(
PolicyException, match=" class needs to be initiated with keyword arguments"
):
_ = TemplateBasedExpansionStrategy("dummy", None)
def test_load_templated_expansion_policy(
default_config, setup_template_expansion_policy, mocker
):
strategy, mocked_keras_model = setup_template_expansion_policy()
mocked_keras_model.assert_called_once_with("dummy.hdf5", custom_objects=mocker.ANY)
assert len(strategy.templates) == 3
def test_load_invalid_templated_expansion_policy(
default_config, create_dummy_templates, mock_keras_model
):
templates_filename = create_dummy_templates(3)
mock_keras_model.return_value.output = np.zeros((2, 2))
with pytest.raises(PolicyException):
TemplateBasedExpansionStrategy(
"policy1",
default_config,
source="dummy.hdf5",
templatefile=templates_filename,
)
def test_load_expansion_policy(default_config, setup_template_expansion_policy):
strategy, _ = setup_template_expansion_policy()
expansion_policy = default_config.expansion_policy
expansion_policy.load(strategy)
with pytest.raises(PolicyException):
expansion_policy.load(5)
def test_load_expansion_policy_from_config_files(
default_config, mock_keras_model, create_dummy_templates
):
template_filename = create_dummy_templates(3)
expansion_policy = default_config.expansion_policy
expansion_policy.load_from_config(
**{
"files": {
"policy1": ["dummy1", template_filename],
"policy2": ["dummy1", template_filename],
}
}
)
assert "policy1" in expansion_policy.items
assert len(expansion_policy["policy1"].templates) == 3
assert "policy2" in expansion_policy.items
assert len(expansion_policy["policy2"].templates) == 3
def test_load_expansion_policy_from_config_custom(
default_config, mock_keras_model, create_dummy_templates
):
template_filename = create_dummy_templates(3)
expansion_policy = default_config.expansion_policy
expansion_policy.load_from_config(
**{
"TemplateBasedExpansionStrategy": {
"policy1": {"source": "dummy1", "templatefile": template_filename}
},
"aizynthfinder.context.policy.TemplateBasedExpansionStrategy": {
"policy2": {"source": "dummy1", "templatefile": template_filename}
},
}
)
assert "policy1" in expansion_policy.items
assert len(expansion_policy["policy1"].templates) == 3
assert "policy2" in expansion_policy.items
assert len(expansion_policy["policy2"].templates) == 3
def test_get_actions(default_config, setup_template_expansion_policy):
strategy, _ = setup_template_expansion_policy()
expansion_policy = default_config.expansion_policy
expansion_policy.load(strategy)
mols = [TreeMolecule(smiles="CCO", parent=None)]
with pytest.raises(PolicyException, match="selected"):
expansion_policy.get_actions(mols)
expansion_policy.select("policy1")
actions, priors = expansion_policy.get_actions(mols)
assert priors == [0.7, 0.2]
policy_names = [action.metadata["policy_name"] for action in actions]
assert policy_names == ["policy1", "policy1"]
expansion_policy._config.cutoff_cumulative = 1.0
actions, priors = expansion_policy.get_actions(mols)
assert priors == [0.7, 0.2, 0.1]
expansion_policy._config.cutoff_number = 1
actions, priors = expansion_policy.get_actions(mols)
assert priors == [0.7]
def test_get_actions_two_policies(default_config, setup_template_expansion_policy):
expansion_policy = default_config.expansion_policy
strategy1, _ = setup_template_expansion_policy("policy1")
expansion_policy.load(strategy1)
strategy2, _ = setup_template_expansion_policy("policy2")
expansion_policy.load(strategy2)
default_config.additive_expansion = True
expansion_policy.select(["policy1", "policy2"])
mols = [TreeMolecule(smiles="CCO", parent=None)]
actions, priors = expansion_policy.get_actions(mols)
policy_names = [action.metadata["policy_name"] for action in actions]
assert policy_names == ["policy1"] * 2 + ["policy2"] * 2
assert priors == [0.7, 0.2, 0.7, 0.2]
expansion_policy._config.cutoff_cumulative = 1.0
actions, priors = expansion_policy.get_actions(mols)
assert priors == [0.7, 0.2, 0.1, 0.7, 0.2, 0.1]
expansion_policy._config.cutoff_number = 1
actions, priors = expansion_policy.get_actions(mols)
assert priors == [0.7, 0.7]
default_config.additive_expansion = False
default_config.cutoff_number = 2
actions, priors = expansion_policy.get_actions(mols)
policy_names = [action.metadata["policy_name"] for action in actions]
assert policy_names == ["policy1", "policy1"]
assert priors == [0.7, 0.2]
def test_create_quick_filter_strategy_wo_kwargs():
with pytest.raises(
PolicyException, match=" class needs to be initiated with keyword arguments"
):
_ = QuickKerasFilter("dummy", None)
def test_load_filter_policy(default_config, mock_keras_model, mocker):
strategy = QuickKerasFilter("policy1", default_config, source="dummy.hdf5")
default_config.filter_policy.load(strategy)
mock_keras_model.assert_called_once_with("dummy.hdf5", custom_objects=mocker.ANY)
with pytest.raises(PolicyException):
default_config.filter_policy.load(5.0)
def test_load_filter_policy_from_config_files(default_config, mock_keras_model):
filter_policy = default_config.filter_policy
filter_policy.load_from_config(
**{
"files": {
"policy1": "dummy1",
"policy2": "dummy1",
}
}
)
assert "policy1" in filter_policy.items
assert "policy2" in filter_policy.items
def test_load_filter_policy_from_config_custom(default_config, mock_keras_model):
filter_policy = default_config.filter_policy
filter_policy.load_from_config(
**{
"QuickKerasFilter": {"policy1": {"source": "dummy1"}},
"aizynthfinder.context.policy.QuickKerasFilter": {
"policy2": {"source": "dummy1"}
},
}
)
assert "policy1" in filter_policy.items
assert "policy2" in filter_policy.items
def test_filter_rejection(default_config, mock_keras_model):
filter_policy = default_config.filter_policy
filter_policy.load_from_config(**{"files": {"policy1": "dummy1"}})
mol = TreeMolecule(
parent=None, smiles="CN1CCC(C(=O)c2cccc(NC(=O)c3ccc(F)cc3)c2F)CC1"
)
reaction = SmilesBasedRetroReaction(
mol, reactants_str="CN1CCC(Cl)CC1.N#Cc1cccc(NC(=O)c2ccc(F)cc2)c1F.O"
)
with pytest.raises(PolicyException, match="selected"):
filter_policy(reaction)
filter_policy.select("policy1")
filter_policy._config.filter_cutoff = 0.9
with pytest.raises(RejectionException):
filter_policy(reaction)
filter_policy._config.filter_cutoff = 0.15
filter_policy(reaction)
def test_reactants_count_rejection(default_config):
smarts = (
"([C:3]-[N;H0;D2;+0:2]=[C;H0;D3;+0:1](-[c:4]1:[c:5]:[c:6]:[c:7]:[c:8]:[c:9]:1)-[c;H0;D3;+0:11](:[c:10]):[c:12])>>"
"(O=[C;H0;D3;+0:1](-[NH;D2;+0:2]-[C:3])-[c:4]1:[c:5]:[c:6]:[c:7]:[c:8]:[c:9]:1.[c:10]:[cH;D2;+0:11]:[c:12])"
)
mol = TreeMolecule(parent=None, smiles="c1c2c(ccc1)CCN=C2c3ccccc3")
rxn1 = TemplatedRetroReaction(mol=mol, smarts=smarts)
filter = ReactantsCountFilter("dummy", default_config)
assert len(rxn1.reactants) == 2
rxn2 = rxn1.copy(index=1)
if len(rxn1.reactants[0]) == 1:
rxn1, rxn2 = rxn2, rxn1
assert filter(rxn2) is None
with pytest.raises(RejectionException):
filter(rxn1)
|
evennia/scripts/manager.py | Henddher/evennia | 1,544 | 11149544 | <gh_stars>1000+
"""
The custom manager for Scripts.
"""
from django.db.models import Q
from evennia.typeclasses.managers import TypedObjectManager, TypeclassManager
from evennia.utils.utils import make_iter
__all__ = ("ScriptManager",)
_GA = object.__getattribute__
VALIDATE_ITERATION = 0
class ScriptDBManager(TypedObjectManager):
"""
This Scriptmanager implements methods for searching
and manipulating Scripts directly from the database.
Evennia-specific search methods (will return Typeclasses or
lists of Typeclasses, whereas Django-general methods will return
Querysets or database objects).
dbref (converter)
get_id (or dbref_search)
get_dbref_range
object_totals
typeclass_search
get_all_scripts_on_obj
get_all_scripts
delete_script
remove_non_persistent
validate
script_search (equivalent to evennia.search_script)
copy_script
"""
def get_all_scripts_on_obj(self, obj, key=None):
"""
Find all Scripts related to a particular object.
Args:
obj (Object): Object whose Scripts we are looking for.
key (str, optional): Script identifier - can be given as a
dbref or name string. If given, only scripts matching the
key on the object will be returned.
Returns:
matches (list): Matching scripts.
"""
if not obj:
return []
account = _GA(_GA(obj, "__dbclass__"), "__name__") == "AccountDB"
if key:
dbref = self.dbref(key)
if dbref or dbref == 0:
if account:
return self.filter(db_account=obj, id=dbref)
else:
return self.filter(db_obj=obj, id=dbref)
elif account:
return self.filter(db_account=obj, db_key=key)
else:
return self.filter(db_obj=obj, db_key=key)
elif account:
return self.filter(db_account=obj)
else:
return self.filter(db_obj=obj)
def get_all_scripts(self, key=None):
"""
Get all scripts in the database.
Args:
key (str or int, optional): Restrict result to only those
with matching key or dbref.
Returns:
scripts (list): All scripts found, or those matching `key`.
"""
if key:
script = []
dbref = self.dbref(key)
if dbref:
return self.filter(id=dbref)
return self.filter(db_key__iexact=key.strip())
return self.all()
def delete_script(self, dbref):
"""
This stops and deletes a specific script directly from the
script database.
Args:
dbref (int): Database unique id.
Notes:
This might be needed for global scripts not tied to a
specific game object
"""
scripts = self.get_id(dbref)
for script in make_iter(scripts):
script.stop()
def remove_non_persistent(self, obj=None):
"""
This cleans up the script database of all non-persistent
scripts. It is called every time the server restarts.
Args:
obj (Object, optional): Only remove non-persistent scripts
assigned to this object.
"""
if obj:
to_stop = self.filter(db_obj=obj, db_persistent=False, db_is_active=True)
to_delete = self.filter(db_obj=obj, db_persistent=False, db_is_active=False)
else:
to_stop = self.filter(db_persistent=False, db_is_active=True)
to_delete = self.filter(db_persistent=False, db_is_active=False)
nr_deleted = to_stop.count() + to_delete.count()
for script in to_stop:
script.stop()
for script in to_delete:
script.delete()
return nr_deleted
def validate(self, scripts=None, obj=None, key=None, dbref=None, init_mode=None):
"""
This will step through the script database and make sure
all objects run scripts that are still valid in the context
they are in. This is called by the game engine at regular
intervals but can also be initiated by player scripts.
Only one of the arguments is supposed to be supplied
at a time, since they are mutually exclusive.
Args:
scripts (list, optional): A list of script objects to
validate.
obj (Object, optional): Validate only scripts defined on
this object.
key (str): Validate only scripts with this key.
dbref (int): Validate only the single script with this
particular id.
init_mode (str, optional): This is used during server
upstart and can have three values:
- `None` (no init mode). Called during run.
- `"reset"` - server reboot. Kill non-persistent scripts
- `"reload"` - server reload. Keep non-persistent scripts.
Returns:
nr_started, nr_stopped (tuple): Statistics on how many objects
were started and stopped.
Notes:
This method also makes sure to start any scripts it validates,
which should be harmless, since already-active scripts have
the property 'is_running' set and will be skipped.
"""
# we store a variable that tracks if we are calling a
# validation from within another validation (avoids
# loops).
global VALIDATE_ITERATION
if VALIDATE_ITERATION > 0:
# we are in a nested validation. Exit.
VALIDATE_ITERATION -= 1
return None, None
VALIDATE_ITERATION += 1
# not in a validation - loop. Validate as normal.
nr_started = 0
nr_stopped = 0
if init_mode:
if init_mode == "reset":
# special mode when server starts or object logs in.
# This deletes all non-persistent scripts from database
nr_stopped += self.remove_non_persistent(obj=obj)
# turn off the activity flag for all remaining scripts
scripts = self.get_all_scripts()
for script in scripts:
script.is_active = False
elif not scripts:
# normal operation
if dbref and self.dbref(dbref, reqhash=False):
scripts = self.get_id(dbref)
elif obj:
scripts = self.get_all_scripts_on_obj(obj, key=key)
else:
scripts = self.get_all_scripts(key=key)
if not scripts:
# no scripts available to validate
VALIDATE_ITERATION -= 1
return None, None
for script in scripts:
if script.is_valid():
nr_started += script.start(force_restart=init_mode)
else:
script.stop()
nr_stopped += 1
VALIDATE_ITERATION -= 1
return nr_started, nr_stopped
def search_script(self, ostring, obj=None, only_timed=False, typeclass=None):
"""
Search for a particular script.
Args:
ostring (str): Search criterion - a script dbref or key.
obj (Object, optional): Limit search to scripts defined on
this object.
only_timed (bool): Limit search only to scripts that run
on a timer.
typeclass (class or str): Typeclass or path to typeclass.
"""
ostring = ostring.strip()
dbref = self.dbref(ostring)
if dbref:
# this is a dbref, try to find the script directly
dbref_match = self.dbref_search(dbref)
if dbref_match and not (
(obj and obj != dbref_match.obj) or (only_timed and dbref_match.interval)
):
return [dbref_match]
if typeclass:
if callable(typeclass):
typeclass = "%s.%s" % (typeclass.__module__, typeclass.__name__)
else:
typeclass = "%s" % typeclass
# not a dbref; normal search
obj_restriction = obj and Q(db_obj=obj) or Q()
timed_restriction = only_timed and Q(db_interval__gt=0) or Q()
typeclass_restriction = typeclass and Q(db_typeclass_path=typeclass) or Q()
scripts = self.filter(
timed_restriction & obj_restriction & typeclass_restriction & Q(db_key__iexact=ostring)
)
return scripts
# back-compatibility alias
script_search = search_script
def copy_script(self, original_script, new_key=None, new_obj=None, new_locks=None):
"""
Make an identical copy of the original_script.
Args:
original_script (Script): The Script to copy.
new_key (str, optional): Rename the copy.
new_obj (Object, optional): Place copy on different Object.
new_locks (str, optional): Give copy different locks from
the original.
Returns:
script_copy (Script): A new Script instance, copied from
the original.
"""
typeclass = original_script.typeclass_path
new_key = new_key if new_key is not None else original_script.key
new_obj = new_obj if new_obj is not None else original_script.obj
new_locks = new_locks if new_locks is not None else original_script.db_lock_storage
from evennia.utils import create
new_script = create.create_script(
typeclass, key=new_key, obj=new_obj, locks=new_locks, autostart=True
)
return new_script
class ScriptManager(ScriptDBManager, TypeclassManager):
pass
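# Illustrative sketch (not part of the original module): how the manager's
# search and validation helpers are typically used. The manager instance and
# the script key below are placeholders for demonstration only.
def _example_script_manager_usage(script_manager):
    # Look up scripts by key, optionally restricted to timed scripts.
    timed_scripts = script_manager.search_script("tick_handler", only_timed=True)
    # Revalidate all scripts after a reload and collect start/stop statistics.
    nr_started, nr_stopped = script_manager.validate(init_mode="reload")
    return timed_scripts, nr_started, nr_stopped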
|
snips_nlu/intent_classifier/featurizer.py | CharlyBlavier/snips-nlu-Copy | 3,764 | 11149568 | <filename>snips_nlu/intent_classifier/featurizer.py<gh_stars>1000+
from __future__ import division, unicode_literals
import json
from builtins import str, zip
from copy import deepcopy
from pathlib import Path
from future.utils import iteritems
from snips_nlu.common.utils import (
json_string, fitted_required, replace_entities_with_placeholders,
check_persisted_path)
from snips_nlu.constants import (
DATA, ENTITY, ENTITY_KIND, LANGUAGE, NGRAM, TEXT, ENTITIES)
from snips_nlu.dataset import get_text_from_chunks, validate_and_format_dataset
from snips_nlu.entity_parser.builtin_entity_parser import (
is_builtin_entity)
from snips_nlu.exceptions import (_EmptyDatasetUtterancesError, LoadingError)
from snips_nlu.languages import get_default_sep
from snips_nlu.pipeline.configs import FeaturizerConfig
from snips_nlu.pipeline.configs.intent_classifier import (
CooccurrenceVectorizerConfig, TfidfVectorizerConfig)
from snips_nlu.pipeline.processing_unit import ProcessingUnit
from snips_nlu.preprocessing import stem, tokenize_light
from snips_nlu.resources import get_stop_words, get_word_cluster
from snips_nlu.slot_filler.features_utils import get_all_ngrams
@ProcessingUnit.register("featurizer")
class Featurizer(ProcessingUnit):
"""Feature extractor for text classification relying on ngrams tfidf and
optionally word cooccurrence features"""
config_type = FeaturizerConfig
def __init__(self, config=None, **shared):
super(Featurizer, self).__init__(config, **shared)
self.language = None
self.tfidf_vectorizer = None
self.cooccurrence_vectorizer = None
@property
def fitted(self):
if not self.tfidf_vectorizer or not self.tfidf_vectorizer.vocabulary:
return False
return True
@property
def feature_index_to_feature_name(self):
"""Maps the feature index of the feature matrix to printable features
names. Mainly useful for debug.
Returns:
dict: a dict mapping feature indices to printable features names
"""
if not self.fitted:
return dict()
index = {
i: "ngram:%s" % ng
for ng, i in iteritems(self.tfidf_vectorizer.vocabulary)
}
num_ng = len(index)
if self.cooccurrence_vectorizer is not None:
for word_pair, j in iteritems(
self.cooccurrence_vectorizer.word_pairs):
index[j + num_ng] = "pair:%s+%s" % (word_pair[0], word_pair[1])
return index
def fit(self, dataset, utterances, classes, none_class):
self.fit_transform(dataset, utterances, classes, none_class)
return self
def fit_transform(self, dataset, utterances, classes, none_class):
import scipy.sparse as sp
dataset = validate_and_format_dataset(dataset)
self.language = dataset[LANGUAGE]
utterances_texts = (get_text_from_chunks(u[DATA]) for u in utterances)
if not any(tokenize_light(q, self.language) for q in utterances_texts):
raise _EmptyDatasetUtterancesError(
"Tokenized utterances are empty")
x_tfidf = self._fit_transform_tfidf_vectorizer(
utterances, classes, dataset)
x = x_tfidf
if self.config.added_cooccurrence_feature_ratio:
self._fit_cooccurrence_vectorizer(
utterances, classes, none_class, dataset)
x_cooccurrence = self.cooccurrence_vectorizer.transform(utterances)
x = sp.hstack((x_tfidf, x_cooccurrence))
return x
def transform(self, utterances):
import scipy.sparse as sp
x = self.tfidf_vectorizer.transform(utterances)
if self.cooccurrence_vectorizer:
x_cooccurrence = self.cooccurrence_vectorizer.transform(utterances)
x = sp.hstack((x, x_cooccurrence))
return x
def _fit_transform_tfidf_vectorizer(self, x, y, dataset):
from sklearn.feature_selection import chi2
self.tfidf_vectorizer = TfidfVectorizer(
config=self.config.tfidf_vectorizer_config,
builtin_entity_parser=self.builtin_entity_parser,
custom_entity_parser=self.custom_entity_parser,
resources=self.resources,
random_state=self.random_state,
)
x_tfidf = self.tfidf_vectorizer.fit_transform(x, dataset)
if not self.tfidf_vectorizer.vocabulary:
raise _EmptyDatasetUtterancesError(
"Dataset is empty or with empty utterances")
_, tfidf_pval = chi2(x_tfidf, y)
best_tfidf_features = set(i for i, v in enumerate(tfidf_pval)
if v < self.config.pvalue_threshold)
if not best_tfidf_features:
best_tfidf_features = set(
idx for idx, val in enumerate(tfidf_pval) if
val == tfidf_pval.min())
best_ngrams = [ng for ng, i in
iteritems(self.tfidf_vectorizer.vocabulary)
if i in best_tfidf_features]
self.tfidf_vectorizer.limit_vocabulary(best_ngrams)
# We can't return x_tfidf[:best_tfidf_features] because of the
# normalization in the transform of the tfidf_vectorizer;
# this would lead to inconsistent results between fit_transform(x, y)
# and fit(x, y).transform(x)
return self.tfidf_vectorizer.transform(x)
def _fit_cooccurrence_vectorizer(self, x, classes, none_class, dataset):
import numpy as np
from sklearn.feature_selection import chi2
non_null_x = (d for d, c in zip(x, classes) if c != none_class)
self.cooccurrence_vectorizer = CooccurrenceVectorizer(
config=self.config.cooccurrence_vectorizer_config,
builtin_entity_parser=self.builtin_entity_parser,
custom_entity_parser=self.custom_entity_parser,
resources=self.resources,
random_state=self.random_state,
)
x_cooccurrence = self.cooccurrence_vectorizer.fit(
non_null_x, dataset).transform(x)
if not self.cooccurrence_vectorizer.word_pairs:
return self
_, pval = chi2(x_cooccurrence, classes)
top_k = int(self.config.added_cooccurrence_feature_ratio * len(
self.tfidf_vectorizer.idf_diag))
# No selection if k is greater or equal than the number of word pairs
if top_k >= len(self.cooccurrence_vectorizer.word_pairs):
return self
top_k_cooccurrence_ix = np.argpartition(
pval, top_k - 1, axis=None)[:top_k]
top_k_cooccurrence_ix = set(top_k_cooccurrence_ix)
top_word_pairs = [
pair for pair, i in iteritems(
self.cooccurrence_vectorizer.word_pairs)
if i in top_k_cooccurrence_ix
]
self.cooccurrence_vectorizer.limit_word_pairs(top_word_pairs)
return self
@check_persisted_path
def persist(self, path):
path.mkdir()
# Persist the vectorizers
tfidf_vectorizer = None
if self.tfidf_vectorizer:
tfidf_vectorizer = self.tfidf_vectorizer.unit_name
tfidf_vectorizer_path = path / tfidf_vectorizer
self.tfidf_vectorizer.persist(tfidf_vectorizer_path)
cooccurrence_vectorizer = None
if self.cooccurrence_vectorizer:
cooccurrence_vectorizer = self.cooccurrence_vectorizer.unit_name
cooccurrence_vectorizer_path = path / cooccurrence_vectorizer
self.cooccurrence_vectorizer.persist(cooccurrence_vectorizer_path)
# Persist main object
self_as_dict = {
"language_code": self.language,
"tfidf_vectorizer": tfidf_vectorizer,
"cooccurrence_vectorizer": cooccurrence_vectorizer,
"config": self.config.to_dict()
}
featurizer_path = path / "featurizer.json"
with featurizer_path.open("w", encoding="utf-8") as f:
f.write(json_string(self_as_dict))
# Persist metadata
self.persist_metadata(path)
@classmethod
def from_path(cls, path, **shared):
path = Path(path)
model_path = path / "featurizer.json"
if not model_path.exists():
raise LoadingError("Missing featurizer model file: %s"
% model_path.name)
with model_path.open("r", encoding="utf-8") as f:
featurizer_dict = json.load(f)
featurizer_config = featurizer_dict["config"]
featurizer = cls(featurizer_config, **shared)
featurizer.language = featurizer_dict["language_code"]
tfidf_vectorizer = featurizer_dict["tfidf_vectorizer"]
if tfidf_vectorizer:
vectorizer_path = path / featurizer_dict["tfidf_vectorizer"]
tfidf_vectorizer = TfidfVectorizer.from_path(
vectorizer_path, **shared)
featurizer.tfidf_vectorizer = tfidf_vectorizer
cooccurrence_vectorizer = featurizer_dict["cooccurrence_vectorizer"]
if cooccurrence_vectorizer:
vectorizer_path = path / featurizer_dict["cooccurrence_vectorizer"]
cooccurrence_vectorizer = CooccurrenceVectorizer.from_path(
vectorizer_path, **shared)
featurizer.cooccurrence_vectorizer = cooccurrence_vectorizer
return featurizer
@ProcessingUnit.register("tfidf_vectorizer")
class TfidfVectorizer(ProcessingUnit):
"""Wrapper of the scikit-learn TfidfVectorizer"""
config_type = TfidfVectorizerConfig
def __init__(self, config=None, **shared):
super(TfidfVectorizer, self).__init__(config, **shared)
self._tfidf_vectorizer = None
self._language = None
self.builtin_entity_scope = None
def fit(self, x, dataset):
"""Fits the idf of the vectorizer on the given utterances after
enriching them with builtin entities matches, custom entities matches
and the potential word clusters matches
Args:
x (list of dict): list of utterances
dataset (dict): dataset from which x was extracted (needed to
extract the language and the builtin entity scope)
Returns:
:class:`.TfidfVectorizer`: The fitted vectorizer
"""
self.load_resources_if_needed(dataset[LANGUAGE])
self.fit_builtin_entity_parser_if_needed(dataset)
self.fit_custom_entity_parser_if_needed(dataset)
self._language = dataset[LANGUAGE]
self._init_vectorizer(self._language)
self.builtin_entity_scope = set(
e for e in dataset[ENTITIES] if is_builtin_entity(e))
preprocessed_data = self._preprocess(x)
utterances = [
self._enrich_utterance(u, builtin_ents, custom_ents, w_clusters)
for u, builtin_ents, custom_ents, w_clusters
in zip(*preprocessed_data)
]
return self._tfidf_vectorizer.fit(utterances)
def fit_transform(self, x, dataset):
"""Fits the idf of the vectorizer on the given utterances after
enriching them with builtin entities matches, custom entities matches
and the potential word clusters matches.
Returns the featurized utterances.
Args:
x (list of dict): list of utterances
dataset (dict): dataset from which x was extracted (needed to
extract the language and the builtin entity scope)
Returns:
:class:`.scipy.sparse.csr_matrix`: A sparse matrix X of shape
(len(x), len(self.vocabulary)) where X[i, j] contains the tfidf of
the ngram of index j of the vocabulary in the utterance i
"""
self.load_resources_if_needed(dataset[LANGUAGE])
self.fit_builtin_entity_parser_if_needed(dataset)
self.fit_custom_entity_parser_if_needed(dataset)
self._language = dataset[LANGUAGE]
self._init_vectorizer(self._language)
self.builtin_entity_scope = set(
e for e in dataset[ENTITIES] if is_builtin_entity(e))
preprocessed_data = self._preprocess(x)
utterances = [
self._enrich_utterance(u, builtin_ents, custom_ents, w_clusters)
for u, builtin_ents, custom_ents, w_clusters
in zip(*preprocessed_data)
]
return self._tfidf_vectorizer.fit_transform(utterances)
@property
def fitted(self):
return self._tfidf_vectorizer is not None and hasattr(
self._tfidf_vectorizer, "vocabulary_")
@fitted_required
def transform(self, x):
"""Featurizes the given utterances after enriching them with builtin
entities matches, custom entities matches and the potential word
clusters matches
Args:
x (list of dict): list of utterances
Returns:
:class:`.scipy.sparse.csr_matrix`: A sparse matrix X of shape
(len(x), len(self.vocabulary)) where X[i, j] contains the tfidf of
the ngram of index j of the vocabulary in the utterance i
Raises:
NotTrained: when the vectorizer is not fitted
"""
utterances = [self._enrich_utterance(*data)
for data in zip(*self._preprocess(x))]
return self._tfidf_vectorizer.transform(utterances)
def _preprocess(self, utterances):
normalized_utterances = deepcopy(utterances)
for u in normalized_utterances:
nb_chunks = len(u[DATA])
for i, chunk in enumerate(u[DATA]):
chunk[TEXT] = _normalize_stem(
chunk[TEXT], self.language, self.resources,
self.config.use_stemming)
if i < nb_chunks - 1:
chunk[TEXT] += " "
# Extract builtin entities on unnormalized utterances
builtin_ents = [
self.builtin_entity_parser.parse(
get_text_from_chunks(u[DATA]),
self.builtin_entity_scope, use_cache=True)
for u in utterances
]
# Extract custom entities on normalized utterances
custom_ents = [
self.custom_entity_parser.parse(
get_text_from_chunks(u[DATA]), use_cache=True)
for u in normalized_utterances
]
if self.config.word_clusters_name:
# Extract word clusters on unnormalized utterances
original_utterances_text = [get_text_from_chunks(u[DATA])
for u in utterances]
w_clusters = [
_get_word_cluster_features(
tokenize_light(u.lower(), self.language),
self.config.word_clusters_name,
self.resources)
for u in original_utterances_text
]
else:
w_clusters = [None for _ in normalized_utterances]
return normalized_utterances, builtin_ents, custom_ents, w_clusters
def _enrich_utterance(self, utterance, builtin_entities, custom_entities,
word_clusters):
custom_entities_features = [
_entity_name_to_feature(e[ENTITY_KIND], self.language)
for e in custom_entities]
builtin_entities_features = [
_builtin_entity_to_feature(ent[ENTITY_KIND], self.language)
for ent in builtin_entities
]
# We remove values of builtin slots from the utterance to avoid
# learning specific samples such as '42' or 'tomorrow'
filtered_tokens = [
chunk[TEXT] for chunk in utterance[DATA]
if ENTITY not in chunk or not is_builtin_entity(chunk[ENTITY])
]
features = get_default_sep(self.language).join(filtered_tokens)
if builtin_entities_features:
features += " " + " ".join(sorted(builtin_entities_features))
if custom_entities_features:
features += " " + " ".join(sorted(custom_entities_features))
if word_clusters:
features += " " + " ".join(sorted(word_clusters))
return features
@property
def language(self):
# Create this getter to prevent the language from being set elsewhere
# than in the fit
return self._language
@property
def vocabulary(self):
if self._tfidf_vectorizer and hasattr(
self._tfidf_vectorizer, "vocabulary_"):
return self._tfidf_vectorizer.vocabulary_
return None
@fitted_required
def limit_vocabulary(self, ngrams):
"""Restrict the vectorizer vocabulary to the given ngrams
Args:
ngrams (iterable of str or tuples of str): ngrams to keep
Returns:
:class:`.TfidfVectorizer`: The vectorizer with limited vocabulary
"""
import scipy.sparse as sp
ngrams = set(ngrams)
vocab = self.vocabulary
existing_ngrams = set(vocab)
extra_values = ngrams - existing_ngrams
if extra_values:
raise ValueError("Invalid ngrams %s, expected values in word_pairs"
% sorted(extra_values))
new_ngrams, new_index = zip(*sorted((ng, vocab[ng]) for ng in ngrams))
self._tfidf_vectorizer.vocabulary_ = {
ng: new_i for new_i, ng in enumerate(new_ngrams)
}
# pylint: disable=protected-access
# The new_idf_data is valid because the previous _idf_diag was indexed
# with sorted ngrams and new_index is also indexed with sorted ngrams
new_idf_data = self._tfidf_vectorizer._tfidf._idf_diag.data[
list(new_index)]
self._tfidf_vectorizer._tfidf._idf_diag = sp.spdiags(
new_idf_data, diags=0, m=len(new_index), n=len(new_index),
format="csr")
# pylint: enable=protected-access
return self
@property
def idf_diag(self):
if self._tfidf_vectorizer and hasattr(
self._tfidf_vectorizer, "vocabulary_"):
return self._tfidf_vectorizer.idf_
return None
def _init_vectorizer(self, language):
from sklearn.feature_extraction.text import (
TfidfVectorizer as SklearnTfidfVectorizer)
self._tfidf_vectorizer = SklearnTfidfVectorizer(
tokenizer=lambda x: tokenize_light(x, language))
return self
@check_persisted_path
def persist(self, path):
path.mkdir()
vectorizer_ = None
if self._tfidf_vectorizer is not None:
vocab = {k: int(v) for k, v in iteritems(self.vocabulary)}
idf_diag = self.idf_diag.tolist()
vectorizer_ = {
"vocab": vocab,
"idf_diag": idf_diag
}
builtin_entity_scope = None
if self.builtin_entity_scope is not None:
builtin_entity_scope = list(self.builtin_entity_scope)
self_as_dict = {
"vectorizer": vectorizer_,
"language_code": self.language,
"builtin_entity_scope": builtin_entity_scope,
"config": self.config.to_dict(),
}
vectorizer_path = path / "vectorizer.json"
with vectorizer_path.open("w", encoding="utf-8") as f:
f.write(json_string(self_as_dict))
self.persist_metadata(path)
@classmethod
# pylint: disable=W0212
def from_path(cls, path, **shared):
import numpy as np
import scipy.sparse as sp
from sklearn.feature_extraction.text import (
TfidfTransformer, TfidfVectorizer as SklearnTfidfVectorizer)
path = Path(path)
model_path = path / "vectorizer.json"
if not model_path.exists():
raise LoadingError("Missing vectorizer model file: %s"
% model_path.name)
with model_path.open("r", encoding="utf-8") as f:
vectorizer_dict = json.load(f)
vectorizer = cls(vectorizer_dict["config"], **shared)
vectorizer._language = vectorizer_dict["language_code"]
builtin_entity_scope = vectorizer_dict["builtin_entity_scope"]
if builtin_entity_scope is not None:
builtin_entity_scope = set(builtin_entity_scope)
vectorizer.builtin_entity_scope = builtin_entity_scope
vectorizer_ = vectorizer_dict["vectorizer"]
if vectorizer_:
vocab = vectorizer_["vocab"]
idf_diag_data = vectorizer_["idf_diag"]
idf_diag_data = np.array(idf_diag_data)
idf_diag_shape = (len(idf_diag_data), len(idf_diag_data))
row = list(range(idf_diag_shape[0]))
col = list(range(idf_diag_shape[0]))
idf_diag = sp.csr_matrix(
(idf_diag_data, (row, col)), shape=idf_diag_shape)
tfidf_transformer = TfidfTransformer()
tfidf_transformer._idf_diag = idf_diag
vectorizer_ = SklearnTfidfVectorizer(
tokenizer=lambda x: tokenize_light(x, vectorizer._language))
vectorizer_.vocabulary_ = vocab
vectorizer_._tfidf = tfidf_transformer
vectorizer._tfidf_vectorizer = vectorizer_
return vectorizer
@ProcessingUnit.register("cooccurrence_vectorizer")
class CooccurrenceVectorizer(ProcessingUnit):
"""Featurizer that takes utterances and extracts ordered word cooccurrence
features matrix from them"""
config_type = CooccurrenceVectorizerConfig
def __init__(self, config=None, **shared):
super(CooccurrenceVectorizer, self).__init__(config, **shared)
self._word_pairs = None
self._language = None
self.builtin_entity_scope = None
@property
def language(self):
# Create this getter to prevent the language from being set elsewhere
# than in the fit
return self._language
@property
def word_pairs(self):
return self._word_pairs
def fit(self, x, dataset):
"""Fits the CooccurrenceVectorizer
Given a list of utterances the CooccurrenceVectorizer will extract word
pairs appearing in the same utterance. The order in which the words
appear is kept. Additionally, if self.config.window_size is not None
then the vectorizer will only look in a context window of
self.config.window_size after each word.
Args:
x (iterable): list of utterances
dataset (dict): dataset from which x was extracted (needed to
extract the language and the builtin entity scope)
Returns:
:class:`.CooccurrenceVectorizer`: The fitted vectorizer
"""
self.load_resources_if_needed(dataset[LANGUAGE])
self.fit_builtin_entity_parser_if_needed(dataset)
self.fit_custom_entity_parser_if_needed(dataset)
self._language = dataset[LANGUAGE]
self.builtin_entity_scope = set(
e for e in dataset[ENTITIES] if is_builtin_entity(e))
preprocessed = self._preprocess(list(x))
utterances = [
self._enrich_utterance(utterance, builtin_ents, custom_ent)
for utterance, builtin_ents, custom_ent in zip(*preprocessed)]
word_pairs = set(
p for u in utterances for p in self._extract_word_pairs(u))
self._word_pairs = {
pair: i for i, pair in enumerate(sorted(word_pairs))
}
return self
@property
def fitted(self):
"""Whether or not the vectorizer is fitted"""
return self.word_pairs is not None
def fit_transform(self, x, dataset):
"""Fits the vectorizer and returns the feature matrix
Args:
x (list of dict): list of utterances
dataset (dict): dataset from which x was extracted (needed to
extract the language and the builtin entity scope)
Returns:
:class:`.scipy.sparse.csr_matrix`: A sparse matrix X of shape
(len(x), len(self.word_pairs)) where
X[i, j] = 1.0 if x[i] contains the word cooccurrence
(w1, w2) and if self.word_pairs[(w1, w2)] = j
"""
return self.fit(x, dataset).transform(x)
def _enrich_utterance(self, x, builtin_ents, custom_ents):
utterance = get_text_from_chunks(x[DATA])
all_entities = builtin_ents + custom_ents
placeholder_fn = self._placeholder_fn
# Replace entities with placeholders
enriched_utterance = replace_entities_with_placeholders(
utterance, all_entities, placeholder_fn)[1]
# Tokenize
enriched_utterance = tokenize_light(enriched_utterance, self.language)
# Remove the unknown words replacement strings if needed
if self.config.unknown_words_replacement_string:
enriched_utterance = [
t for t in enriched_utterance
if t != self.config.unknown_words_replacement_string
]
return enriched_utterance
@fitted_required
def transform(self, x):
"""Computes the cooccurrence feature matrix.
Args:
x (list of dict): list of utterances
Returns:
:class:`.scipy.sparse.csr_matrix`: A sparse matrix X of shape
(len(x), len(self.word_pairs)) where X[i, j] = 1.0 if
x[i] contains the word cooccurrence (w1, w2) and if
self.word_pairs[(w1, w2)] = j
Raises:
NotTrained: when the vectorizer is not fitted
"""
import numpy as np
import scipy.sparse as sp
preprocessed = self._preprocess(x)
utterances = [
self._enrich_utterance(utterance, builtin_ents, custom_ent)
for utterance, builtin_ents, custom_ent in zip(*preprocessed)]
x_coo = sp.dok_matrix((len(x), len(self.word_pairs)), dtype=np.int32)
for i, u in enumerate(utterances):
for p in self._extract_word_pairs(u):
if p in self.word_pairs:
x_coo[i, self.word_pairs[p]] = 1
return x_coo.tocsr()
def _preprocess(self, x):
# Extract all entities on unnormalized data
builtin_ents = [
self.builtin_entity_parser.parse(
get_text_from_chunks(u[DATA]),
self.builtin_entity_scope,
use_cache=True
) for u in x
]
custom_ents = [
self.custom_entity_parser.parse(
get_text_from_chunks(u[DATA]), use_cache=True)
for u in x
]
return x, builtin_ents, custom_ents
def _extract_word_pairs(self, utterance):
if self.config.filter_stop_words:
stop_words = get_stop_words(self.resources)
utterance = [t for t in utterance if t not in stop_words]
pairs = set()
for j, w1 in enumerate(utterance):
max_index = None
if self.config.window_size is not None:
max_index = j + self.config.window_size + 1
for w2 in utterance[j + 1:max_index]:
key = (w1, w2)
if not self.config.keep_order:
key = tuple(sorted(key))
pairs.add(key)
return pairs
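    # Worked illustration of _extract_word_pairs above (assumed config:
    # keep_order=True, window_size=2, filter_stop_words=False): for the tokens
    # ["set", "an", "alarm"] it returns {("set", "an"), ("set", "alarm"),
    # ("an", "alarm")}; with keep_order=False each pair is sorted
    # alphabetically before being added.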
@fitted_required
def limit_word_pairs(self, word_pairs):
"""Restrict the vectorizer word pairs to the given word pairs
Args:
word_pairs (iterable of 2-tuples (str, str)): word_pairs to keep
Returns:
:class:`.CooccurrenceVectorizer`: The vectorizer with limited
word pairs
"""
word_pairs = set(word_pairs)
existing_pairs = set(self.word_pairs)
extra_values = word_pairs - existing_pairs
if extra_values:
raise ValueError(
"Invalid word pairs %s, expected values in word_pairs"
% sorted(extra_values))
self._word_pairs = {
ng: new_i for new_i, ng in enumerate(sorted(word_pairs))
}
return self
def _placeholder_fn(self, entity_name):
return "".join(
tokenize_light(str(entity_name), str(self.language))).upper()
@check_persisted_path
def persist(self, path):
path.mkdir()
builtin_entity_scope = None
if self.builtin_entity_scope is not None:
builtin_entity_scope = list(self.builtin_entity_scope)
self_as_dict = {
"language_code": self.language,
"word_pairs": {
i: list(p) for p, i in iteritems(self.word_pairs)
},
"builtin_entity_scope": builtin_entity_scope,
"config": self.config.to_dict()
}
vectorizer_json = json_string(self_as_dict)
vectorizer_path = path / "vectorizer.json"
with vectorizer_path.open(mode="w", encoding="utf8") as f:
f.write(vectorizer_json)
self.persist_metadata(path)
@classmethod
# pylint: disable=protected-access
def from_path(cls, path, **shared):
path = Path(path)
model_path = path / "vectorizer.json"
if not model_path.exists():
raise LoadingError("Missing vectorizer model file: %s"
% model_path.name)
with model_path.open(encoding="utf8") as f:
vectorizer_dict = json.load(f)
config = vectorizer_dict.pop("config")
self = cls(config, **shared)
self._language = vectorizer_dict["language_code"]
self._word_pairs = None
builtin_entity_scope = vectorizer_dict["builtin_entity_scope"]
if builtin_entity_scope is not None:
builtin_entity_scope = set(builtin_entity_scope)
self.builtin_entity_scope = builtin_entity_scope
if vectorizer_dict["word_pairs"]:
self._word_pairs = {
tuple(p): int(i)
for i, p in iteritems(vectorizer_dict["word_pairs"])
}
return self
def _entity_name_to_feature(entity_name, language):
return "entityfeature%s" % "".join(tokenize_light(
entity_name.lower(), language))
def _builtin_entity_to_feature(builtin_entity_label, language):
return "builtinentityfeature%s" % "".join(tokenize_light(
builtin_entity_label.lower(), language))
def _normalize_stem(text, language, resources, use_stemming):
from snips_nlu_utils import normalize
if use_stemming:
return stem(text, language, resources)
return normalize(text)
def _get_word_cluster_features(query_tokens, clusters_name, resources):
if not clusters_name:
return []
ngrams = get_all_ngrams(query_tokens)
cluster_features = []
for ngram in ngrams:
cluster = get_word_cluster(resources, clusters_name).get(
ngram[NGRAM].lower(), None)
if cluster is not None:
cluster_features.append(cluster)
return cluster_features
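# Illustrative sketch (not part of the original module): the intended two-step
# use of the Featurizer. The dataset, utterances, classes and none_class
# objects are assumed to come from snips_nlu's dataset tooling and from the
# intent classifier that owns this featurizer; they are placeholders here.
def _example_featurizer_flow(featurizer, dataset, utterances, classes,
                             none_class):
    # Fit the tf-idf (and optional cooccurrence) vectorizers and build the
    # training feature matrix in one pass.
    x_train = featurizer.fit_transform(dataset, utterances, classes, none_class)
    # At inference time only transform() is needed.
    x_infer = featurizer.transform(utterances)
    return x_train, x_infer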
|
mealpy/math_based/HC.py | thieu1995/mealpy | 162 | 11149603 | <reponame>thieu1995/mealpy
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu" at 10:08, 02/03/2021 %
# %
# Email: <EMAIL> %
# Homepage: https://www.researchgate.net/profile/Nguyen_Thieu2 %
# Github: https://github.com/thieu1995 %
# ------------------------------------------------------------------------------------------------------%
import concurrent.futures as parallel
from functools import partial
import numpy as np
from mealpy.optimizer import Optimizer
class OriginalHC(Optimizer):
"""
The original version of: Hill Climbing (HC)
Notes:
The number of neighbour solutions is defined by the user
The step size used to generate each neighbour is randomized
"""
def __init__(self, problem, epoch=10000, pop_size=100, neighbour_size=50, **kwargs):
"""
Args:
epoch (int): maximum number of iterations, default = 10000
pop_size (int): population size, default = 100
neighbour_size (int): number of neighbour solutions generated per epoch, default = 50
"""
super().__init__(problem, kwargs)
self.nfe_per_epoch = pop_size
self.sort_flag = False
self.epoch = epoch
self.pop_size = pop_size
self.neighbour_size = neighbour_size
def evolve(self, mode='sequential', epoch=None, pop=None, g_best=None):
"""
Args:
mode (str): 'sequential', 'thread', 'process'
+ 'sequential': recommended for simple and small task (< 10 seconds for calculating objective)
+ 'thread': recommended for IO bound task, or small computing task (< 2 minutes for calculating objective)
+ 'process': recommended for hard and big task (> 2 minutes for calculating objective)
Returns:
list: the generated neighbour solutions, each as [position, fitness value]
"""
step_size = np.mean(self.problem.ub - self.problem.lb) * np.exp(-2 * (epoch + 1) / self.epoch)
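        # Step-size decay illustration (assumed bounds lb=-10, ub=10, so that
        # np.mean(self.problem.ub - self.problem.lb) = 20, with self.epoch = 1000):
        # the factor np.exp(-2 * (epoch + 1) / self.epoch) shrinks from about
        # exp(-0.002) ~= 1.0 at the first epoch to exp(-2) ~= 0.135 at the last,
        # so the neighbourhood radius contracts from roughly 20 to roughly 2.7.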
if mode != "sequential":
print("Original HC algorithm only support sequential process!")
exit(0)
pop_neighbours = []
for i in range(0, self.neighbour_size):
pos_new = g_best[self.ID_POS] + np.random.normal(0, 1, self.problem.n_dims) * step_size
pos_new = self.amend_position_faster(pos_new)
fit_new = self.get_fitness_position(pos_new)
pop_neighbours.append([pos_new, fit_new])
pop_neighbours.append(g_best)
return pop_neighbours
class BaseHC(OriginalHC):
"""
The modified version of: Hill Climbing (HC), based on the idea of a swarm of people trying to climb a mountain
Notes:
The number of neighbour solutions is equal to the population size
The step size used to generate neighbours is randomized and based on the rank of each solution:
+ Solutions near the top of the mountain move more slowly than those near the bottom.
+ In other words: exploration when far from the global best, exploitation when close to it.
Whoever reaches the top of the mountain first is the winner (the global optimum).
"""
def __init__(self, problem, epoch=10000, pop_size=100, neighbour_size=50, **kwargs):
"""
Args:
epoch (int): maximum number of iterations, default = 10000
pop_size (int): population size, default = 100
neighbour_size (int): number of neighbour solutions generated per individual, default = 50
"""
super().__init__(problem, epoch, pop_size, neighbour_size, **kwargs)
self.nfe_per_epoch = pop_size
self.sort_flag = True
self.epoch = epoch
self.pop_size = pop_size
self.neighbour_size = neighbour_size
def create_child(self, idx, pop, g_best, step_size, ranks):
ss = step_size * ranks[idx]
pop_neighbours = []
for j in range(0, self.neighbour_size):
pos_new = pop[idx][self.ID_POS] + np.random.normal(0, 1, self.problem.n_dims) * ss
pos_new = self.amend_position_faster(pos_new)
fit_new = self.get_fitness_position(pos_new)
pop_neighbours.append([pos_new, fit_new])
pop_neighbours.append(g_best)
_, agent = self.get_global_best_solution(pop_neighbours)
return agent
def evolve(self, mode='sequential', epoch=None, pop=None, g_best=None):
"""
Args:
mode (str): 'sequential', 'thread', 'process'
+ 'sequential': recommended for simple and small task (< 10 seconds for calculating objective)
+ 'thread': recommended for IO bound task, or small computing task (< 2 minutes for calculating objective)
+ 'process': recommended for hard and big task (> 2 minutes for calculating objective)
Returns:
list: the updated population, each agent as [position, fitness value]
"""
pop_copy = pop.copy()
pop_idx = np.array(range(0, self.pop_size))
ranks = np.array(list(range(1, self.pop_size + 1)))
ranks = ranks / sum(ranks)
step_size = np.mean(self.problem.ub - self.problem.lb) * np.exp(-2 * (epoch + 1) / self.epoch)
if mode == "thread":
with parallel.ThreadPoolExecutor() as executor:
pop_child = executor.map(partial(self.create_child, pop=pop_copy, g_best=g_best, step_size=step_size, ranks=ranks), pop_idx)
pop_new = [x for x in pop_child]
elif mode == "process":
with parallel.ProcessPoolExecutor() as executor:
pop_child = executor.map(partial(self.create_child, pop=pop_copy, g_best=g_best, step_size=step_size, ranks=ranks), pop_idx)
pop_new = [x for x in pop_child]
else:
pop_new = [self.create_child(idx, pop_copy, g_best, step_size, ranks) for idx in pop_idx]
return pop_new
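# Illustrative sketch (not part of the original module): constructing the
# optimizer. The problem dictionary keys and the entry point that actually runs
# the epochs live in mealpy's Optimizer base class; they are assumptions here,
# not guaranteed by this file.
def _example_hc_setup():
    problem = {
        "fit_func": lambda solution: np.sum(solution ** 2),  # assumed key names
        "lb": [-10] * 5,
        "ub": [10] * 5,
        "minmax": "min",
    }
    model = BaseHC(problem, epoch=100, pop_size=50, neighbour_size=10)
    return model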
|
aliyun-python-sdk-privatelink/aliyunsdkprivatelink/request/v20200415/CreateVpcEndpointRequest.py | leafcoder/aliyun-openapi-python-sdk | 1,001 | 11149625 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkprivatelink.endpoint import endpoint_data
class CreateVpcEndpointRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Privatelink', '2020-04-15', 'CreateVpcEndpoint','privatelink')
self.set_protocol_type('https')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ClientToken(self):
return self.get_query_params().get('ClientToken')
def set_ClientToken(self,ClientToken):
self.add_query_param('ClientToken',ClientToken)
def get_SecurityGroupIds(self):
return self.get_query_params().get('SecurityGroupId')
def set_SecurityGroupIds(self, SecurityGroupIds):
for depth1 in range(len(SecurityGroupIds)):
if SecurityGroupIds[depth1] is not None:
self.add_query_param('SecurityGroupId.' + str(depth1 + 1) , SecurityGroupIds[depth1])
def get_Zones(self):
return self.get_query_params().get('Zone')
def set_Zones(self, Zones):
for depth1 in range(len(Zones)):
if Zones[depth1].get('VSwitchId') is not None:
self.add_query_param('Zone.' + str(depth1 + 1) + '.VSwitchId', Zones[depth1].get('VSwitchId'))
if Zones[depth1].get('ZoneId') is not None:
self.add_query_param('Zone.' + str(depth1 + 1) + '.ZoneId', Zones[depth1].get('ZoneId'))
if Zones[depth1].get('ip') is not None:
self.add_query_param('Zone.' + str(depth1 + 1) + '.ip', Zones[depth1].get('ip'))
def get_ServiceName(self):
return self.get_query_params().get('ServiceName')
def set_ServiceName(self,ServiceName):
self.add_query_param('ServiceName',ServiceName)
def get_DryRun(self):
return self.get_query_params().get('DryRun')
def set_DryRun(self,DryRun):
self.add_query_param('DryRun',DryRun)
def get_EndpointDescription(self):
return self.get_query_params().get('EndpointDescription')
def set_EndpointDescription(self,EndpointDescription):
self.add_query_param('EndpointDescription',EndpointDescription)
def get_EndpointName(self):
return self.get_query_params().get('EndpointName')
def set_EndpointName(self,EndpointName):
self.add_query_param('EndpointName',EndpointName)
def get_VpcId(self):
return self.get_query_params().get('VpcId')
def set_VpcId(self,VpcId):
self.add_query_param('VpcId',VpcId)
def get_ServiceId(self):
return self.get_query_params().get('ServiceId')
def set_ServiceId(self,ServiceId):
self.add_query_param('ServiceId',ServiceId) |
lib/datasets/kitti.py | mit-drl/Stereo-RCNN | 681 | 11149640 | <filename>lib/datasets/kitti.py
from __future__ import print_function
from __future__ import absolute_import
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# Modified by <NAME> for Stereo RCNN
# --------------------------------------------------------
import xml.dom.minidom as minidom
import os
# import PIL
import numpy as np
import math as m
import scipy.sparse
import subprocess
import math
import cv2
import glob
import uuid
import scipy.io as sio
import xml.etree.ElementTree as ET
import pickle
from .imdb import imdb
from .imdb import ROOT_DIR
from model.utils import kitti_utils
try:
    import cPickle  # Python 2
except ImportError:
    import pickle as cPickle  # Python 3
from model.utils.config import cfg
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
class kitti(imdb):
def __init__(self, image_set, kitti_path=None):
imdb.__init__(self, 'kitti_' + image_set)
self._image_set = image_set
assert kitti_path is not None
self._kitti_path = kitti_path
self._data_path = os.path.join(self._kitti_path)
self._classes = ('__background__', 'Car')
self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes)))
self._image_ext = '.png'
self._image_index = self._load_image_set_index_new()
# Default to roidb handler
self._roidb_handler = self.gt_roidb
if image_set == 'train' or image_set == 'val':
prefix = 'validation'
else:
prefix = 'test'
assert os.path.exists(self._kitti_path), \
'kitti path does not exist: {}'.format(self._kitti_path)
assert os.path.exists(self._data_path), \
'Path does not exist: {}'.format(self._data_path)
def img_left_path_at(self, i):
'''
Return the absolute path to image i in the image sequence.
'''
return self.img_left_path_from_index(self._image_index[i])
def img_right_path_at(self, i):
'''
Return the absolute path to image i in the image sequence.
'''
return self.img_right_path_from_index(self._image_index[i])
def img_left_path_from_index(self, index):
'''
Construct an image path from the image's "index" identifier.
'''
if self._image_set == 'test':
prefix = 'testing/image_2'
else:
prefix = 'training/image_2'
image_path = os.path.join(self._data_path, prefix,\
index + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def img_right_path_from_index(self, index):
'''
Construct an image path from the image's "index" identifier.
'''
if self._image_set == 'test':
prefix = 'testing/image_3'
else:
prefix = 'training/image_3'
image_path = os.path.join(self._data_path, prefix,\
index + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index_new(self):
'''
Load the indexes listed in this dataset's image set file.
'''
if self.name == 'kitti_train':
train_set_file = open('data/kitti/splits/train.txt', 'r')
image_index = train_set_file.read().split('\n')
elif self.name == 'kitti_val':
val_set_file = open('data/kitti/splits/val.txt', 'r')
image_index = val_set_file.read().split('\n')
return image_index
def gt_roidb(self):
'''
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
'''
cache_file = os.path.join(self._kitti_path, self.name + '_gt_roidb.pkl')
print('cache file', cache_file)
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print('{} gt roidb loaded from {}'.format(self.name, cache_file))
return roidb
gt_roidb = [self._load_kitti_annotation(index)
for index in self.image_index]
with open(cache_file, 'wb') as fid:
cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
print('wrote gt roidb to {}'.format(cache_file))
return gt_roidb
def remove_occluded_keypoints(self, objects, left=True):
'''
Generate the visible range of the bounding box according to
the occlusion relations between all objects
Remove almost totally occluded ones
'''
ix = 0 if left else 1
depth_line = np.zeros(1260, dtype=float)
for i in range(len(objects)):
for col in range(int(objects[i].boxes[ix].box[0]), int(objects[i].boxes[ix].box[2])+1):
pixel = depth_line[col]
if pixel == 0.0:
depth_line[col] = objects[i].pos[2]
elif objects[i].pos[2] < depth_line[col]:
depth_line[col] = (objects[i].pos[2]+pixel)/2.0
for i in range(len(objects)):
objects[i].boxes[ix].visible_left = objects[i].boxes[ix].box[0]
objects[i].boxes[ix].visible_right = objects[i].boxes[ix].box[2]
left_visible = True
right_visible = True
if depth_line[int(objects[i].boxes[ix].box[0])] < objects[i].pos[2]:
left_visible = False
if depth_line[int(objects[i].boxes[ix].box[2])] < objects[i].pos[2]:
right_visible = False
if right_visible == False and left_visible == False:
objects[i].boxes[ix].visible_right = objects[i].boxes[ix].box[0]
objects[i].boxes[ix].keypoints[:] = -1
for col in range(int(objects[i].boxes[ix].box[0]), int(objects[i].boxes[ix].box[2])+1):
if left_visible and depth_line[col] >= objects[i].pos[2]:
objects[i].boxes[ix].visible_right = col
elif right_visible and depth_line[col] < objects[i].pos[2]:
objects[i].boxes[ix].visible_left = col
objects = [x for x in objects if np.sum(x.boxes[ix].keypoints)>-4]
for i in range(len(objects)):
left_kpt = 5000
right_kpt = 0
for j in range(4):
if objects[i].boxes[ix].keypoints[j] != -1:
if objects[i].boxes[ix].keypoints[j] < left_kpt:
left_kpt = objects[i].boxes[ix].keypoints[j]
if objects[i].boxes[ix].keypoints[j] > right_kpt:
right_kpt = objects[i].boxes[ix].keypoints[j]
for j in range(4):
if objects[i].boxes[ix].keypoints[j] != -1:
if objects[i].boxes[ix].keypoints[j] < objects[i].boxes[ix].visible_left-5 or \
objects[i].boxes[ix].keypoints[j] > objects[i].boxes[ix].visible_right+5 or \
objects[i].boxes[ix].keypoints[j] < left_kpt+3 or \
objects[i].boxes[ix].keypoints[j] > right_kpt-3:
objects[i].boxes[ix].keypoints[j] = -1
return objects
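    # Summary of the bookkeeping above: depth_line accumulates, per image
    # column, a (partially averaged) depth of the closest boxes covering that
    # column; each object's visible_left/visible_right span is then narrowed to
    # the columns where the object itself is at least as close as depth_line,
    # objects whose four keypoints all become -1 are dropped, and remaining
    # keypoints lying outside the visible span (or within a few pixels of the
    # extreme keypoints) are invalidated with -1.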
def _load_kitti_annotation(self,index):
if self._image_set == 'test':
objects = []
else:
filename = os.path.join(self._data_path, 'training', 'label_2', index + '.txt')
calib_file = os.path.join(self._data_path, 'training', 'calib', index + '.txt')
calib_it = kitti_utils.read_obj_calibration(calib_file)
im_left = cv2.imread(self.img_left_path_from_index(index))
objects_origin = kitti_utils.read_obj_data(filename, calib_it, im_left.shape)
objects = []
objects_origin = self.remove_occluded_keypoints(objects_origin)
objects_origin = self.remove_occluded_keypoints(objects_origin, left=False)
for i in range(len(objects_origin)):
if objects_origin[i].truncate < 0.98 and objects_origin[i].occlusion < 3 and \
(objects_origin[i].boxes[0].box[3] - objects_origin[i].boxes[0].box[1])>10 and \
objects_origin[i].cls in self._classes and \
objects_origin[i].boxes[0].visible_right - objects_origin[i].boxes[0].visible_left > 3 and\
objects_origin[i].boxes[1].visible_right - objects_origin[i].boxes[1].visible_left > 3:
objects.append(objects_origin[i])
f = calib_it.p2[0,0]
cx = calib_it.p2[0,2]
base_line = (calib_it.p2[0,3] - calib_it.p3[0,3])/f
num_objs = len(objects)
boxes_left = np.zeros((num_objs, 4), dtype=np.float32)
boxes_right = np.zeros((num_objs, 4), dtype=np.float32)
boxes_merge = np.zeros((num_objs, 4), dtype=np.float32)
dim_orien = np.zeros((num_objs, 4), dtype=np.float32)
kpts = np.zeros((num_objs, 6), dtype=np.float32)
kpts_right = np.zeros((num_objs, 6), dtype=np.float32)
truncation = np.zeros((num_objs), dtype=np.float32)
occlusion = np.zeros((num_objs), dtype=np.float32)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes),dtype=np.float32)
for i in range(len(objects)):
cls = self._class_to_ind[objects[i].cls]
boxes_left[i,:] = objects[i].boxes[0].box
boxes_right[i,:] = objects[i].boxes[1].box
boxes_merge[i,:] = objects[i].boxes[2].box
dim_orien[i,0:3] = objects[i].dim
dim_orien[i,3] = objects[i].alpha
kpts[i,:4] = objects[i].boxes[0].keypoints
kpts[i,4] = objects[i].boxes[0].visible_left
kpts[i,5] = objects[i].boxes[0].visible_right
kpts_right[i,:4] = objects[i].boxes[1].keypoints
kpts_right[i,4] = objects[i].boxes[1].visible_left
kpts_right[i,5] = objects[i].boxes[1].visible_right
occlusion[i] = objects[i].occlusion
truncation[i] = objects[i].truncate
gt_classes[i] = cls
overlaps[i, cls] = 1.0
overlaps = scipy.sparse.csr_matrix(overlaps)
gt_subclasses = np.zeros((num_objs), dtype=np.int32)
gt_subclasses_flipped = np.zeros((num_objs), dtype=np.int32)
subindexes = np.zeros((num_objs, self.num_classes), dtype=np.int32)
subindexes_flipped = np.zeros((num_objs, self.num_classes), dtype=np.int32)
subindexes = scipy.sparse.csr_matrix(subindexes)
subindexes_flipped = scipy.sparse.csr_matrix(subindexes_flipped)
return {'boxes_left' : boxes_left,
'boxes_right': boxes_right,
'boxes_merge': boxes_merge,
'dim_orien' : dim_orien,
'kpts' : kpts,
'kpts_right' : kpts_right,
'truncation' : truncation,
'occlusion' : occlusion,
'gt_classes': gt_classes,
'igt_subclasses': gt_subclasses,
'gt_subclasses_flipped': gt_subclasses_flipped,
'gt_overlaps' : overlaps,
'gt_subindexes': subindexes,
'gt_subindexes_flipped': subindexes_flipped,
'flipped' : False}
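# Illustrative sketch (not part of the original module): building the dataset
# wrapper and materialising the cached ground-truth roidb. The KITTI directory
# layout (training/image_2, image_3, label_2, calib and data/kitti/splits) is
# the one assumed by the code above.
def _example_load_kitti():
    dataset = kitti('train', kitti_path='data/kitti')
    roidb = dataset.gt_roidb()  # cached to kitti_train_gt_roidb.pkl
    left_img_path = dataset.img_left_path_at(0)
    return roidb, left_img_path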
|
3rdParty/V8/v7.9.317/tools/sanitizers/sanitize_pcs.py | rajeev02101987/arangodb | 20,995 | 11149644 | <filename>3rdParty/V8/v7.9.317/tools/sanitizers/sanitize_pcs.py
#!/usr/bin/env python
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Corrects objdump output. The logic is from sancov.py, see comments there."""
# for py2/py3 compatibility
from __future__ import print_function
import sys
for line in sys.stdin:
print('0x%x' % (int(line.strip(), 16) + 4))
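# Illustrative use (assumption about how the extracted program counters are
# piped in), e.g.:
#   echo 0x00000010 | python sanitize_pcs.py   ->   prints 0x14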
|
tensorflow/python/kernel_tests/large_concat_op_test.py | danielgordon10/tensorflow | 101 | 11149650 | <reponame>danielgordon10/tensorflow
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Concat Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class LargeConcatOpTest(tf.test.TestCase):
"""Tests that belong in concat_op_test.py, but run over large tensors."""
def testConcatLargeTensors(self):
# CPU-only test, because it fails on GPUs with <= 4GB memory.
with tf.device("/cpu:0"):
a = tf.ones([2**31 + 6], dtype=tf.int8)
b = tf.zeros([1024], dtype=tf.int8)
onezeros = tf.concat(0, [a, b])
with self.test_session(use_gpu=False):
# TODO(dga): Add more depth to this test to validate correctness,
# not just non-crashingness, once other large tensor fixes have gone in.
_ = onezeros.eval()
if __name__ == "__main__":
tf.test.main()
|
capture/noworkflow/resources/demo/1/step2/simulation.py | raffaelfoidl/noworkflow | 108 | 11149707 | <reponame>raffaelfoidl/noworkflow
import csv
import sys
import matplotlib.pyplot as plt
from simulator import simulate
def run_simulation(data_a, data_b):
return simulate(csv_read(data_a), csv_read(data_b))
def csv_read(f):
return list(csv.reader(open(f, 'rU'), delimiter=':'))
def extract_column(data, column):
return [float(row[column]) for row in data]
def plot(data):
#GetTemperature
#GetPrecipitation
plt.scatter(extract_column(data, 0), extract_column(data, 1), marker='o')
plt.xlabel('Temperature')
plt.ylabel('Precipitation')
plt.savefig("output.png")
#Main Program
plot(run_simulation(sys.argv[1], sys.argv[2]))
|
src/plugins/plugin.py | BeholdersEye/PyBitmessage | 1,583 | 11149709 | <reponame>BeholdersEye/PyBitmessage
# -*- coding: utf-8 -*-
"""
Operating with plugins
"""
import logging
import pkg_resources
logger = logging.getLogger('default')
def get_plugins(group, point='', name=None, fallback=None):
"""
:param str group: plugin group
:param str point: plugin name prefix
:param name: exact plugin name
:param fallback: fallback plugin name
Iterate through plugins (the ``connect_plugin`` attribute of each entry
point) whose name starts with ``point`` or equals ``name``.
If the ``fallback`` kwarg is specified, the plugin with that name is yielded last.
"""
for ep in pkg_resources.iter_entry_points('bitmessage.' + group):
if name and ep.name == name or not point or ep.name.startswith(point):
try:
plugin = ep.load().connect_plugin
if ep.name == fallback:
_fallback = plugin
else:
yield plugin
except (AttributeError,
ImportError,
ValueError,
pkg_resources.DistributionNotFound,
pkg_resources.UnknownExtra):
logger.debug(
'Problem while loading %s', ep.name, exc_info=True)
continue
try:
yield _fallback
except NameError:
pass
def get_plugin(*args, **kwargs):
"""
:return: first available plugin from :func:`get_plugins` if any.
"""
for plugin in get_plugins(*args, **kwargs):
return plugin
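# Illustrative sketch (not part of the original module): the group, prefix and
# fallback names below are made up for demonstration and are not guaranteed to
# exist as real entry points.
def _example_plugin_lookup():
    # First plugin in the 'notification' group whose entry-point name starts
    # with "notify.", falling back to "notify.sound" if it was registered.
    return get_plugin('notification', point='notify.', fallback='notify.sound')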
|
test/dual_run.py | TysonHeart/dynomite | 3,380 | 11149714 | #!/usr/bin/env python3
import redis
class ResultMismatchError(Exception):
def __init__(self, r_result, d_result, func, *args):
self.r_result = r_result
self.d_result = d_result
self.func = func
self.args = args
def __str__(self):
ret = "\n\t======Result Mismatch=======\n"
ret += "\tQuery: %s %s" % (self.func, str(self.args))
ret += "\n\t===========================\n"
ret += "\tRedis: %s" % str(self.r_result)
ret += "\n\t===========================\n"
ret += "\tDyno: %s" % str(self.d_result)
return ret
class dual_run():
def __init__(self, standalone_redis, dyno_cluster, debug=None):
self.standalone_redis = standalone_redis
self.dyno_cluster = dyno_cluster
self.redis_conn = standalone_redis.get_connection()
self.dyno_conn = dyno_cluster.get_connection()
self.debug = debug
self.sort_before_cmp = False
# If 'self.sort_before_cmp' is True, we sort the return values (if they are of
# the list type) from Dynomite and Redis before comparing them, so that the
# comparison is order-insensitive.
def set_sort_before_compare(self, should_sort):
self.sort_before_cmp = should_sort
# Returns the underlying DynoCluster object
def get_dynomite_cluster(self):
return self.dyno_cluster
def ensure_underlying_dyno_conn_is_multi_dc(self):
self.dyno_conn = self.dyno_cluster.get_connection_to_multi_rack_dc()
assert self.dyno_conn != None , "Could not obtain connection to multi-rack DC"
def run_verify(self, func, *args):
r_result = None
d_result = None
r_func = getattr(self.redis_conn, func)
d_func = getattr(self.dyno_conn, func)
r_result = r_func(*args)
i = 0
retry_limit = 3
while i < retry_limit:
try:
d_result = d_func(*args)
if i > 0:
print("\tSucceeded in attempt {}".format(i+1))
break
except redis.exceptions.ResponseError as e:
if "Peer Node is not connected" in str(e):
i = i + 1
print("\tGot error '{}' ... Retry effort {}/{}\n\tQuery '{} {}'".format(e, i, retry_limit, func, str(args)))
continue
print("\tGot error '{}'\n\tQuery '{} {}'".format(e, func, str(args)))
break
if self.debug:
print("Query: %s %s" % (func, str(args)))
print("Redis result: %s" % str(r_result))
print("Dyno result: %s" % str(d_result))
if (self.sort_before_cmp and isinstance(r_result, list)):
r_result.sort()
d_result.sort()
if r_result != d_result:
raise ResultMismatchError(r_result, d_result, func, *args)
return d_result
def run_dynomite_only(self, func, *args):
d_result = None
d_func = getattr(self.dyno_conn, func)
i = 0
retry_limit = 3
while i < retry_limit:
try:
d_result = d_func(*args)
if i > 0:
print("\tSucceeded in attempt {}".format(i+1))
break
except redis.exceptions.ResponseError as e:
if "Peer Node is not connected" in str(e):
i = i + 1
print("\tGot error '{}' ... Retry effort {}/{}\n\tQuery '{} {}'".format(e, i, retry_limit, func, str(args)))
continue
print("\tGot error '{}'\n\tQuery '{} {}'".format(e, func, str(args)))
break
if self.debug:
print("Query: %s %s" % (func, str(args)))
print("Dyno result: %s" % str(d_result))
return d_result
def run_redis_only(self, func, *args):
r_result = None
r_func = getattr(self.redis_conn, func)
r_result = r_func(*args)
if self.debug:
print("Query: %s %s" % (func, str(args)))
print("Redis result: %s" % str(r_result))
return r_result
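# Illustrative sketch (not part of the original module): exercising the same
# command against Redis and Dynomite. The standalone_redis and dyno_cluster
# arguments are assumed to be the test harness fixtures exposing
# get_connection(), as used by the constructor above.
def _example_dual_run(standalone_redis, dyno_cluster):
    runner = dual_run(standalone_redis, dyno_cluster, debug=True)
    runner.set_sort_before_compare(True)  # order-insensitive list compares
    runner.run_verify('set', 'key1', 'value1')
    # Raises ResultMismatchError if Redis and Dynomite disagree.
    return runner.run_verify('get', 'key1')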
|
tools/rst_lint/run.py | mindspore-ai/docs | 288 | 11149729 | """The restructuredtext linter."""
import sys
from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives import register_directive
from docutils.parsers.rst.roles import register_generic_role
from sphinx.ext.autodoc.directive import AutodocDirective
from sphinx.domains.python import PyCurrentModule
from sphinx.directives.other import TocTree
from restructuredtext_lint.cli import main
class CustomDirective(Directive):
"""Base class of customized directives for python domains in sphinx."""
has_content = True
required_arguments = 1
optional_arguments = 3
final_argument_whitespace = True
def run(self):
"""run method."""
self.assert_has_content()
text = '\n'.join(self.content)
classes = []
node = nodes.container(text)
node['classes'].extend(classes)
self.add_name(node)
self.state.nested_parse(self.content, self.content_offset, node)
return [node]
class CustomDirectiveNoNested(CustomDirective):
"""Customizing CustomDirective with nonest."""
def run(self):
self.assert_has_content()
text = '\n'.join(self.content)
classes = []
node = nodes.container(text)
node['classes'].extend(classes)
self.add_name(node)
return [node]
class Autoclass(AutodocDirective):
"""Customizing automodule."""
def run(self):
"""run method."""
text = '\n'.join(self.content)
classes = []
node = nodes.container(text)
node['classes'].extend(classes)
self.add_name(node)
return [node]
class Toctree(TocTree):
"""Customizing toctree."""
def run(self):
"""run method."""
text = '\n'.join(self.content)
if self.arguments:
classes = directives.class_option(self.arguments[0])
else:
classes = []
node = nodes.container(text)
node['classes'].extend(classes)
self.add_name(node)
self.state.nested_parse(self.content, self.content_offset, node)
return [node]
class CurrentModule(PyCurrentModule):
"""Customizing currentmodule."""
has_content = False
required_arguments = 1
optional_arguments = 3
final_argument_whitespace = False
def run(self):
"""run method."""
return []
# Register directive.
register_directive('py:class', CustomDirective)
register_directive('py:method', CustomDirective)
register_directive('py:function', CustomDirective)
register_directive('py:property', CustomDirective)
register_directive('py:data', CustomDirective)
register_directive('py:obj', CustomDirective)
register_directive('automodule', Autoclass)
register_directive('autoclass', Autoclass)
register_directive('autofunction', Autoclass)
register_directive('toctree', Toctree)
register_directive('autosummary', CustomDirectiveNoNested)
register_directive('msplatformautosummary', CustomDirectiveNoNested)
register_directive('msnoteautosummary', CustomDirectiveNoNested)
register_directive('cnmsautosummary', CustomDirectiveNoNested)
register_directive('cnmsplatformautosummary', CustomDirectiveNoNested)
register_directive('cnmsnoteautosummary', CustomDirectiveNoNested)
register_directive('currentmodule', CurrentModule)
# Register roles.
register_generic_role('class', nodes.literal)
register_generic_role('func', nodes.literal)
register_generic_role('doc', nodes.literal)
register_generic_role('py:obj', nodes.literal)
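# Hedged usage sketch (added for illustration): besides the CLI entry point
# below, the restructuredtext-lint package also exposes a library API;
# lint_file() is assumed here and returns docutils-style error objects that
# carry a line number and a message.
def _lint_one_file_example(path):
    """Lint a single file after the custom directives/roles are registered."""
    import restructuredtext_lint
    for err in restructuredtext_lint.lint_file(path):
        print("{}:{}: {}".format(path, err.line, err.message))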
if __name__ == "__main__":
sys.exit(main())
|
libcloudforensics/providers/gcp/internal/storagetransfer.py | zkck/cloud-forensics-utils | 241 | 11149739 | # -*- coding: utf-8 -*-
# Copyright 2021 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Storage Transfer functionalities."""
from typing import TYPE_CHECKING, Dict, Any, Optional
import datetime
import time
from libcloudforensics import errors
from libcloudforensics import logging_utils
from libcloudforensics.providers.aws.internal import account
from libcloudforensics.providers.gcp.internal import common
from libcloudforensics.providers.utils.storage_utils import SplitStoragePath
logging_utils.SetUpLogger(__name__)
logger = logging_utils.GetLogger(__name__)
if TYPE_CHECKING:
import googleapiclient
class GoogleCloudStorageTransfer:
"""Class to call Google Cloud Storage Transfer APIs.
Attributes:
gcst_api_client: Client to interact with GCST APIs.
project_id: Google Cloud project ID.
"""
CLOUD_STORAGE_TRANSFER_API_VERSION = 'v1'
def __init__(self, project_id: Optional[str] = None) -> None:
"""Initialize the GoogleCloudStorageTransfer object.
Args:
project_id (str): Optional. Google Cloud project ID.
"""
self.gcst_api_client = None
self.project_id = project_id
def GcstApi(self) -> 'googleapiclient.discovery.Resource':
"""Get a Google Cloud Storage Transfer service object.
Returns:
googleapiclient.discovery.Resource: A Google Cloud Storage Transfer
service object.
"""
if self.gcst_api_client:
return self.gcst_api_client
self.gcst_api_client = common.CreateService(
'storagetransfer', self.CLOUD_STORAGE_TRANSFER_API_VERSION)
return self.gcst_api_client
def S3ToGCS(self, s3_path: str, zone: str, gcs_path: str) -> Dict[str, Any]:
"""Copy an S3 object to a GCS bucket.
Args:
s3_path (str): File path to the S3 resource.
Ex: s3://test/bucket/obj
zone (str): The AWS zone in which resources are located.
Available zones are listed at:
https://cloud.google.com/storage-transfer/docs/create-manage-transfer-program#s3-to-cloud # pylint: disable=line-too-long
gcs_path (str): File path to the target GCS bucket.
Ex: gs://bucket/folder
Returns:
Dict: An API operation object for a Google Cloud Storage Transfer operation.
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferOperations/list # pylint: disable=line-too-long
Raises:
TransferCreationError: If the transfer couldn't be created.
TransferExecutionError: If the transfer couldn't be run.
"""
aws_creds = account.AWSAccount(zone).session.get_credentials()
if (aws_creds is None or aws_creds.access_key is None or
aws_creds.access_key.startswith('ASIA')):
raise errors.TransferCreationError(
'Could not create transfer. No long term AWS credentials available',
__name__)
s3_bucket, s3_path = SplitStoragePath(s3_path)
gcs_bucket, gcs_path = SplitStoragePath(gcs_path)
if not gcs_path.endswith('/'):
gcs_path = gcs_path + '/'
# Don't specify a path if we're writing to the bucket root.
if gcs_path == '/':
gcs_path = ''
today = datetime.datetime.now()
transfer_job_body = {
'projectId': self.project_id,
'description': 'created_by_cfu',
'transferSpec': {
'objectConditions': {
'includePrefixes': [s3_path]
},
'awsS3DataSource': {
'bucketName': s3_bucket,
'awsAccessKey': {
'accessKeyId': aws_creds.access_key,
'secretAccessKey': aws_creds.secret_key
}
},
'gcsDataSink': {
'bucketName': gcs_bucket, 'path': gcs_path
}
},
'schedule': {
'scheduleStartDate': {
'year': today.year, 'month': today.month, 'day': today.day
},
'scheduleEndDate': {
'year': today.year, 'month': today.month, 'day': today.day
},
'endTimeOfDay': {}
},
'status': 'ENABLED'
}
logger.info('Creating transfer job')
gcst_jobs = self.GcstApi().transferJobs()
create_request = gcst_jobs.create(body=transfer_job_body)
transfer_job = create_request.execute()
logger.info('Job created: {0:s}'.format(str(transfer_job)))
job_name = transfer_job.get('name', None)
if job_name is None:
raise errors.TransferCreationError(
'Could not create transfer. Job output: {0:s}'.format(
str(transfer_job)),
__name__)
logger.info('Job created: {0:s}'.format(job_name))
gcst_transfers = self.GcstApi().transferOperations()
filter_string = ('{{"projectId": "{0:s}", "jobNames": ["{1:s}"]}}').format(
self.project_id, job_name)
status = {}
while 'operations' not in status:
time.sleep(5)
status = gcst_transfers.list(
name='transferOperations', filter=filter_string).execute()
logger.info('Waiting for transfer to start...')
logger.info('Job status: {0:s}'.format(str(status)))
while not status['operations'][0].get('done'):
time.sleep(5)
status = gcst_transfers.list(
name='transferOperations', filter=filter_string).execute()
logger.info('Waiting to finish...')
logger.info(status)
error = status['operations'][0].get('error', None)
if error:
raise errors.TransferExecutionError(
'Could not execute transfer. Job output: {0:s}'.format(str(status)),
__name__)
counters = status['operations'][0].get('metadata', {}).get('counters', {})
logger.info(
'Transferred {0:s}/{1:s} files ({2:s}/{3:s} bytes).'.format(
counters.get('objectsFoundFromSource', '0'),
counters.get('objectsCopiedToSink', '0'),
counters.get('bytesFoundFromSource', '0'),
counters.get('bytesCopiedToSink', '0')))
logger.info(
'Skipped {0:s} files ({1:s} bytes).'.format(
counters.get('objectsFromSourceSkippedBySync', '0'),
counters.get('bytesFromSourceSkippedBySync', '0')))
return status
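# --- Hedged usage sketch (added for illustration). The project ID, zone and
# bucket names below are hypothetical placeholders, not values taken from the
# original module.
if __name__ == '__main__':
  transfer = GoogleCloudStorageTransfer(project_id='my-forensics-project')
  # Copies s3://example-src-bucket/evidence/disk.img into
  # gs://example-dst-bucket/imports/ with a one-off transfer job.
  operation_status = transfer.S3ToGCS(
      s3_path='s3://example-src-bucket/evidence/disk.img',
      zone='us-east-1',
      gcs_path='gs://example-dst-bucket/imports')
  logger.info('Final transfer status: {0:s}'.format(str(operation_status)))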
|
axes/migrations/0002_auto_20151217_2044.py | AMDINDOWS/django-axes | 831 | 11149741 | <gh_stars>100-1000
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("axes", "0001_initial")]
operations = [
migrations.AlterField(
model_name="accessattempt",
name="ip_address",
field=models.GenericIPAddressField(
db_index=True, null=True, verbose_name="IP Address"
),
),
migrations.AlterField(
model_name="accessattempt",
name="trusted",
field=models.BooleanField(db_index=True, default=False),
),
migrations.AlterField(
model_name="accessattempt",
name="user_agent",
field=models.CharField(db_index=True, max_length=255),
),
migrations.AlterField(
model_name="accessattempt",
name="username",
field=models.CharField(db_index=True, max_length=255, null=True),
),
migrations.AlterField(
model_name="accesslog",
name="ip_address",
field=models.GenericIPAddressField(
db_index=True, null=True, verbose_name="IP Address"
),
),
migrations.AlterField(
model_name="accesslog",
name="trusted",
field=models.BooleanField(db_index=True, default=False),
),
migrations.AlterField(
model_name="accesslog",
name="user_agent",
field=models.CharField(db_index=True, max_length=255),
),
migrations.AlterField(
model_name="accesslog",
name="username",
field=models.CharField(db_index=True, max_length=255, null=True),
),
]
|
AppServer/google/appengine/ext/analytics/entity.py | loftwah/appscale | 790 | 11149780 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Helper file to manipulate entity keys and names."""
def EntityKind(key):
"""Given entity primary key as Reference Proto, returns entity kind.
Args:
key: primary key of entity in ReferenceProto form.
Returns:
Kind of entity in string format. Returns '' if
kind cannot be determined in some unexpected scenario.
"""
if key.path().element_list():
return key.path().element_list()[-1].type()
else:
return ''
def EntityGroupKind(key):
"""Given entity primary key as Reference Proto, returns kind of entity group.
Args:
key: primary key of entity in ReferenceProto form.
Returns:
Kind of entity group that entity belongs to in string format.
"""
return key.path().element(0).type()
def EntityListKind(keylist):
"""Given list of entity keys, return entity kind.
Args:
keylist: list of primary keys of entities in ReferenceProto form.
Returns:
Kind of entity. Returns 'None' if list is empty and 'Multi' if
entities in the list are of different kinds.
"""
kinds = map(EntityKind, keylist)
unique_kinds = set(kinds)
numkinds = len(unique_kinds)
if numkinds > 1:
return 'Multi'
elif numkinds == 1:
return unique_kinds.pop()
else:
return 'None'
def EntityGroupName(entity):
"""Given entity primary key as Reference Proto, returns entity group.
Args:
entity: primary key of entity in ReferenceProto form
Returns:
Name of entitygroup in string format.
"""
element = entity.path().element(0)
if element.has_id():
return str(element.id())
elif element.has_name():
return element.name()
else:
return 'None'
def EntityFullName(entity):
"""Given entity primary key as a Reference Proto, returns full name.
This is a concatenation of entity information along the entire
path, and includes entity kind and entity name (or id) at each level.
Args:
entity: primary key of entity in ReferenceProto form
Returns:
Full name of entity in string format with dots delimiting each element in
the path. Each element is represented as 'entity_kind:entity_id' or
'entity_kind:entity_name' as applicable.
"""
names = []
for element in entity.path().element_list():
if element.has_id():
name = '%s:%s' %(element.type(), str(element.id()))
elif element.has_name():
name = '%s:%s' %(element.type(), str(element.name()))
else:
name = '%s:None' %(element.type())
names.append(name)
fullname = '.'.join(names)
return fullname
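# --- Hedged usage sketch (added for illustration). The helpers above expect a
# datastore Reference proto; the tiny stand-ins below only mimic the
# path()/element() accessors those helpers actually touch.
class _StubElement(object):
  """Stand-in for one path element: a kind plus either an id or a name."""
  def __init__(self, kind, id_=None, name=None):
    self._kind, self._id, self._name = kind, id_, name
  def type(self):
    return self._kind
  def has_id(self):
    return self._id is not None
  def id(self):
    return self._id
  def has_name(self):
    return self._name is not None
  def name(self):
    return self._name
class _StubPath(object):
  """Stand-in for Reference.path()."""
  def __init__(self, elements):
    self._elements = elements
  def element_list(self):
    return self._elements
  def element(self, index):
    return self._elements[index]
class _StubKey(object):
  """Stand-in for the Reference proto itself."""
  def __init__(self, elements):
    self._stub_path = _StubPath(elements)
  def path(self):
    return self._stub_path
def _example():
  """Prints expected outputs for a two-level key Parent:1001 -> Child:'alpha'."""
  key = _StubKey([_StubElement('Parent', id_=1001),
                  _StubElement('Child', name='alpha')])
  print(EntityKind(key))        # Child
  print(EntityGroupKind(key))   # Parent
  print(EntityGroupName(key))   # 1001
  print(EntityFullName(key))    # Parent:1001.Child:alpha
  print(EntityListKind([key]))  # Child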
|
tests/test_process_executor_forkserver.py | hoodmane/loky | 248 | 11149793 | <reponame>hoodmane/loky
import sys
from loky import process_executor
from loky.backend import get_context
from ._executor_mixin import ExecutorMixin
if (sys.version_info[:2] > (3, 3)
and sys.platform != "win32"
and not hasattr(sys, "pypy_version_info")):
# XXX: the forkserver backend is broken with pypy3.
class ProcessPoolForkserverMixin(ExecutorMixin):
executor_type = process_executor.ProcessPoolExecutor
context = get_context('forkserver')
from ._test_process_executor import ExecutorShutdownTest
class TestsProcessPoolForkserverShutdown(ProcessPoolForkserverMixin,
ExecutorShutdownTest):
def _prime_executor(self):
pass
from ._test_process_executor import WaitTests
class TestsProcessPoolForkserverWait(ProcessPoolForkserverMixin,
WaitTests):
pass
from ._test_process_executor import AsCompletedTests
class TestsProcessPoolForkserverAsCompleted(ProcessPoolForkserverMixin,
AsCompletedTests):
pass
from ._test_process_executor import ExecutorTest
class TestsProcessPoolForkserverExecutor(ProcessPoolForkserverMixin,
ExecutorTest):
pass
|
pwnlib/constants/linux/aarch64.py | IMULMUL/python3-pwntools | 325 | 11149834 | <gh_stars>100-1000
from pwnlib.constants.constant import Constant
__NR_io_setup = Constant('__NR_io_setup', 0)
__NR_io_destroy = Constant('__NR_io_destroy', 1)
__NR_io_submit = Constant('__NR_io_submit', 2)
__NR_io_cancel = Constant('__NR_io_cancel', 3)
__NR_io_getevents = Constant('__NR_io_getevents', 4)
__NR_setxattr = Constant('__NR_setxattr', 5)
__NR_lsetxattr = Constant('__NR_lsetxattr', 6)
__NR_fsetxattr = Constant('__NR_fsetxattr', 7)
__NR_getxattr = Constant('__NR_getxattr', 8)
__NR_lgetxattr = Constant('__NR_lgetxattr', 9)
__NR_fgetxattr = Constant('__NR_fgetxattr', 10)
__NR_listxattr = Constant('__NR_listxattr', 11)
__NR_llistxattr = Constant('__NR_llistxattr', 12)
__NR_flistxattr = Constant('__NR_flistxattr', 13)
__NR_removexattr = Constant('__NR_removexattr', 14)
__NR_lremovexattr = Constant('__NR_lremovexattr', 15)
__NR_fremovexattr = Constant('__NR_fremovexattr', 16)
__NR_getcwd = Constant('__NR_getcwd', 17)
__NR_lookup_dcookie = Constant('__NR_lookup_dcookie', 18)
__NR_eventfd2 = Constant('__NR_eventfd2', 19)
__NR_epoll_create1 = Constant('__NR_epoll_create1', 20)
__NR_epoll_ctl = Constant('__NR_epoll_ctl', 21)
__NR_epoll_pwait = Constant('__NR_epoll_pwait', 22)
__NR_dup = Constant('__NR_dup', 23)
__NR_dup3 = Constant('__NR_dup3', 24)
__NR_fcntl = Constant('__NR_fcntl', 25)
__NR_inotify_init1 = Constant('__NR_inotify_init1', 26)
__NR_inotify_add_watch = Constant('__NR_inotify_add_watch', 27)
__NR_inotify_rm_watch = Constant('__NR_inotify_rm_watch', 28)
__NR_ioctl = Constant('__NR_ioctl', 29)
__NR_ioprio_set = Constant('__NR_ioprio_set', 30)
__NR_ioprio_get = Constant('__NR_ioprio_get', 31)
__NR_flock = Constant('__NR_flock', 32)
__NR_mknodat = Constant('__NR_mknodat', 33)
__NR_mkdirat = Constant('__NR_mkdirat', 34)
__NR_unlinkat = Constant('__NR_unlinkat', 35)
__NR_symlinkat = Constant('__NR_symlinkat', 36)
__NR_linkat = Constant('__NR_linkat', 37)
__NR_renameat = Constant('__NR_renameat', 38)
__NR_umount2 = Constant('__NR_umount2', 39)
__NR_mount = Constant('__NR_mount', 40)
__NR_pivot_root = Constant('__NR_pivot_root', 41)
__NR_nfsservctl = Constant('__NR_nfsservctl', 42)
__NR_statfs = Constant('__NR_statfs', 43)
__NR_fstatfs = Constant('__NR_fstatfs', 44)
__NR_truncate = Constant('__NR_truncate', 45)
__NR_ftruncate = Constant('__NR_ftruncate', 46)
__NR_fallocate = Constant('__NR_fallocate', 47)
__NR_faccessat = Constant('__NR_faccessat', 48)
__NR_chdir = Constant('__NR_chdir', 49)
__NR_fchdir = Constant('__NR_fchdir', 50)
__NR_chroot = Constant('__NR_chroot', 51)
__NR_fchmod = Constant('__NR_fchmod', 52)
__NR_fchmodat = Constant('__NR_fchmodat', 53)
__NR_fchownat = Constant('__NR_fchownat', 54)
__NR_fchown = Constant('__NR_fchown', 55)
__NR_openat = Constant('__NR_openat', 56)
__NR_close = Constant('__NR_close', 57)
__NR_vhangup = Constant('__NR_vhangup', 58)
__NR_pipe2 = Constant('__NR_pipe2', 59)
__NR_quotactl = Constant('__NR_quotactl', 60)
__NR_getdents64 = Constant('__NR_getdents64', 61)
__NR_lseek = Constant('__NR_lseek', 62)
__NR_read = Constant('__NR_read', 63)
__NR_write = Constant('__NR_write', 64)
__NR_readv = Constant('__NR_readv', 65)
__NR_writev = Constant('__NR_writev', 66)
__NR_pread64 = Constant('__NR_pread64', 67)
__NR_pwrite64 = Constant('__NR_pwrite64', 68)
__NR_preadv = Constant('__NR_preadv', 69)
__NR_pwritev = Constant('__NR_pwritev', 70)
__NR_sendfile = Constant('__NR_sendfile', 71)
__NR_pselect6 = Constant('__NR_pselect6', 72)
__NR_ppoll = Constant('__NR_ppoll', 73)
__NR_signalfd4 = Constant('__NR_signalfd4', 74)
__NR_vmsplice = Constant('__NR_vmsplice', 75)
__NR_splice = Constant('__NR_splice', 76)
__NR_tee = Constant('__NR_tee', 77)
__NR_readlinkat = Constant('__NR_readlinkat', 78)
__NR_fstatat64 = Constant('__NR_fstatat64', 79)
__NR_fstat = Constant('__NR_fstat', 80)
__NR_sync = Constant('__NR_sync', 81)
__NR_fsync = Constant('__NR_fsync', 82)
__NR_fdatasync = Constant('__NR_fdatasync', 83)
__NR_sync_file_range2 = Constant('__NR_sync_file_range2', 84)
__NR_timerfd_create = Constant('__NR_timerfd_create', 85)
__NR_timerfd_settime = Constant('__NR_timerfd_settime', 86)
__NR_timerfd_gettime = Constant('__NR_timerfd_gettime', 87)
__NR_utimensat = Constant('__NR_utimensat', 88)
__NR_acct = Constant('__NR_acct', 89)
__NR_capget = Constant('__NR_capget', 90)
__NR_capset = Constant('__NR_capset', 91)
__NR_personality = Constant('__NR_personality', 92)
__NR_exit = Constant('__NR_exit', 93)
__NR_exit_group = Constant('__NR_exit_group', 94)
__NR_waitid = Constant('__NR_waitid', 95)
__NR_set_tid_address = Constant('__NR_set_tid_address', 96)
__NR_unshare = Constant('__NR_unshare', 97)
__NR_futex = Constant('__NR_futex', 98)
__NR_set_robust_list = Constant('__NR_set_robust_list', 99)
__NR_get_robust_list = Constant('__NR_get_robust_list', 100)
__NR_nanosleep = Constant('__NR_nanosleep', 101)
__NR_getitimer = Constant('__NR_getitimer', 102)
__NR_setitimer = Constant('__NR_setitimer', 103)
__NR_kexec_load = Constant('__NR_kexec_load', 104)
__NR_init_module = Constant('__NR_init_module', 105)
__NR_delete_module = Constant('__NR_delete_module', 106)
__NR_timer_create = Constant('__NR_timer_create', 107)
__NR_timer_gettime = Constant('__NR_timer_gettime', 108)
__NR_timer_getoverrun = Constant('__NR_timer_getoverrun', 109)
__NR_timer_settime = Constant('__NR_timer_settime', 110)
__NR_timer_delete = Constant('__NR_timer_delete', 111)
__NR_clock_settime = Constant('__NR_clock_settime', 112)
__NR_clock_gettime = Constant('__NR_clock_gettime', 113)
__NR_clock_getres = Constant('__NR_clock_getres', 114)
__NR_clock_nanosleep = Constant('__NR_clock_nanosleep', 115)
__NR_syslog = Constant('__NR_syslog', 116)
__NR_ptrace = Constant('__NR_ptrace', 117)
__NR_sched_setparam = Constant('__NR_sched_setparam', 118)
__NR_sched_setscheduler = Constant('__NR_sched_setscheduler', 119)
__NR_sched_getscheduler = Constant('__NR_sched_getscheduler', 120)
__NR_sched_getparam = Constant('__NR_sched_getparam', 121)
__NR_sched_setaffinity = Constant('__NR_sched_setaffinity', 122)
__NR_sched_getaffinity = Constant('__NR_sched_getaffinity', 123)
__NR_sched_yield = Constant('__NR_sched_yield', 124)
__NR_sched_get_priority_max = Constant('__NR_sched_get_priority_max', 125)
__NR_sched_get_priority_min = Constant('__NR_sched_get_priority_min', 126)
__NR_sched_rr_get_interval = Constant('__NR_sched_rr_get_interval', 127)
__NR_restart_syscall = Constant('__NR_restart_syscall', 128)
__NR_kill = Constant('__NR_kill', 129)
__NR_tkill = Constant('__NR_tkill', 130)
__NR_tgkill = Constant('__NR_tgkill', 131)
__NR_sigaltstack = Constant('__NR_sigaltstack', 132)
__NR_rt_sigsuspend = Constant('__NR_rt_sigsuspend', 133)
__NR_rt_sigaction = Constant('__NR_rt_sigaction', 134)
__NR_rt_sigprocmask = Constant('__NR_rt_sigprocmask', 135)
__NR_rt_sigpending = Constant('__NR_rt_sigpending', 136)
__NR_rt_sigtimedwait = Constant('__NR_rt_sigtimedwait', 137)
__NR_rt_sigqueueinfo = Constant('__NR_rt_sigqueueinfo', 138)
__NR_rt_sigreturn = Constant('__NR_rt_sigreturn', 139)
__NR_setpriority = Constant('__NR_setpriority', 140)
__NR_getpriority = Constant('__NR_getpriority', 141)
__NR_reboot = Constant('__NR_reboot', 142)
__NR_setregid = Constant('__NR_setregid', 143)
__NR_setgid = Constant('__NR_setgid', 144)
__NR_setreuid = Constant('__NR_setreuid', 145)
__NR_setuid = Constant('__NR_setuid', 146)
__NR_setresuid = Constant('__NR_setresuid', 147)
__NR_getresuid = Constant('__NR_getresuid', 148)
__NR_setresgid = Constant('__NR_setresgid', 149)
__NR_getresgid = Constant('__NR_getresgid', 150)
__NR_setfsuid = Constant('__NR_setfsuid', 151)
__NR_setfsgid = Constant('__NR_setfsgid', 152)
__NR_times = Constant('__NR_times', 153)
__NR_setpgid = Constant('__NR_setpgid', 154)
__NR_getpgid = Constant('__NR_getpgid', 155)
__NR_getsid = Constant('__NR_getsid', 156)
__NR_setsid = Constant('__NR_setsid', 157)
__NR_getgroups = Constant('__NR_getgroups', 158)
__NR_setgroups = Constant('__NR_setgroups', 159)
__NR_uname = Constant('__NR_uname', 160)
__NR_sethostname = Constant('__NR_sethostname', 161)
__NR_setdomainname = Constant('__NR_setdomainname', 162)
__NR_getrlimit = Constant('__NR_getrlimit', 163)
__NR_setrlimit = Constant('__NR_setrlimit', 164)
__NR_getrusage = Constant('__NR_getrusage', 165)
__NR_umask = Constant('__NR_umask', 166)
__NR_prctl = Constant('__NR_prctl', 167)
__NR_getcpu = Constant('__NR_getcpu', 168)
__NR_gettimeofday = Constant('__NR_gettimeofday', 169)
__NR_settimeofday = Constant('__NR_settimeofday', 170)
__NR_adjtimex = Constant('__NR_adjtimex', 171)
__NR_getpid = Constant('__NR_getpid', 172)
__NR_getppid = Constant('__NR_getppid', 173)
__NR_getuid = Constant('__NR_getuid', 174)
__NR_geteuid = Constant('__NR_geteuid', 175)
__NR_getgid = Constant('__NR_getgid', 176)
__NR_getegid = Constant('__NR_getegid', 177)
__NR_gettid = Constant('__NR_gettid', 178)
__NR_sysinfo = Constant('__NR_sysinfo', 179)
__NR_mq_open = Constant('__NR_mq_open', 180)
__NR_mq_unlink = Constant('__NR_mq_unlink', 181)
__NR_mq_timedsend = Constant('__NR_mq_timedsend', 182)
__NR_mq_timedreceive = Constant('__NR_mq_timedreceive', 183)
__NR_mq_notify = Constant('__NR_mq_notify', 184)
__NR_mq_getsetattr = Constant('__NR_mq_getsetattr', 185)
__NR_msgget = Constant('__NR_msgget', 186)
__NR_msgctl = Constant('__NR_msgctl', 187)
__NR_msgrcv = Constant('__NR_msgrcv', 188)
__NR_msgsnd = Constant('__NR_msgsnd', 189)
__NR_semget = Constant('__NR_semget', 190)
__NR_semctl = Constant('__NR_semctl', 191)
__NR_semtimedop = Constant('__NR_semtimedop', 192)
__NR_semop = Constant('__NR_semop', 193)
__NR_shmget = Constant('__NR_shmget', 194)
__NR_shmctl = Constant('__NR_shmctl', 195)
__NR_shmat = Constant('__NR_shmat', 196)
__NR_shmdt = Constant('__NR_shmdt', 197)
__NR_socket = Constant('__NR_socket', 198)
__NR_socketpair = Constant('__NR_socketpair', 199)
__NR_bind = Constant('__NR_bind', 200)
__NR_listen = Constant('__NR_listen', 201)
__NR_accept = Constant('__NR_accept', 202)
__NR_connect = Constant('__NR_connect', 203)
__NR_getsockname = Constant('__NR_getsockname', 204)
__NR_getpeername = Constant('__NR_getpeername', 205)
__NR_sendto = Constant('__NR_sendto', 206)
__NR_recvfrom = Constant('__NR_recvfrom', 207)
__NR_setsockopt = Constant('__NR_setsockopt', 208)
__NR_getsockopt = Constant('__NR_getsockopt', 209)
__NR_shutdown = Constant('__NR_shutdown', 210)
__NR_sendmsg = Constant('__NR_sendmsg', 211)
__NR_recvmsg = Constant('__NR_recvmsg', 212)
__NR_readahead = Constant('__NR_readahead', 213)
__NR_brk = Constant('__NR_brk', 214)
__NR_munmap = Constant('__NR_munmap', 215)
__NR_mremap = Constant('__NR_mremap', 216)
__NR_add_key = Constant('__NR_add_key', 217)
__NR_request_key = Constant('__NR_request_key', 218)
__NR_keyctl = Constant('__NR_keyctl', 219)
__NR_clone = Constant('__NR_clone', 220)
__NR_execve = Constant('__NR_execve', 221)
__NR_mmap = Constant('__NR_mmap', 222)
__NR_fadvise64 = Constant('__NR_fadvise64', 223)
__NR_swapon = Constant('__NR_swapon', 224)
__NR_swapoff = Constant('__NR_swapoff', 225)
__NR_mprotect = Constant('__NR_mprotect', 226)
__NR_msync = Constant('__NR_msync', 227)
__NR_mlock = Constant('__NR_mlock', 228)
__NR_munlock = Constant('__NR_munlock', 229)
__NR_mlockall = Constant('__NR_mlockall', 230)
__NR_munlockall = Constant('__NR_munlockall', 231)
__NR_mincore = Constant('__NR_mincore', 232)
__NR_madvise = Constant('__NR_madvise', 233)
__NR_remap_file_pages = Constant('__NR_remap_file_pages', 234)
__NR_mbind = Constant('__NR_mbind', 235)
__NR_get_mempolicy = Constant('__NR_get_mempolicy', 236)
__NR_set_mempolicy = Constant('__NR_set_mempolicy', 237)
__NR_migrate_pages = Constant('__NR_migrate_pages', 238)
__NR_move_pages = Constant('__NR_move_pages', 239)
__NR_rt_tgsigqueueinfo = Constant('__NR_rt_tgsigqueueinfo', 240)
__NR_perf_event_open = Constant('__NR_perf_event_open', 241)
__NR_accept4 = Constant('__NR_accept4', 242)
__NR_recvmmsg = Constant('__NR_recvmmsg', 243)
__NR_arch_specific_syscall = Constant('__NR_arch_specific_syscall', 244)
__NR_wait4 = Constant('__NR_wait4', 260)
__NR_prlimit64 = Constant('__NR_prlimit64', 261)
__NR_fanotify_init = Constant('__NR_fanotify_init', 262)
__NR_fanotify_mark = Constant('__NR_fanotify_mark', 263)
__NR_name_to_handle_at = Constant('__NR_name_to_handle_at', 264)
__NR_open_by_handle_at = Constant('__NR_open_by_handle_at', 265)
__NR_clock_adjtime = Constant('__NR_clock_adjtime', 266)
__NR_syncfs = Constant('__NR_syncfs', 267)
__NR_setns = Constant('__NR_setns', 268)
__NR_sendmmsg = Constant('__NR_sendmmsg', 269)
__NR_process_vm_readv = Constant('__NR_process_vm_readv', 270)
__NR_process_vm_writev = Constant('__NR_process_vm_writev', 271)
__NR_kcmp = Constant('__NR_kcmp', 272)
__NR_finit_module = Constant('__NR_finit_module', 273)
__NR_open = Constant('__NR_open', 1024)
__NR_link = Constant('__NR_link', 1025)
__NR_unlink = Constant('__NR_unlink', 1026)
__NR_mknod = Constant('__NR_mknod', 1027)
__NR_chmod = Constant('__NR_chmod', 1028)
__NR_chown = Constant('__NR_chown', 1029)
__NR_mkdir = Constant('__NR_mkdir', 1030)
__NR_rmdir = Constant('__NR_rmdir', 1031)
__NR_lchown = Constant('__NR_lchown', 1032)
__NR_access = Constant('__NR_access', 1033)
__NR_rename = Constant('__NR_rename', 1034)
__NR_readlink = Constant('__NR_readlink', 1035)
__NR_symlink = Constant('__NR_symlink', 1036)
__NR_utimes = Constant('__NR_utimes', 1037)
__NR_stat = Constant('__NR_stat', 1038)
__NR_lstat = Constant('__NR_lstat', 1039)
__NR_pipe = Constant('__NR_pipe', 1040)
__NR_dup2 = Constant('__NR_dup2', 1041)
__NR_epoll_create = Constant('__NR_epoll_create', 1042)
__NR_inotify_init = Constant('__NR_inotify_init', 1043)
__NR_eventfd = Constant('__NR_eventfd', 1044)
__NR_signalfd = Constant('__NR_signalfd', 1045)
__NR_sendfile64 = Constant('__NR_sendfile64', 1046)
__NR_ftruncate64 = Constant('__NR_ftruncate64', 1047)
__NR_truncate64 = Constant('__NR_truncate64', 1048)
__NR_stat64 = Constant('__NR_stat64', 1049)
__NR_lstat64 = Constant('__NR_lstat64', 1050)
__NR_fstat64 = Constant('__NR_fstat64', 1051)
__NR_fcntl64 = Constant('__NR_fcntl64', 1052)
__NR_newfstatat = Constant('__NR_newfstatat', 1054)
__NR_fstatfs64 = Constant('__NR_fstatfs64', 1055)
__NR_statfs64 = Constant('__NR_statfs64', 1056)
__NR_lseek64 = Constant('__NR_lseek64', 1057)
__NR_mmap64 = Constant('__NR_mmap64', 1058)
__NR_alarm = Constant('__NR_alarm', 1059)
__NR_getpgrp = Constant('__NR_getpgrp', 1060)
__NR_pause = Constant('__NR_pause', 1061)
__NR_time = Constant('__NR_time', 1062)
__NR_utime = Constant('__NR_utime', 1063)
__NR_creat = Constant('__NR_creat', 1064)
__NR_getdents = Constant('__NR_getdents', 1065)
__NR_futimesat = Constant('__NR_futimesat', 1066)
__NR_select = Constant('__NR_select', 1067)
__NR_poll = Constant('__NR_poll', 1068)
__NR_epoll_wait = Constant('__NR_epoll_wait', 1069)
__NR_ustat = Constant('__NR_ustat', 1070)
__NR_vfork = Constant('__NR_vfork', 1071)
__NR_oldwait4 = Constant('__NR_oldwait4', 1072)
__NR_recv = Constant('__NR_recv', 1073)
__NR_send = Constant('__NR_send', 1074)
__NR_bdflush = Constant('__NR_bdflush', 1075)
__NR_umount = Constant('__NR_umount', 1076)
__NR_uselib = Constant('__NR_uselib', 1077)
__NR__sysctl = Constant('__NR__sysctl', 1078)
__NR_fork = Constant('__NR_fork', 1079)
__NR_syscalls = Constant('__NR_syscalls', (1079 + 1))
__NR_sigreturn = Constant('__NR_sigreturn', 1999)
MAP_32BIT = Constant('MAP_32BIT', 0x40)
INADDR_ANY = Constant('INADDR_ANY', 0)
INADDR_BROADCAST = Constant('INADDR_BROADCAST', 0xffffffff)
INADDR_NONE = Constant('INADDR_NONE', 0xffffffff)
INADDR_LOOPBACK = Constant('INADDR_LOOPBACK', 0x7f000001)
EPERM = Constant('EPERM', 1)
ENOENT = Constant('ENOENT', 2)
ESRCH = Constant('ESRCH', 3)
EINTR = Constant('EINTR', 4)
EIO = Constant('EIO', 5)
ENXIO = Constant('ENXIO', 6)
E2BIG = Constant('E2BIG', 7)
ENOEXEC = Constant('ENOEXEC', 8)
EBADF = Constant('EBADF', 9)
ECHILD = Constant('ECHILD', 10)
EAGAIN = Constant('EAGAIN', 11)
ENOMEM = Constant('ENOMEM', 12)
EACCES = Constant('EACCES', 13)
EFAULT = Constant('EFAULT', 14)
ENOTBLK = Constant('ENOTBLK', 15)
EBUSY = Constant('EBUSY', 16)
EEXIST = Constant('EEXIST', 17)
EXDEV = Constant('EXDEV', 18)
ENODEV = Constant('ENODEV', 19)
ENOTDIR = Constant('ENOTDIR', 20)
EISDIR = Constant('EISDIR', 21)
EINVAL = Constant('EINVAL', 22)
ENFILE = Constant('ENFILE', 23)
EMFILE = Constant('EMFILE', 24)
ENOTTY = Constant('ENOTTY', 25)
ETXTBSY = Constant('ETXTBSY', 26)
EFBIG = Constant('EFBIG', 27)
ENOSPC = Constant('ENOSPC', 28)
ESPIPE = Constant('ESPIPE', 29)
EROFS = Constant('EROFS', 30)
EMLINK = Constant('EMLINK', 31)
EPIPE = Constant('EPIPE', 32)
EDOM = Constant('EDOM', 33)
ERANGE = Constant('ERANGE', 34)
EDEADLK = Constant('EDEADLK', 35)
ENAMETOOLONG = Constant('ENAMETOOLONG', 36)
ENOLCK = Constant('ENOLCK', 37)
ENOSYS = Constant('ENOSYS', 38)
ENOTEMPTY = Constant('ENOTEMPTY', 39)
ELOOP = Constant('ELOOP', 40)
EWOULDBLOCK = Constant('EWOULDBLOCK', 11)
ENOMSG = Constant('ENOMSG', 42)
EIDRM = Constant('EIDRM', 43)
ECHRNG = Constant('ECHRNG', 44)
EL2NSYNC = Constant('EL2NSYNC', 45)
EL3HLT = Constant('EL3HLT', 46)
EL3RST = Constant('EL3RST', 47)
ELNRNG = Constant('ELNRNG', 48)
EUNATCH = Constant('EUNATCH', 49)
ENOCSI = Constant('ENOCSI', 50)
EL2HLT = Constant('EL2HLT', 51)
EBADE = Constant('EBADE', 52)
EBADR = Constant('EBADR', 53)
EXFULL = Constant('EXFULL', 54)
ENOANO = Constant('ENOANO', 55)
EBADRQC = Constant('EBADRQC', 56)
EBADSLT = Constant('EBADSLT', 57)
EDEADLOCK = Constant('EDEADLOCK', 35)
EBFONT = Constant('EBFONT', 59)
ENOSTR = Constant('ENOSTR', 60)
ENODATA = Constant('ENODATA', 61)
ETIME = Constant('ETIME', 62)
ENOSR = Constant('ENOSR', 63)
ENONET = Constant('ENONET', 64)
ENOPKG = Constant('ENOPKG', 65)
EREMOTE = Constant('EREMOTE', 66)
ENOLINK = Constant('ENOLINK', 67)
EADV = Constant('EADV', 68)
ESRMNT = Constant('ESRMNT', 69)
ECOMM = Constant('ECOMM', 70)
EPROTO = Constant('EPROTO', 71)
EMULTIHOP = Constant('EMULTIHOP', 72)
EDOTDOT = Constant('EDOTDOT', 73)
EBADMSG = Constant('EBADMSG', 74)
EOVERFLOW = Constant('EOVERFLOW', 75)
ENOTUNIQ = Constant('ENOTUNIQ', 76)
EBADFD = Constant('EBADFD', 77)
EREMCHG = Constant('EREMCHG', 78)
ELIBACC = Constant('ELIBACC', 79)
ELIBBAD = Constant('ELIBBAD', 80)
ELIBSCN = Constant('ELIBSCN', 81)
ELIBMAX = Constant('ELIBMAX', 82)
ELIBEXEC = Constant('ELIBEXEC', 83)
EILSEQ = Constant('EILSEQ', 84)
ERESTART = Constant('ERESTART', 85)
ESTRPIPE = Constant('ESTRPIPE', 86)
EUSERS = Constant('EUSERS', 87)
ENOTSOCK = Constant('ENOTSOCK', 88)
EDESTADDRREQ = Constant('EDESTADDRREQ', 89)
EMSGSIZE = Constant('EMSGSIZE', 90)
EPROTOTYPE = Constant('EPROTOTYPE', 91)
ENOPROTOOPT = Constant('ENOPROTOOPT', 92)
EPROTONOSUPPORT = Constant('EPROTONOSUPPORT', 93)
ESOCKTNOSUPPORT = Constant('ESOCKTNOSUPPORT', 94)
EOPNOTSUPP = Constant('EOPNOTSUPP', 95)
ENOTSUP = Constant('ENOTSUP', 95)
EPFNOSUPPORT = Constant('EPFNOSUPPORT', 96)
EAFNOSUPPORT = Constant('EAFNOSUPPORT', 97)
EADDRINUSE = Constant('EADDRINUSE', 98)
EADDRNOTAVAIL = Constant('EADDRNOTAVAIL', 99)
ENETDOWN = Constant('ENETDOWN', 100)
ENETUNREACH = Constant('ENETUNREACH', 101)
ENETRESET = Constant('ENETRESET', 102)
ECONNABORTED = Constant('ECONNABORTED', 103)
ECONNRESET = Constant('ECONNRESET', 104)
ENOBUFS = Constant('ENOBUFS', 105)
EISCONN = Constant('EISCONN', 106)
ENOTCONN = Constant('ENOTCONN', 107)
ESHUTDOWN = Constant('ESHUTDOWN', 108)
ETOOMANYREFS = Constant('ETOOMANYREFS', 109)
ETIMEDOUT = Constant('ETIMEDOUT', 110)
ECONNREFUSED = Constant('ECONNREFUSED', 111)
EHOSTDOWN = Constant('EHOSTDOWN', 112)
EHOSTUNREACH = Constant('EHOSTUNREACH', 113)
EALREADY = Constant('EALREADY', 114)
EINPROGRESS = Constant('EINPROGRESS', 115)
ESTALE = Constant('ESTALE', 116)
EUCLEAN = Constant('EUCLEAN', 117)
ENOTNAM = Constant('ENOTNAM', 118)
ENAVAIL = Constant('ENAVAIL', 119)
EISNAM = Constant('EISNAM', 120)
EREMOTEIO = Constant('EREMOTEIO', 121)
EDQUOT = Constant('EDQUOT', 122)
ENOMEDIUM = Constant('ENOMEDIUM', 123)
EMEDIUMTYPE = Constant('EMEDIUMTYPE', 124)
ECANCELED = Constant('ECANCELED', 125)
ENOKEY = Constant('ENOKEY', 126)
EKEYEXPIRED = Constant('EKEYEXPIRED', 127)
EKEYREVOKED = Constant('EKEYREVOKED', 128)
EKEYREJECTED = Constant('EKEYREJECTED', 129)
__SYS_NERR = Constant('__SYS_NERR', ((129) + 1))
__LITTLE_ENDIAN = Constant('__LITTLE_ENDIAN', 1234)
__BIG_ENDIAN = Constant('__BIG_ENDIAN', 4321)
__BYTE_ORDER = Constant('__BYTE_ORDER', 1234)
__FLOAT_WORD_ORDER = Constant('__FLOAT_WORD_ORDER', 1234)
LITTLE_ENDIAN = Constant('LITTLE_ENDIAN', 1234)
BIG_ENDIAN = Constant('BIG_ENDIAN', 4321)
BYTE_ORDER = Constant('BYTE_ORDER', 1234)
__WORDSIZE = Constant('__WORDSIZE', 32)
__FSUID_H = Constant('__FSUID_H', 1)
NSIG = Constant('NSIG', 32)
_NSIG = Constant('_NSIG', 64)
SIGHUP = Constant('SIGHUP', 1)
SIGINT = Constant('SIGINT', 2)
SIGQUIT = Constant('SIGQUIT', 3)
SIGILL = Constant('SIGILL', 4)
SIGTRAP = Constant('SIGTRAP', 5)
SIGABRT = Constant('SIGABRT', 6)
SIGIOT = Constant('SIGIOT', 6)
SIGFPE = Constant('SIGFPE', 8)
SIGKILL = Constant('SIGKILL', 9)
SIGSEGV = Constant('SIGSEGV', 11)
SIGPIPE = Constant('SIGPIPE', 13)
SIGALRM = Constant('SIGALRM', 14)
SIGTERM = Constant('SIGTERM', 15)
SIGUNUSED = Constant('SIGUNUSED', 31)
SIGBUS = Constant('SIGBUS', 7)
SIGUSR1 = Constant('SIGUSR1', 10)
SIGUSR2 = Constant('SIGUSR2', 12)
SIGSTKFLT = Constant('SIGSTKFLT', 16)
SIGCHLD = Constant('SIGCHLD', 17)
SIGCONT = Constant('SIGCONT', 18)
SIGSTOP = Constant('SIGSTOP', 19)
SIGTSTP = Constant('SIGTSTP', 20)
SIGTTIN = Constant('SIGTTIN', 21)
SIGTTOU = Constant('SIGTTOU', 22)
SIGURG = Constant('SIGURG', 23)
SIGXCPU = Constant('SIGXCPU', 24)
SIGXFSZ = Constant('SIGXFSZ', 25)
SIGVTALRM = Constant('SIGVTALRM', 26)
SIGPROF = Constant('SIGPROF', 27)
SIGWINCH = Constant('SIGWINCH', 28)
SIGIO = Constant('SIGIO', 29)
SIGPWR = Constant('SIGPWR', 30)
SIGSYS = Constant('SIGSYS', 31)
SIGCLD = Constant('SIGCLD', 17)
SIGPOLL = Constant('SIGPOLL', 29)
SIGLOST = Constant('SIGLOST', 30)
SIGRTMIN = Constant('SIGRTMIN', 32)
SIGRTMAX = Constant('SIGRTMAX', (64 - 1))
SA_NOCLDSTOP = Constant('SA_NOCLDSTOP', 0x00000001)
SA_NOCLDWAIT = Constant('SA_NOCLDWAIT', 0x00000002)
SA_SIGINFO = Constant('SA_SIGINFO', 0x00000004)
SA_THIRTYTWO = Constant('SA_THIRTYTWO', 0x02000000)
SA_RESTORER = Constant('SA_RESTORER', 0x04000000)
SA_ONSTACK = Constant('SA_ONSTACK', 0x08000000)
SA_RESTART = Constant('SA_RESTART', 0x10000000)
SA_INTERRUPT = Constant('SA_INTERRUPT', 0x20000000)
SA_NODEFER = Constant('SA_NODEFER', 0x40000000)
SA_RESETHAND = Constant('SA_RESETHAND', 0x80000000)
SA_NOMASK = Constant('SA_NOMASK', 0x40000000)
SA_ONESHOT = Constant('SA_ONESHOT', 0x80000000)
SS_ONSTACK = Constant('SS_ONSTACK', 1)
SS_DISABLE = Constant('SS_DISABLE', 2)
MINSIGSTKSZ = Constant('MINSIGSTKSZ', 2048)
SIGSTKSZ = Constant('SIGSTKSZ', 8192)
SIG_BLOCK = Constant('SIG_BLOCK', 0)
SIG_UNBLOCK = Constant('SIG_UNBLOCK', 1)
SIG_SETMASK = Constant('SIG_SETMASK', 2)
SI_MAX_SIZE = Constant('SI_MAX_SIZE', 128)
SIGEV_SIGNAL = Constant('SIGEV_SIGNAL', 0)
SIGEV_NONE = Constant('SIGEV_NONE', 1)
SIGEV_THREAD = Constant('SIGEV_THREAD', 2)
SIGEV_THREAD_ID = Constant('SIGEV_THREAD_ID', 4)
SIGEV_MAX_SIZE = Constant('SIGEV_MAX_SIZE', 64)
_SYS_TIME_H = Constant('_SYS_TIME_H', 1)
ITIMER_REAL = Constant('ITIMER_REAL', 0)
ITIMER_VIRTUAL = Constant('ITIMER_VIRTUAL', 1)
ITIMER_PROF = Constant('ITIMER_PROF', 2)
FD_SETSIZE = Constant('FD_SETSIZE', 1024)
R_OK = Constant('R_OK', 4)
W_OK = Constant('W_OK', 2)
X_OK = Constant('X_OK', 1)
F_OK = Constant('F_OK', 0)
SEEK_SET = Constant('SEEK_SET', 0)
SEEK_CUR = Constant('SEEK_CUR', 1)
SEEK_END = Constant('SEEK_END', 2)
STDIN_FILENO = Constant('STDIN_FILENO', 0)
STDOUT_FILENO = Constant('STDOUT_FILENO', 1)
STDERR_FILENO = Constant('STDERR_FILENO', 2)
_CS_PATH = Constant('_CS_PATH', 1)
_SC_CLK_TCK = Constant('_SC_CLK_TCK', 1)
_SC_ARG_MAX = Constant('_SC_ARG_MAX', 2)
_SC_NGROUPS_MAX = Constant('_SC_NGROUPS_MAX', 3)
_SC_OPEN_MAX = Constant('_SC_OPEN_MAX', 4)
_SC_PAGESIZE = Constant('_SC_PAGESIZE', 5)
_SC_NPROCESSORS_ONLN = Constant('_SC_NPROCESSORS_ONLN', 6)
_SC_NPROCESSORS_CONF = Constant('_SC_NPROCESSORS_CONF', 6)
_SC_PHYS_PAGES = Constant('_SC_PHYS_PAGES', 7)
_PC_PATH_MAX = Constant('_PC_PATH_MAX', 1)
_PC_VDISABLE = Constant('_PC_VDISABLE', 2)
L_cuserid = Constant('L_cuserid', 17)
_POSIX_VERSION = Constant('_POSIX_VERSION', 199506)
F_ULOCK = Constant('F_ULOCK', 0)
F_LOCK = Constant('F_LOCK', 1)
F_TLOCK = Constant('F_TLOCK', 2)
F_TEST = Constant('F_TEST', 3)
STAT64_HAS_BROKEN_ST_INO = Constant('STAT64_HAS_BROKEN_ST_INO', 1)
S_IFMT = Constant('S_IFMT', 0o0170000)
S_IFSOCK = Constant('S_IFSOCK', 0o140000)
S_IFLNK = Constant('S_IFLNK', 0o120000)
S_IFREG = Constant('S_IFREG', 0o100000)
S_IFBLK = Constant('S_IFBLK', 0o060000)
S_IFDIR = Constant('S_IFDIR', 0o040000)
S_IFCHR = Constant('S_IFCHR', 0o020000)
S_IFIFO = Constant('S_IFIFO', 0o010000)
S_ISUID = Constant('S_ISUID', 0o004000)
S_ISGID = Constant('S_ISGID', 0o002000)
S_ISVTX = Constant('S_ISVTX', 0o001000)
S_IRWXU = Constant('S_IRWXU', 0o0700)
S_IRUSR = Constant('S_IRUSR', 0o0400)
S_IWUSR = Constant('S_IWUSR', 0o0200)
S_IXUSR = Constant('S_IXUSR', 0o0100)
S_IRWXG = Constant('S_IRWXG', 0o0070)
S_IRGRP = Constant('S_IRGRP', 0o0040)
S_IWGRP = Constant('S_IWGRP', 0o0020)
S_IXGRP = Constant('S_IXGRP', 0o0010)
S_IRWXO = Constant('S_IRWXO', 0o0007)
S_IROTH = Constant('S_IROTH', 0o0004)
S_IWOTH = Constant('S_IWOTH', 0o0002)
S_IXOTH = Constant('S_IXOTH', 0o0001)
S_IREAD = Constant('S_IREAD', 0o0400)
S_IWRITE = Constant('S_IWRITE', 0o0200)
S_IEXEC = Constant('S_IEXEC', 0o0100)
F_LINUX_SPECIFIC_BASE = Constant('F_LINUX_SPECIFIC_BASE', 1024)
O_ACCMODE = Constant('O_ACCMODE', 0o003)
O_RDONLY = Constant('O_RDONLY', 00)
O_WRONLY = Constant('O_WRONLY', 0o1)
O_RDWR = Constant('O_RDWR', 0o2)
O_CREAT = Constant('O_CREAT', 0o100)
O_EXCL = Constant('O_EXCL', 0o200)
O_NOCTTY = Constant('O_NOCTTY', 0o400)
O_TRUNC = Constant('O_TRUNC', 0o1000)
O_APPEND = Constant('O_APPEND', 0o2000)
O_NONBLOCK = Constant('O_NONBLOCK', 0o4000)
O_NDELAY = Constant('O_NDELAY', 0o4000)
O_SYNC = Constant('O_SYNC', 0o10000)
FASYNC = Constant('FASYNC', 0o20000)
O_DIRECTORY = Constant('O_DIRECTORY', 0o40000)
O_NOFOLLOW = Constant('O_NOFOLLOW', 0o100000)
O_DIRECT = Constant('O_DIRECT', 0o200000)
O_LARGEFILE = Constant('O_LARGEFILE', 0o400000)
O_NOATIME = Constant('O_NOATIME', 0o1000000)
F_DUPFD = Constant('F_DUPFD', 0)
F_GETFD = Constant('F_GETFD', 1)
F_SETFD = Constant('F_SETFD', 2)
F_GETFL = Constant('F_GETFL', 3)
F_SETFL = Constant('F_SETFL', 4)
F_GETLK = Constant('F_GETLK', 5)
F_SETLK = Constant('F_SETLK', 6)
F_SETLKW = Constant('F_SETLKW', 7)
F_SETOWN = Constant('F_SETOWN', 8)
F_GETOWN = Constant('F_GETOWN', 9)
F_SETSIG = Constant('F_SETSIG', 10)
F_GETSIG = Constant('F_GETSIG', 11)
F_GETLK64 = Constant('F_GETLK64', 12)
F_SETLK64 = Constant('F_SETLK64', 13)
F_SETLKW64 = Constant('F_SETLKW64', 14)
FD_CLOEXEC = Constant('FD_CLOEXEC', 1)
F_RDLCK = Constant('F_RDLCK', 0)
F_WRLCK = Constant('F_WRLCK', 1)
F_UNLCK = Constant('F_UNLCK', 2)
F_EXLCK = Constant('F_EXLCK', 4)
F_SHLCK = Constant('F_SHLCK', 8)
F_INPROGRESS = Constant('F_INPROGRESS', 16)
LOCK_SH = Constant('LOCK_SH', 1)
LOCK_EX = Constant('LOCK_EX', 2)
LOCK_NB = Constant('LOCK_NB', 4)
LOCK_UN = Constant('LOCK_UN', 8)
LOCK_MAND = Constant('LOCK_MAND', 32)
LOCK_READ = Constant('LOCK_READ', 64)
LOCK_WRITE = Constant('LOCK_WRITE', 128)
LOCK_RW = Constant('LOCK_RW', 192)
O_ASYNC = Constant('O_ASYNC', 0o20000)
MREMAP_MAYMOVE = Constant('MREMAP_MAYMOVE', 1)
MREMAP_FIXED = Constant('MREMAP_FIXED', 2)
PROT_READ = Constant('PROT_READ', 0x1)
PROT_WRITE = Constant('PROT_WRITE', 0x2)
PROT_EXEC = Constant('PROT_EXEC', 0x4)
PROT_NONE = Constant('PROT_NONE', 0x0)
MAP_SHARED = Constant('MAP_SHARED', 0x01)
MAP_PRIVATE = Constant('MAP_PRIVATE', 0x02)
MAP_FIXED = Constant('MAP_FIXED', 0x10)
MAP_ANONYMOUS = Constant('MAP_ANONYMOUS', 0x20)
MAP_GROWSDOWN = Constant('MAP_GROWSDOWN', 0x0100)
MAP_DENYWRITE = Constant('MAP_DENYWRITE', 0x0800)
MAP_EXECUTABLE = Constant('MAP_EXECUTABLE', 0x1000)
MAP_LOCKED = Constant('MAP_LOCKED', 0x2000)
MAP_NORESERVE = Constant('MAP_NORESERVE', 0x4000)
MAP_POPULATE = Constant('MAP_POPULATE', 0x8000)
MS_ASYNC = Constant('MS_ASYNC', 1)
MS_INVALIDATE = Constant('MS_INVALIDATE', 2)
MS_SYNC = Constant('MS_SYNC', 4)
MCL_CURRENT = Constant('MCL_CURRENT', 1)
MCL_FUTURE = Constant('MCL_FUTURE', 2)
MADV_NORMAL = Constant('MADV_NORMAL', 0x0)
MADV_RANDOM = Constant('MADV_RANDOM', 0x1)
MADV_SEQUENTIAL = Constant('MADV_SEQUENTIAL', 0x2)
MADV_WILLNEED = Constant('MADV_WILLNEED', 0x3)
MADV_DONTNEED = Constant('MADV_DONTNEED', 0x4)
MAP_ANON = Constant('MAP_ANON', 0x20)
MAP_FILE = Constant('MAP_FILE', 0)
SOL_SOCKET = Constant('SOL_SOCKET', 1)
SO_DEBUG = Constant('SO_DEBUG', 1)
SO_REUSEADDR = Constant('SO_REUSEADDR', 2)
SO_TYPE = Constant('SO_TYPE', 3)
SO_ERROR = Constant('SO_ERROR', 4)
SO_DONTROUTE = Constant('SO_DONTROUTE', 5)
SO_BROADCAST = Constant('SO_BROADCAST', 6)
SO_SNDBUF = Constant('SO_SNDBUF', 7)
SO_RCVBUF = Constant('SO_RCVBUF', 8)
SO_KEEPALIVE = Constant('SO_KEEPALIVE', 9)
SO_OOBINLINE = Constant('SO_OOBINLINE', 10)
SO_NO_CHECK = Constant('SO_NO_CHECK', 11)
SO_PRIORITY = Constant('SO_PRIORITY', 12)
SO_LINGER = Constant('SO_LINGER', 13)
SO_BSDCOMPAT = Constant('SO_BSDCOMPAT', 14)
SO_PASSCRED = Constant('SO_PASSCRED', 16)
SO_PEERCRED = Constant('SO_PEERCRED', 17)
SO_RCVLOWAT = Constant('SO_RCVLOWAT', 18)
SO_SNDLOWAT = Constant('SO_SNDLOWAT', 19)
SO_RCVTIMEO = Constant('SO_RCVTIMEO', 20)
SO_SNDTIMEO = Constant('SO_SNDTIMEO', 21)
SO_ACCEPTCONN = Constant('SO_ACCEPTCONN', 30)
SO_SNDBUFFORCE = Constant('SO_SNDBUFFORCE', 32)
SO_RCVBUFFORCE = Constant('SO_RCVBUFFORCE', 33)
SO_SECURITY_AUTHENTICATION = Constant('SO_SECURITY_AUTHENTICATION', 22)
SO_SECURITY_ENCRYPTION_TRANSPORT = Constant('SO_SECURITY_ENCRYPTION_TRANSPORT', 23)
SO_SECURITY_ENCRYPTION_NETWORK = Constant('SO_SECURITY_ENCRYPTION_NETWORK', 24)
SO_BINDTODEVICE = Constant('SO_BINDTODEVICE', 25)
SO_ATTACH_FILTER = Constant('SO_ATTACH_FILTER', 26)
SO_DETACH_FILTER = Constant('SO_DETACH_FILTER', 27)
SO_PEERNAME = Constant('SO_PEERNAME', 28)
SO_TIMESTAMP = Constant('SO_TIMESTAMP', 29)
SCM_TIMESTAMP = Constant('SCM_TIMESTAMP', 29)
SOCK_STREAM = Constant('SOCK_STREAM', 1)
SOCK_DGRAM = Constant('SOCK_DGRAM', 2)
SOCK_RAW = Constant('SOCK_RAW', 3)
SOCK_RDM = Constant('SOCK_RDM', 4)
SOCK_SEQPACKET = Constant('SOCK_SEQPACKET', 5)
SOCK_PACKET = Constant('SOCK_PACKET', 10)
UIO_FASTIOV = Constant('UIO_FASTIOV', 8)
UIO_MAXIOV = Constant('UIO_MAXIOV', 1024)
SCM_RIGHTS = Constant('SCM_RIGHTS', 0x01)
SCM_CREDENTIALS = Constant('SCM_CREDENTIALS', 0x02)
SCM_CONNECT = Constant('SCM_CONNECT', 0x03)
AF_UNSPEC = Constant('AF_UNSPEC', 0)
AF_UNIX = Constant('AF_UNIX', 1)
AF_LOCAL = Constant('AF_LOCAL', 1)
AF_INET = Constant('AF_INET', 2)
AF_AX25 = Constant('AF_AX25', 3)
AF_IPX = Constant('AF_IPX', 4)
AF_APPLETALK = Constant('AF_APPLETALK', 5)
AF_NETROM = Constant('AF_NETROM', 6)
AF_BRIDGE = Constant('AF_BRIDGE', 7)
AF_ATMPVC = Constant('AF_ATMPVC', 8)
AF_X25 = Constant('AF_X25', 9)
AF_INET6 = Constant('AF_INET6', 10)
AF_ROSE = Constant('AF_ROSE', 11)
AF_DECnet = Constant('AF_DECnet', 12)
AF_NETBEUI = Constant('AF_NETBEUI', 13)
AF_SECURITY = Constant('AF_SECURITY', 14)
AF_KEY = Constant('AF_KEY', 15)
AF_NETLINK = Constant('AF_NETLINK', 16)
AF_ROUTE = Constant('AF_ROUTE', 16)
AF_PACKET = Constant('AF_PACKET', 17)
AF_ASH = Constant('AF_ASH', 18)
AF_ECONET = Constant('AF_ECONET', 19)
AF_ATMSVC = Constant('AF_ATMSVC', 20)
AF_SNA = Constant('AF_SNA', 22)
AF_IRDA = Constant('AF_IRDA', 23)
AF_PPPOX = Constant('AF_PPPOX', 24)
AF_WANPIPE = Constant('AF_WANPIPE', 25)
AF_MAX = Constant('AF_MAX', 32)
PF_UNSPEC = Constant('PF_UNSPEC', 0)
PF_UNIX = Constant('PF_UNIX', 1)
PF_LOCAL = Constant('PF_LOCAL', 1)
PF_INET = Constant('PF_INET', 2)
PF_AX25 = Constant('PF_AX25', 3)
PF_IPX = Constant('PF_IPX', 4)
PF_APPLETALK = Constant('PF_APPLETALK', 5)
PF_NETROM = Constant('PF_NETROM', 6)
PF_BRIDGE = Constant('PF_BRIDGE', 7)
PF_ATMPVC = Constant('PF_ATMPVC', 8)
PF_X25 = Constant('PF_X25', 9)
PF_INET6 = Constant('PF_INET6', 10)
PF_ROSE = Constant('PF_ROSE', 11)
PF_DECnet = Constant('PF_DECnet', 12)
PF_NETBEUI = Constant('PF_NETBEUI', 13)
PF_SECURITY = Constant('PF_SECURITY', 14)
PF_KEY = Constant('PF_KEY', 15)
PF_NETLINK = Constant('PF_NETLINK', 16)
PF_ROUTE = Constant('PF_ROUTE', 16)
PF_PACKET = Constant('PF_PACKET', 17)
PF_ASH = Constant('PF_ASH', 18)
PF_ECONET = Constant('PF_ECONET', 19)
PF_ATMSVC = Constant('PF_ATMSVC', 20)
PF_SNA = Constant('PF_SNA', 22)
PF_IRDA = Constant('PF_IRDA', 23)
PF_PPPOX = Constant('PF_PPPOX', 24)
PF_WANPIPE = Constant('PF_WANPIPE', 25)
PF_MAX = Constant('PF_MAX', 32)
SOMAXCONN = Constant('SOMAXCONN', 128)
MSG_OOB = Constant('MSG_OOB', 1)
MSG_PEEK = Constant('MSG_PEEK', 2)
MSG_DONTROUTE = Constant('MSG_DONTROUTE', 4)
MSG_TRYHARD = Constant('MSG_TRYHARD', 4)
MSG_CTRUNC = Constant('MSG_CTRUNC', 8)
MSG_PROBE = Constant('MSG_PROBE', 0x10)
MSG_TRUNC = Constant('MSG_TRUNC', 0x20)
MSG_DONTWAIT = Constant('MSG_DONTWAIT', 0x40)
MSG_EOR = Constant('MSG_EOR', 0x80)
MSG_WAITALL = Constant('MSG_WAITALL', 0x100)
MSG_FIN = Constant('MSG_FIN', 0x200)
MSG_EOF = Constant('MSG_EOF', 0x200)
MSG_SYN = Constant('MSG_SYN', 0x400)
MSG_CONFIRM = Constant('MSG_CONFIRM', 0x800)
MSG_RST = Constant('MSG_RST', 0x1000)
MSG_ERRQUEUE = Constant('MSG_ERRQUEUE', 0x2000)
MSG_NOSIGNAL = Constant('MSG_NOSIGNAL', 0x4000)
MSG_MORE = Constant('MSG_MORE', 0x8000)
SOL_IP = Constant('SOL_IP', 0)
SOL_TCP = Constant('SOL_TCP', 6)
SOL_UDP = Constant('SOL_UDP', 17)
SOL_IPV6 = Constant('SOL_IPV6', 41)
SOL_ICMPV6 = Constant('SOL_ICMPV6', 58)
SOL_RAW = Constant('SOL_RAW', 255)
SOL_IPX = Constant('SOL_IPX', 256)
SOL_AX25 = Constant('SOL_AX25', 257)
SOL_ATALK = Constant('SOL_ATALK', 258)
SOL_NETROM = Constant('SOL_NETROM', 259)
SOL_ROSE = Constant('SOL_ROSE', 260)
SOL_DECNET = Constant('SOL_DECNET', 261)
SOL_X25 = Constant('SOL_X25', 262)
SOL_PACKET = Constant('SOL_PACKET', 263)
SOL_ATM = Constant('SOL_ATM', 264)
SOL_AAL = Constant('SOL_AAL', 265)
SOL_IRDA = Constant('SOL_IRDA', 266)
IPX_TYPE = Constant('IPX_TYPE', 1)
SHUT_RD = Constant('SHUT_RD', 0)
SHUT_WR = Constant('SHUT_WR', 1)
SHUT_RDWR = Constant('SHUT_RDWR', 2)
NI_NOFQDN = Constant('NI_NOFQDN', 1)
NI_NUMERICHOST = Constant('NI_NUMERICHOST', 2)
NI_NAMEREQD = Constant('NI_NAMEREQD', 4)
NI_NUMERICSERV = Constant('NI_NUMERICSERV', 8)
NI_DGRAM = Constant('NI_DGRAM', 16)
EAI_FAMILY = Constant('EAI_FAMILY', -1)
EAI_SOCKTYPE = Constant('EAI_SOCKTYPE', -2)
EAI_BADFLAGS = Constant('EAI_BADFLAGS', -3)
EAI_NONAME = Constant('EAI_NONAME', -4)
EAI_SERVICE = Constant('EAI_SERVICE', -5)
EAI_ADDRFAMILY = Constant('EAI_ADDRFAMILY', -6)
EAI_NODATA = Constant('EAI_NODATA', -7)
EAI_MEMORY = Constant('EAI_MEMORY', -8)
EAI_FAIL = Constant('EAI_FAIL', -9)
EAI_AGAIN = Constant('EAI_AGAIN', -10)
EAI_SYSTEM = Constant('EAI_SYSTEM', -11)
AI_NUMERICHOST = Constant('AI_NUMERICHOST', 1)
AI_CANONNAME = Constant('AI_CANONNAME', 2)
AI_PASSIVE = Constant('AI_PASSIVE', 4)
SIOCADDRT = Constant('SIOCADDRT', 0x890B)
SIOCDELRT = Constant('SIOCDELRT', 0x890C)
SIOCRTMSG = Constant('SIOCRTMSG', 0x890D)
SIOCGIFNAME = Constant('SIOCGIFNAME', 0x8910)
SIOCSIFLINK = Constant('SIOCSIFLINK', 0x8911)
SIOCGIFCONF = Constant('SIOCGIFCONF', 0x8912)
SIOCGIFFLAGS = Constant('SIOCGIFFLAGS', 0x8913)
SIOCSIFFLAGS = Constant('SIOCSIFFLAGS', 0x8914)
SIOCGIFADDR = Constant('SIOCGIFADDR', 0x8915)
SIOCSIFADDR = Constant('SIOCSIFADDR', 0x8916)
SIOCGIFDSTADDR = Constant('SIOCGIFDSTADDR', 0x8917)
SIOCSIFDSTADDR = Constant('SIOCSIFDSTADDR', 0x8918)
SIOCGIFBRDADDR = Constant('SIOCGIFBRDADDR', 0x8919)
SIOCSIFBRDADDR = Constant('SIOCSIFBRDADDR', 0x891a)
SIOCGIFNETMASK = Constant('SIOCGIFNETMASK', 0x891b)
SIOCSIFNETMASK = Constant('SIOCSIFNETMASK', 0x891c)
SIOCGIFMETRIC = Constant('SIOCGIFMETRIC', 0x891d)
SIOCSIFMETRIC = Constant('SIOCSIFMETRIC', 0x891e)
SIOCGIFMEM = Constant('SIOCGIFMEM', 0x891f)
SIOCSIFMEM = Constant('SIOCSIFMEM', 0x8920)
SIOCGIFMTU = Constant('SIOCGIFMTU', 0x8921)
SIOCSIFMTU = Constant('SIOCSIFMTU', 0x8922)
SIOCSIFNAME = Constant('SIOCSIFNAME', 0x8923)
SIOCSIFHWADDR = Constant('SIOCSIFHWADDR', 0x8924)
SIOCGIFENCAP = Constant('SIOCGIFENCAP', 0x8925)
SIOCSIFENCAP = Constant('SIOCSIFENCAP', 0x8926)
SIOCGIFHWADDR = Constant('SIOCGIFHWADDR', 0x8927)
SIOCGIFSLAVE = Constant('SIOCGIFSLAVE', 0x8929)
SIOCSIFSLAVE = Constant('SIOCSIFSLAVE', 0x8930)
SIOCADDMULTI = Constant('SIOCADDMULTI', 0x8931)
SIOCDELMULTI = Constant('SIOCDELMULTI', 0x8932)
SIOCGIFINDEX = Constant('SIOCGIFINDEX', 0x8933)
SIOGIFINDEX = Constant('SIOGIFINDEX', 0x8933)
SIOCSIFPFLAGS = Constant('SIOCSIFPFLAGS', 0x8934)
SIOCGIFPFLAGS = Constant('SIOCGIFPFLAGS', 0x8935)
SIOCDIFADDR = Constant('SIOCDIFADDR', 0x8936)
SIOCSIFHWBROADCAST = Constant('SIOCSIFHWBROADCAST', 0x8937)
SIOCGIFCOUNT = Constant('SIOCGIFCOUNT', 0x8938)
SIOCGIFBR = Constant('SIOCGIFBR', 0x8940)
SIOCSIFBR = Constant('SIOCSIFBR', 0x8941)
SIOCGIFTXQLEN = Constant('SIOCGIFTXQLEN', 0x8942)
SIOCSIFTXQLEN = Constant('SIOCSIFTXQLEN', 0x8943)
SIOCGIFDIVERT = Constant('SIOCGIFDIVERT', 0x8944)
SIOCSIFDIVERT = Constant('SIOCSIFDIVERT', 0x8945)
SIOCETHTOOL = Constant('SIOCETHTOOL', 0x8946)
SIOCDARP = Constant('SIOCDARP', 0x8953)
SIOCGARP = Constant('SIOCGARP', 0x8954)
SIOCSARP = Constant('SIOCSARP', 0x8955)
SIOCDRARP = Constant('SIOCDRARP', 0x8960)
SIOCGRARP = Constant('SIOCGRARP', 0x8961)
SIOCSRARP = Constant('SIOCSRARP', 0x8962)
SIOCGIFMAP = Constant('SIOCGIFMAP', 0x8970)
SIOCSIFMAP = Constant('SIOCSIFMAP', 0x8971)
SIOCADDDLCI = Constant('SIOCADDDLCI', 0x8980)
SIOCDELDLCI = Constant('SIOCDELDLCI', 0x8981)
SIOCDEVPRIVATE = Constant('SIOCDEVPRIVATE', 0x89F0)
PTRACE_TRACEME = Constant('PTRACE_TRACEME', 0)
PTRACE_PEEKTEXT = Constant('PTRACE_PEEKTEXT', 1)
PTRACE_PEEKDATA = Constant('PTRACE_PEEKDATA', 2)
PTRACE_PEEKUSR = Constant('PTRACE_PEEKUSR', 3)
PTRACE_PEEKUSER = Constant('PTRACE_PEEKUSER', 3)
PTRACE_POKETEXT = Constant('PTRACE_POKETEXT', 4)
PTRACE_POKEDATA = Constant('PTRACE_POKEDATA', 5)
PTRACE_POKEUSR = Constant('PTRACE_POKEUSR', 6)
PTRACE_POKEUSER = Constant('PTRACE_POKEUSER', 6)
PTRACE_CONT = Constant('PTRACE_CONT', 7)
PTRACE_KILL = Constant('PTRACE_KILL', 8)
PTRACE_SINGLESTEP = Constant('PTRACE_SINGLESTEP', 9)
PTRACE_ATTACH = Constant('PTRACE_ATTACH', 0x10)
PTRACE_DETACH = Constant('PTRACE_DETACH', 0x11)
PTRACE_SYSCALL = Constant('PTRACE_SYSCALL', 24)
PTRACE_GETEVENTMSG = Constant('PTRACE_GETEVENTMSG', 0x4201)
PTRACE_GETSIGINFO = Constant('PTRACE_GETSIGINFO', 0x4202)
PTRACE_SETSIGINFO = Constant('PTRACE_SETSIGINFO', 0x4203)
PTRACE_O_TRACESYSGOOD = Constant('PTRACE_O_TRACESYSGOOD', 0x00000001)
PTRACE_O_TRACEFORK = Constant('PTRACE_O_TRACEFORK', 0x00000002)
PTRACE_O_TRACEVFORK = Constant('PTRACE_O_TRACEVFORK', 0x00000004)
PTRACE_O_TRACECLONE = Constant('PTRACE_O_TRACECLONE', 0x00000008)
PTRACE_O_TRACEEXEC = Constant('PTRACE_O_TRACEEXEC', 0x00000010)
PTRACE_O_TRACEVFORKDONE = Constant('PTRACE_O_TRACEVFORKDONE', 0x00000020)
PTRACE_O_TRACEEXIT = Constant('PTRACE_O_TRACEEXIT', 0x00000040)
PTRACE_O_MASK = Constant('PTRACE_O_MASK', 0x0000007f)
PTRACE_EVENT_FORK = Constant('PTRACE_EVENT_FORK', 1)
PTRACE_EVENT_VFORK = Constant('PTRACE_EVENT_VFORK', 2)
PTRACE_EVENT_CLONE = Constant('PTRACE_EVENT_CLONE', 3)
PTRACE_EVENT_EXEC = Constant('PTRACE_EVENT_EXEC', 4)
PTRACE_EVENT_VFORK_DONE = Constant('PTRACE_EVENT_VFORK_DONE', 5)
PTRACE_EVENT_EXIT = Constant('PTRACE_EVENT_EXIT', 6)
PT_TRACE_ME = Constant('PT_TRACE_ME', 0)
PT_READ_I = Constant('PT_READ_I', 1)
PT_READ_D = Constant('PT_READ_D', 2)
PT_READ_U = Constant('PT_READ_U', 3)
PT_WRITE_I = Constant('PT_WRITE_I', 4)
PT_WRITE_D = Constant('PT_WRITE_D', 5)
PT_WRITE_U = Constant('PT_WRITE_U', 6)
PT_CONTINUE = Constant('PT_CONTINUE', 7)
PT_KILL = Constant('PT_KILL', 8)
PT_STEP = Constant('PT_STEP', 9)
PT_ATTACH = Constant('PT_ATTACH', 0x10)
PT_DETACH = Constant('PT_DETACH', 0x11)
USR26_MODE = Constant('USR26_MODE', 0x00)
FIQ26_MODE = Constant('FIQ26_MODE', 0x01)
IRQ26_MODE = Constant('IRQ26_MODE', 0x02)
SVC26_MODE = Constant('SVC26_MODE', 0x03)
USR_MODE = Constant('USR_MODE', 0x10)
FIQ_MODE = Constant('FIQ_MODE', 0x11)
IRQ_MODE = Constant('IRQ_MODE', 0x12)
SVC_MODE = Constant('SVC_MODE', 0x13)
ABT_MODE = Constant('ABT_MODE', 0x17)
UND_MODE = Constant('UND_MODE', 0x1b)
SYSTEM_MODE = Constant('SYSTEM_MODE', 0x1f)
MODE_MASK = Constant('MODE_MASK', 0x1f)
T_BIT = Constant('T_BIT', 0x20)
F_BIT = Constant('F_BIT', 0x40)
I_BIT = Constant('I_BIT', 0x80)
CC_V_BIT = Constant('CC_V_BIT', (1 << 28))
CC_C_BIT = Constant('CC_C_BIT', (1 << 29))
CC_Z_BIT = Constant('CC_Z_BIT', (1 << 30))
CC_N_BIT = Constant('CC_N_BIT', (1 << 31))
PCMASK = Constant('PCMASK', 0)
SYS_accept = Constant('SYS_accept', 202)
SYS_accept4 = Constant('SYS_accept4', 242)
SYS_access = Constant('SYS_access', 1033)
SYS_acct = Constant('SYS_acct', 89)
SYS_add_key = Constant('SYS_add_key', 217)
SYS_adjtimex = Constant('SYS_adjtimex', 171)
SYS_alarm = Constant('SYS_alarm', 1059)
SYS_bdflush = Constant('SYS_bdflush', 1075)
SYS_bind = Constant('SYS_bind', 200)
SYS_brk = Constant('SYS_brk', 214)
SYS_capget = Constant('SYS_capget', 90)
SYS_capset = Constant('SYS_capset', 91)
SYS_chdir = Constant('SYS_chdir', 49)
SYS_chmod = Constant('SYS_chmod', 1028)
SYS_chown = Constant('SYS_chown', 1029)
SYS_chroot = Constant('SYS_chroot', 51)
SYS_clock_getres = Constant('SYS_clock_getres', 114)
SYS_clock_gettime = Constant('SYS_clock_gettime', 113)
SYS_clock_nanosleep = Constant('SYS_clock_nanosleep', 115)
SYS_clock_settime = Constant('SYS_clock_settime', 112)
SYS_clone = Constant('SYS_clone', 220)
SYS_close = Constant('SYS_close', 57)
SYS_connect = Constant('SYS_connect', 203)
SYS_creat = Constant('SYS_creat', 1064)
SYS_delete_module = Constant('SYS_delete_module', 106)
SYS_dup = Constant('SYS_dup', 23)
SYS_dup2 = Constant('SYS_dup2', 1041)
SYS_dup3 = Constant('SYS_dup3', 24)
SYS_epoll_create = Constant('SYS_epoll_create', 1042)
SYS_epoll_create1 = Constant('SYS_epoll_create1', 20)
SYS_epoll_ctl = Constant('SYS_epoll_ctl', 21)
SYS_epoll_pwait = Constant('SYS_epoll_pwait', 22)
SYS_epoll_wait = Constant('SYS_epoll_wait', 1069)
SYS_eventfd = Constant('SYS_eventfd', 1044)
SYS_eventfd2 = Constant('SYS_eventfd2', 19)
SYS_execve = Constant('SYS_execve', 221)
SYS_exit = Constant('SYS_exit', 93)
SYS_exit_group = Constant('SYS_exit_group', 94)
SYS_faccessat = Constant('SYS_faccessat', 48)
SYS_fadvise64 = Constant('SYS_fadvise64', 223)
SYS_fallocate = Constant('SYS_fallocate', 47)
SYS_fanotify_init = Constant('SYS_fanotify_init', 262)
SYS_fanotify_mark = Constant('SYS_fanotify_mark', 263)
SYS_fchdir = Constant('SYS_fchdir', 50)
SYS_fchmod = Constant('SYS_fchmod', 52)
SYS_fchmodat = Constant('SYS_fchmodat', 53)
SYS_fchown = Constant('SYS_fchown', 55)
SYS_fchownat = Constant('SYS_fchownat', 54)
SYS_fcntl = Constant('SYS_fcntl', 25)
SYS_fcntl64 = Constant('SYS_fcntl64', 1052)
SYS_fdatasync = Constant('SYS_fdatasync', 83)
SYS_fgetxattr = Constant('SYS_fgetxattr', 10)
SYS_flistxattr = Constant('SYS_flistxattr', 13)
SYS_flock = Constant('SYS_flock', 32)
SYS_fork = Constant('SYS_fork', 1079)
SYS_fremovexattr = Constant('SYS_fremovexattr', 16)
SYS_fsetxattr = Constant('SYS_fsetxattr', 7)
SYS_fstat = Constant('SYS_fstat', 80)
SYS_fstat64 = Constant('SYS_fstat64', 1051)
SYS_fstatat64 = Constant('SYS_fstatat64', 79)
SYS_fstatfs = Constant('SYS_fstatfs', 44)
SYS_fstatfs64 = Constant('SYS_fstatfs64', 1055)
SYS_fsync = Constant('SYS_fsync', 82)
SYS_ftruncate = Constant('SYS_ftruncate', 46)
SYS_ftruncate64 = Constant('SYS_ftruncate64', 1047)
SYS_futex = Constant('SYS_futex', 98)
SYS_futimesat = Constant('SYS_futimesat', 1066)
SYS_getcpu = Constant('SYS_getcpu', 168)
SYS_getcwd = Constant('SYS_getcwd', 17)
SYS_getdents = Constant('SYS_getdents', 1065)
SYS_getdents64 = Constant('SYS_getdents64', 61)
SYS_getegid = Constant('SYS_getegid', 177)
SYS_geteuid = Constant('SYS_geteuid', 175)
SYS_getgid = Constant('SYS_getgid', 176)
SYS_getgroups = Constant('SYS_getgroups', 158)
SYS_getitimer = Constant('SYS_getitimer', 102)
SYS_get_mempolicy = Constant('SYS_get_mempolicy', 236)
SYS_getpeername = Constant('SYS_getpeername', 205)
SYS_getpgid = Constant('SYS_getpgid', 155)
SYS_getpgrp = Constant('SYS_getpgrp', 1060)
SYS_getpid = Constant('SYS_getpid', 172)
SYS_getppid = Constant('SYS_getppid', 173)
SYS_getpriority = Constant('SYS_getpriority', 141)
SYS_getresgid = Constant('SYS_getresgid', 150)
SYS_getresuid = Constant('SYS_getresuid', 148)
SYS_getrlimit = Constant('SYS_getrlimit', 163)
SYS_get_robust_list = Constant('SYS_get_robust_list', 100)
SYS_getrusage = Constant('SYS_getrusage', 165)
SYS_getsid = Constant('SYS_getsid', 156)
SYS_getsockname = Constant('SYS_getsockname', 204)
SYS_getsockopt = Constant('SYS_getsockopt', 209)
SYS_gettid = Constant('SYS_gettid', 178)
SYS_gettimeofday = Constant('SYS_gettimeofday', 169)
SYS_getuid = Constant('SYS_getuid', 174)
SYS_getxattr = Constant('SYS_getxattr', 8)
SYS_init_module = Constant('SYS_init_module', 105)
SYS_inotify_add_watch = Constant('SYS_inotify_add_watch', 27)
SYS_inotify_init = Constant('SYS_inotify_init', 1043)
SYS_inotify_init1 = Constant('SYS_inotify_init1', 26)
SYS_inotify_rm_watch = Constant('SYS_inotify_rm_watch', 28)
SYS_io_cancel = Constant('SYS_io_cancel', 3)
SYS_ioctl = Constant('SYS_ioctl', 29)
SYS_io_destroy = Constant('SYS_io_destroy', 1)
SYS_io_getevents = Constant('SYS_io_getevents', 4)
SYS_ioprio_get = Constant('SYS_ioprio_get', 31)
SYS_ioprio_set = Constant('SYS_ioprio_set', 30)
SYS_io_setup = Constant('SYS_io_setup', 0)
SYS_io_submit = Constant('SYS_io_submit', 2)
SYS_kexec_load = Constant('SYS_kexec_load', 104)
SYS_keyctl = Constant('SYS_keyctl', 219)
SYS_kill = Constant('SYS_kill', 129)
SYS_lchown = Constant('SYS_lchown', 1032)
SYS_lgetxattr = Constant('SYS_lgetxattr', 9)
SYS_link = Constant('SYS_link', 1025)
SYS_linkat = Constant('SYS_linkat', 37)
SYS_listen = Constant('SYS_listen', 201)
SYS_listxattr = Constant('SYS_listxattr', 11)
SYS_llistxattr = Constant('SYS_llistxattr', 12)
SYS_lookup_dcookie = Constant('SYS_lookup_dcookie', 18)
SYS_lremovexattr = Constant('SYS_lremovexattr', 15)
SYS_lseek = Constant('SYS_lseek', 62)
SYS_lsetxattr = Constant('SYS_lsetxattr', 6)
SYS_lstat = Constant('SYS_lstat', 1039)
SYS_lstat64 = Constant('SYS_lstat64', 1050)
SYS_madvise = Constant('SYS_madvise', 233)
SYS_mbind = Constant('SYS_mbind', 235)
SYS_migrate_pages = Constant('SYS_migrate_pages', 238)
SYS_mincore = Constant('SYS_mincore', 232)
SYS_mkdir = Constant('SYS_mkdir', 1030)
SYS_mkdirat = Constant('SYS_mkdirat', 34)
SYS_mknod = Constant('SYS_mknod', 1027)
SYS_mknodat = Constant('SYS_mknodat', 33)
SYS_mlock = Constant('SYS_mlock', 228)
SYS_mlockall = Constant('SYS_mlockall', 230)
SYS_mmap = Constant('SYS_mmap', 222)
SYS_mount = Constant('SYS_mount', 40)
SYS_move_pages = Constant('SYS_move_pages', 239)
SYS_mprotect = Constant('SYS_mprotect', 226)
SYS_mq_getsetattr = Constant('SYS_mq_getsetattr', 185)
SYS_mq_notify = Constant('SYS_mq_notify', 184)
SYS_mq_open = Constant('SYS_mq_open', 180)
SYS_mq_timedreceive = Constant('SYS_mq_timedreceive', 183)
SYS_mq_timedsend = Constant('SYS_mq_timedsend', 182)
SYS_mq_unlink = Constant('SYS_mq_unlink', 181)
SYS_mremap = Constant('SYS_mremap', 216)
SYS_msgctl = Constant('SYS_msgctl', 187)
SYS_msgget = Constant('SYS_msgget', 186)
SYS_msgrcv = Constant('SYS_msgrcv', 188)
SYS_msgsnd = Constant('SYS_msgsnd', 189)
SYS_msync = Constant('SYS_msync', 227)
SYS_munlock = Constant('SYS_munlock', 229)
SYS_munlockall = Constant('SYS_munlockall', 231)
SYS_munmap = Constant('SYS_munmap', 215)
SYS_nanosleep = Constant('SYS_nanosleep', 101)
SYS_newfstatat = Constant('SYS_newfstatat', 1054)
SYS_nfsservctl = Constant('SYS_nfsservctl', 42)
SYS_open = Constant('SYS_open', 1024)
SYS_openat = Constant('SYS_openat', 56)
SYS_pause = Constant('SYS_pause', 1061)
SYS_perf_event_open = Constant('SYS_perf_event_open', 241)
SYS_personality = Constant('SYS_personality', 92)
SYS_pipe = Constant('SYS_pipe', 1040)
SYS_pipe2 = Constant('SYS_pipe2', 59)
SYS_pivot_root = Constant('SYS_pivot_root', 41)
SYS_poll = Constant('SYS_poll', 1068)
SYS_ppoll = Constant('SYS_ppoll', 73)
SYS_prctl = Constant('SYS_prctl', 167)
SYS_pread64 = Constant('SYS_pread64', 67)
SYS_preadv = Constant('SYS_preadv', 69)
SYS_prlimit64 = Constant('SYS_prlimit64', 261)
SYS_pselect6 = Constant('SYS_pselect6', 72)
SYS_ptrace = Constant('SYS_ptrace', 117)
SYS_pwrite64 = Constant('SYS_pwrite64', 68)
SYS_pwritev = Constant('SYS_pwritev', 70)
SYS_quotactl = Constant('SYS_quotactl', 60)
SYS_read = Constant('SYS_read', 63)
SYS_readahead = Constant('SYS_readahead', 213)
SYS_readlink = Constant('SYS_readlink', 1035)
SYS_readlinkat = Constant('SYS_readlinkat', 78)
SYS_readv = Constant('SYS_readv', 65)
SYS_reboot = Constant('SYS_reboot', 142)
SYS_recv = Constant('SYS_recv', 1073)
SYS_recvfrom = Constant('SYS_recvfrom', 207)
SYS_recvmmsg = Constant('SYS_recvmmsg', 243)
SYS_recvmsg = Constant('SYS_recvmsg', 212)
SYS_remap_file_pages = Constant('SYS_remap_file_pages', 234)
SYS_removexattr = Constant('SYS_removexattr', 14)
SYS_rename = Constant('SYS_rename', 1034)
SYS_renameat = Constant('SYS_renameat', 38)
SYS_request_key = Constant('SYS_request_key', 218)
SYS_restart_syscall = Constant('SYS_restart_syscall', 128)
SYS_rmdir = Constant('SYS_rmdir', 1031)
SYS_rt_sigaction = Constant('SYS_rt_sigaction', 134)
SYS_rt_sigpending = Constant('SYS_rt_sigpending', 136)
SYS_rt_sigprocmask = Constant('SYS_rt_sigprocmask', 135)
SYS_rt_sigqueueinfo = Constant('SYS_rt_sigqueueinfo', 138)
SYS_rt_sigreturn = Constant('SYS_rt_sigreturn', 139)
SYS_rt_sigsuspend = Constant('SYS_rt_sigsuspend', 133)
SYS_rt_sigtimedwait = Constant('SYS_rt_sigtimedwait', 137)
SYS_rt_tgsigqueueinfo = Constant('SYS_rt_tgsigqueueinfo', 240)
SYS_sched_getaffinity = Constant('SYS_sched_getaffinity', 123)
SYS_sched_getparam = Constant('SYS_sched_getparam', 121)
SYS_sched_get_priority_max = Constant('SYS_sched_get_priority_max', 125)
SYS_sched_get_priority_min = Constant('SYS_sched_get_priority_min', 126)
SYS_sched_getscheduler = Constant('SYS_sched_getscheduler', 120)
SYS_sched_rr_get_interval = Constant('SYS_sched_rr_get_interval', 127)
SYS_sched_setaffinity = Constant('SYS_sched_setaffinity', 122)
SYS_sched_setparam = Constant('SYS_sched_setparam', 118)
SYS_sched_setscheduler = Constant('SYS_sched_setscheduler', 119)
SYS_sched_yield = Constant('SYS_sched_yield', 124)
SYS_select = Constant('SYS_select', 1067)
SYS_semctl = Constant('SYS_semctl', 191)
SYS_semget = Constant('SYS_semget', 190)
SYS_semop = Constant('SYS_semop', 193)
SYS_semtimedop = Constant('SYS_semtimedop', 192)
SYS_send = Constant('SYS_send', 1074)
SYS_sendfile = Constant('SYS_sendfile', 71)
SYS_sendfile64 = Constant('SYS_sendfile64', 1046)
SYS_sendmsg = Constant('SYS_sendmsg', 211)
SYS_sendto = Constant('SYS_sendto', 206)
SYS_setdomainname = Constant('SYS_setdomainname', 162)
SYS_setfsgid = Constant('SYS_setfsgid', 152)
SYS_setfsuid = Constant('SYS_setfsuid', 151)
SYS_setgid = Constant('SYS_setgid', 144)
SYS_setgroups = Constant('SYS_setgroups', 159)
SYS_sethostname = Constant('SYS_sethostname', 161)
SYS_setitimer = Constant('SYS_setitimer', 103)
SYS_set_mempolicy = Constant('SYS_set_mempolicy', 237)
SYS_setpgid = Constant('SYS_setpgid', 154)
SYS_setpriority = Constant('SYS_setpriority', 140)
SYS_setregid = Constant('SYS_setregid', 143)
SYS_setresgid = Constant('SYS_setresgid', 149)
SYS_setresuid = Constant('SYS_setresuid', 147)
SYS_setreuid = Constant('SYS_setreuid', 145)
SYS_setrlimit = Constant('SYS_setrlimit', 164)
SYS_set_robust_list = Constant('SYS_set_robust_list', 99)
SYS_setsid = Constant('SYS_setsid', 157)
SYS_setsockopt = Constant('SYS_setsockopt', 208)
SYS_set_tid_address = Constant('SYS_set_tid_address', 96)
SYS_settimeofday = Constant('SYS_settimeofday', 170)
SYS_setuid = Constant('SYS_setuid', 146)
SYS_setxattr = Constant('SYS_setxattr', 5)
SYS_shmat = Constant('SYS_shmat', 196)
SYS_shmctl = Constant('SYS_shmctl', 195)
SYS_shmdt = Constant('SYS_shmdt', 197)
SYS_shmget = Constant('SYS_shmget', 194)
SYS_shutdown = Constant('SYS_shutdown', 210)
SYS_sigaltstack = Constant('SYS_sigaltstack', 132)
SYS_signalfd = Constant('SYS_signalfd', 1045)
SYS_signalfd4 = Constant('SYS_signalfd4', 74)
SYS_sigreturn = Constant('SYS_sigreturn', 1999)
SYS_socket = Constant('SYS_socket', 198)
SYS_socketpair = Constant('SYS_socketpair', 199)
SYS_splice = Constant('SYS_splice', 76)
SYS_stat = Constant('SYS_stat', 1038)
SYS_stat64 = Constant('SYS_stat64', 1049)
SYS_statfs = Constant('SYS_statfs', 43)
SYS_statfs64 = Constant('SYS_statfs64', 1056)
SYS_swapoff = Constant('SYS_swapoff', 225)
SYS_swapon = Constant('SYS_swapon', 224)
SYS_symlink = Constant('SYS_symlink', 1036)
SYS_symlinkat = Constant('SYS_symlinkat', 36)
SYS_sync = Constant('SYS_sync', 81)
SYS_sync_file_range2 = Constant('SYS_sync_file_range2', 84)
SYS__sysctl = Constant('SYS__sysctl', 1078)
SYS_sysinfo = Constant('SYS_sysinfo', 179)
SYS_syslog = Constant('SYS_syslog', 116)
SYS_tee = Constant('SYS_tee', 77)
SYS_tgkill = Constant('SYS_tgkill', 131)
SYS_time = Constant('SYS_time', 1062)
SYS_timer_create = Constant('SYS_timer_create', 107)
SYS_timer_delete = Constant('SYS_timer_delete', 111)
SYS_timerfd_create = Constant('SYS_timerfd_create', 85)
SYS_timerfd_gettime = Constant('SYS_timerfd_gettime', 87)
SYS_timerfd_settime = Constant('SYS_timerfd_settime', 86)
SYS_timer_getoverrun = Constant('SYS_timer_getoverrun', 109)
SYS_timer_gettime = Constant('SYS_timer_gettime', 108)
SYS_timer_settime = Constant('SYS_timer_settime', 110)
SYS_times = Constant('SYS_times', 153)
SYS_tkill = Constant('SYS_tkill', 130)
SYS_truncate = Constant('SYS_truncate', 45)
SYS_truncate64 = Constant('SYS_truncate64', 1048)
SYS_umask = Constant('SYS_umask', 166)
SYS_umount = Constant('SYS_umount', 1076)
SYS_umount2 = Constant('SYS_umount2', 39)
SYS_uname = Constant('SYS_uname', 160)
SYS_unlink = Constant('SYS_unlink', 1026)
SYS_unlinkat = Constant('SYS_unlinkat', 35)
SYS_unshare = Constant('SYS_unshare', 97)
SYS_uselib = Constant('SYS_uselib', 1077)
SYS_ustat = Constant('SYS_ustat', 1070)
SYS_utime = Constant('SYS_utime', 1063)
SYS_utimensat = Constant('SYS_utimensat', 88)
SYS_utimes = Constant('SYS_utimes', 1037)
SYS_vfork = Constant('SYS_vfork', 1071)
SYS_vhangup = Constant('SYS_vhangup', 58)
SYS_vmsplice = Constant('SYS_vmsplice', 75)
SYS_wait4 = Constant('SYS_wait4', 260)
SYS_waitid = Constant('SYS_waitid', 95)
SYS_write = Constant('SYS_write', 64)
SYS_writev = Constant('SYS_writev', 66)
|
examples/expl_qwant.py | albmarin/MechanicalSoup | 2,530 | 11149855 | <gh_stars>1000+
"""Example usage of MechanicalSoup to get the results from the Qwant
search engine.
"""
import re
import urllib.parse
import mechanicalsoup
# Connect to Qwant
browser = mechanicalsoup.StatefulBrowser(user_agent='MechanicalSoup')
browser.open("https://lite.qwant.com/")
# Fill-in the search form
browser.select_form('#search-form')
browser["q"] = "MechanicalSoup"
browser.submit_selected()
# Display the results
for link in browser.page.select('.result a'):
# Qwant shows redirection links, not the actual URL, so extract
# the actual URL from the redirect link:
href = link.attrs['href']
m = re.match(r"^/redirect/[^/]*/(.*)\?.*$", href)
if m:
href = urllib.parse.unquote(m.group(1))
print(link.text, '->', href)
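# Illustrative note (added as an assumption, not part of the original example):
# a hypothetical redirect href such as
#   "/redirect/abc123/https%3A%2F%2Fmechanicalsoup.readthedocs.io%2F?position=1"
# would match the pattern above and be unquoted to
# "https://mechanicalsoup.readthedocs.io/", while hrefs that do not match the
# pattern are printed with their original value unchanged.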
|
predict.py | annaproxy/udify-metalearning | 185 | 11149861 | <reponame>annaproxy/udify-metalearning
"""
Predict conllu files given a trained model
"""
import os
import shutil
import logging
import argparse
import tarfile
from pathlib import Path
from allennlp.common import Params
from allennlp.common.util import import_submodules
from allennlp.models.archival import archive_model
from udify import util
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
parser.add_argument("archive", type=str, help="The archive file")
parser.add_argument("input_file", type=str, help="The input file to predict")
parser.add_argument("pred_file", type=str, help="The output prediction file")
parser.add_argument("--eval_file", default=None, type=str,
help="If set, evaluate the prediction and store it in the given file")
parser.add_argument("--device", default=0, type=int, help="CUDA device number; set to -1 for CPU")
parser.add_argument("--batch_size", default=1, type=int, help="The size of each prediction batch")
parser.add_argument("--lazy", action="store_true", help="Lazy load dataset")
parser.add_argument("--raw_text", action="store_true", help="Input raw sentences, one per line in the input file.")
args = parser.parse_args()
import_submodules("udify")
archive_dir = Path(args.archive).resolve().parent
if not os.path.isfile(archive_dir / "weights.th"):
with tarfile.open(args.archive) as tar:
tar.extractall(archive_dir)
config_file = archive_dir / "config.json"
overrides = {}
if args.device is not None:
overrides["trainer"] = {"cuda_device": args.device}
if args.lazy:
overrides["dataset_reader"] = {"lazy": args.lazy}
configs = [Params(overrides), Params.from_file(config_file)]
params = util.merge_configs(configs)
predictor = "udify_predictor" if not args.raw_text else "udify_text_predictor"
if not args.eval_file:
util.predict_model_with_archive(predictor, params, archive_dir, args.input_file, args.pred_file,
batch_size=args.batch_size)
else:
util.predict_and_evaluate_model_with_archive(predictor, params, archive_dir, args.input_file,
args.pred_file, args.eval_file, batch_size=args.batch_size)
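# Example invocation (a sketch with hypothetical paths, not taken from the original
# repository): predict a CoNLL-U test file with a trained archive on GPU 0 and also
# write evaluation metrics, matching the arguments defined above:
#
#   python predict.py logs/udify-model.tar.gz data/en_ewt-ud-test.conllu \
#       predictions/en_ewt.conllu --eval_file predictions/en_ewt.json \
#       --device 0 --batch_size 32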
|
test/vpp_qos.py | amithbraj/vpp | 751 | 11149889 | <filename>test/vpp_qos.py
"""
QoS
object abstractions for representing QoS configuration in VPP
"""
from vpp_object import VppObject
class VppQosRecord(VppObject):
""" QoS Record(ing) configuration """
def __init__(self, test, intf, source):
self._test = test
self.intf = intf
self.source = source
def add_vpp_config(self):
self._test.vapi.qos_record_enable_disable(
enable=1,
record={'sw_if_index': self.intf.sw_if_index,
'input_source': self.source})
self._test.registry.register(self, self._test.logger)
return self
def remove_vpp_config(self):
self._test.vapi.qos_record_enable_disable(
enable=0,
record={'sw_if_index': self.intf.sw_if_index,
'input_source': self.source})
def query_vpp_config(self):
rs = self._test.vapi.qos_record_dump()
for r in rs:
if self.intf.sw_if_index == r.record.sw_if_index and \
self.source == r.record.input_source:
return True
return False
def object_id(self):
return ("qos-record-%s-%d" % (self.intf, self.source))
class VppQosStore(VppObject):
""" QoS Store(ing) configuration """
def __init__(self, test, intf, source, value):
self._test = test
self.intf = intf
self.source = source
self.value = value
def add_vpp_config(self):
self._test.vapi.qos_store_enable_disable(
enable=1,
store={'sw_if_index': self.intf.sw_if_index,
'input_source': self.source,
'value': self.value})
self._test.registry.register(self, self._test.logger)
return self
def remove_vpp_config(self):
self._test.vapi.qos_store_enable_disable(
enable=0,
store={'sw_if_index': self.intf.sw_if_index,
'input_source': self.source})
def query_vpp_config(self):
rs = self._test.vapi.qos_store_dump()
for r in rs:
if self.intf.sw_if_index == r.store.sw_if_index and \
self.source == r.store.input_source and \
self.value == r.store.value:
return True
return False
def object_id(self):
return ("qos-store-%s-%d" % (self.intf, self.source))
class VppQosEgressMap(VppObject):
""" QoS Egress Map(ping) configuration """
def __init__(self, test, id, rows):
self._test = test
self.id = id
self.rows = rows
def add_vpp_config(self):
self._test.vapi.qos_egress_map_update(
map={'id': self.id,
'rows': self.rows})
self._test.registry.register(self, self._test.logger)
return self
def remove_vpp_config(self):
self._test.vapi.qos_egress_map_delete(id=self.id)
def query_vpp_config(self):
rs = self._test.vapi.qos_egress_map_dump()
for r in rs:
if self.id == r.map.id:
return True
return False
def object_id(self):
return ("qos-map-%d" % (self.id))
class VppQosMark(VppObject):
""" QoS Mark(ing) configuration """
def __init__(self, test, intf, map, source):
self._test = test
self.intf = intf
self.source = source
self.map = map
def add_vpp_config(self):
self._test.vapi.qos_mark_enable_disable(
enable=1,
mark={'sw_if_index': self.intf.sw_if_index,
'map_id': self.map.id,
'output_source': self.source})
self._test.registry.register(self, self._test.logger)
return self
def remove_vpp_config(self):
self._test.vapi.qos_mark_enable_disable(
enable=0,
mark={'sw_if_index': self.intf.sw_if_index,
'output_source': self.source})
def query_vpp_config(self):
ms = self._test.vapi.qos_mark_dump()
for m in ms:
if self.intf.sw_if_index == m.mark.sw_if_index and \
self.source == m.mark.output_source and \
self.map.id == m.mark.map_id:
return True
return False
def object_id(self):
return ("qos-mark-%s-%d" % (self.intf, self.source))
|
test/hummingbot/connector/exchange/coinflex/test_coinflex_order_book.py | pecuniafinance/hummingbot | 542 | 11149918 | from unittest import TestCase
from hummingbot.connector.exchange.coinflex.coinflex_order_book import CoinflexOrderBook
from hummingbot.core.data_type.order_book_message import OrderBookMessageType
class CoinflexOrderBookTests(TestCase):
def test_snapshot_message_from_exchange(self):
snapshot_message = CoinflexOrderBook.snapshot_message_from_exchange(
msg={
"marketCode": "COINALPHA-HBOT",
"timestamp": 1,
"bids": [
["4.00000000", "431.00000000"]
],
"asks": [
["4.00000200", "12.00000000"]
]
},
timestamp=1640000000.0,
metadata={"trading_pair": "COINALPHA-HBOT"}
)
self.assertEqual("COINALPHA-HBOT", snapshot_message.trading_pair)
self.assertEqual(OrderBookMessageType.SNAPSHOT, snapshot_message.type)
self.assertEqual(1640000000.0, snapshot_message.timestamp)
self.assertEqual(1, snapshot_message.update_id)
self.assertEqual(-1, snapshot_message.trade_id)
self.assertEqual(1, len(snapshot_message.bids))
self.assertEqual(4.0, snapshot_message.bids[0].price)
self.assertEqual(431.0, snapshot_message.bids[0].amount)
self.assertEqual(1, snapshot_message.bids[0].update_id)
self.assertEqual(1, len(snapshot_message.asks))
self.assertEqual(4.000002, snapshot_message.asks[0].price)
self.assertEqual(12.0, snapshot_message.asks[0].amount)
self.assertEqual(1, snapshot_message.asks[0].update_id)
def test_diff_message_from_exchange(self):
diff_msg = CoinflexOrderBook.diff_message_from_exchange(
msg={
"table": "depth",
"data": [
{
"instrumentId": "COINALPHA-HBOT",
"seqNum": 1,
"timestamp": 2,
"bids": [
[
"0.0024",
"10"
]
],
"asks": [
[
"0.0026",
"100"
]
]
}
]
},
timestamp=1640000000.0,
metadata={"trading_pair": "COINALPHA-HBOT"}
)
self.assertEqual("COINALPHA-HBOT", diff_msg.trading_pair)
self.assertEqual(OrderBookMessageType.DIFF, diff_msg.type)
self.assertEqual(1640000000.0, diff_msg.timestamp)
self.assertEqual(2, diff_msg.update_id)
self.assertEqual(1, diff_msg.first_update_id)
self.assertEqual(-1, diff_msg.trade_id)
self.assertEqual(1, len(diff_msg.bids))
self.assertEqual(0.0024, diff_msg.bids[0].price)
self.assertEqual(10.0, diff_msg.bids[0].amount)
self.assertEqual(2, diff_msg.bids[0].update_id)
self.assertEqual(1, len(diff_msg.asks))
self.assertEqual(0.0026, diff_msg.asks[0].price)
self.assertEqual(100.0, diff_msg.asks[0].amount)
self.assertEqual(2, diff_msg.asks[0].update_id)
def test_trade_message_from_exchange(self):
trade_update = {
"timestamp": 1234567890123,
"marketCode": "COINALPHA-HBOT",
"tradeId": 12345,
"side": "SELL",
"price": "0.001",
"quantity": "100",
}
trade_message = CoinflexOrderBook.trade_message_from_exchange(
msg=trade_update,
metadata={"trading_pair": "COINALPHA-HBOT"}
)
self.assertEqual("COINALPHA-HBOT", trade_message.trading_pair)
self.assertEqual(OrderBookMessageType.TRADE, trade_message.type)
self.assertEqual(1234567890.123, trade_message.timestamp)
self.assertEqual(-1, trade_message.update_id)
self.assertEqual(-1, trade_message.first_update_id)
self.assertEqual(12345, trade_message.trade_id)
|