ext | sha | content
---|---|---
py | 7dfd214744be7a39a90f7efdbdf04a5567f3d04a | import src.cifar10.Quantize as Quantize
import src.cifar10.myInitializer as myInitializer
import src.cifar10.Option as Option
import tensorflow as tf
from tensorflow.contrib.layers import batch_norm
W_q_op = []
W_clip_op = []
def arr(stride_or_ksize, data_format='NCHW'):
    # data format NCHW
    if data_format == 'NCHW':
        return [1, 1, stride_or_ksize, stride_or_ksize]
    elif data_format == 'NHWC':
        return [1, stride_or_ksize, stride_or_ksize, 1]
    else:
        raise NotImplementedError
def get_variable(shape, name):
    with tf.name_scope(name) as scope:
        w = tf.get_variable(
            name=name, shape=shape,
            initializer=myInitializer.variance_scaling_initializer(
                factor=1.0, mode='FAN_IN', uniform=True
            )
        )
    if Quantize.bitsW <= 16:
        # manually clip and quantize W if needed
        W_q_op.append(tf.assign(w, Quantize.Q(w, Quantize.bitsW)))
        W_clip_op.append(tf.assign(w, Quantize.C(w, Quantize.bitsW)))
        scale = Option.W_scale[-1]
        return Quantize.W(w, scale)
        # return W_q[-1]
    else:
        raise NotImplementedError
def conv(x, ksize, c_out, stride=1, padding='SAME', data_format='NCHW', name='conv'):
    c_in = x.get_shape().as_list()[1 if data_format=='NCHW' else 3]
    W = get_variable([ksize, ksize, c_in, c_out], name)
    x = tf.nn.conv2d(x, W, arr(stride, data_format), padding=padding, data_format=data_format, name=name)
    return x
def depth_conv(x, ksize, c_mul, c_out, stride=1, padding='SAME', data_format='NCHW', name='depth_conv'):
    c_in = x.get_shape().as_list()[1 if data_format=='NCHW' else 3]
    W_depth = get_variable([ksize, ksize, c_in, c_mul], name+'-depth')
    W_point = get_variable([1, 1, c_in * c_mul, c_out], name+'-point')
    x = tf.nn.separable_conv2d(x, W_depth, W_point, arr(stride, data_format), padding=padding, data_format=data_format, name=name)
    return x
def fc(x, c_out, name='fc'):
    c_in = x.get_shape().as_list()[1]
    W = get_variable([c_in, c_out], name)
    x = tf.matmul(x, W)
    return x
def batch_norm(x, is_training, data_format='NCHW'):
    x = tf.contrib.layers.batch_norm(x, center=True, scale=True, is_training=is_training, decay=0.9, epsilon=1e-5, fused=True, data_format=data_format)
    return x
def QA(x):
    if Option.bitsA <= 16:
        x = Quantize.A(x)
    return x
def QE(x):
    if Option.bitsE <= 16:
        x = Quantize.E(x)
    return x
def activation(x):
    x = tf.nn.relu(x)
    x = QE(x)
    x = QA(x)
    return x
def pool(x, mtype, ksize, stride=1, padding='SAME', data_format='NCHW'):
    if mtype == 'MAX':
        x = tf.nn.max_pool(x, arr(ksize, data_format), arr(stride, data_format),
                           padding=padding, data_format=data_format)
    elif mtype == 'AVG':
        x = tf.nn.avg_pool(x, arr(ksize, data_format), arr(stride, data_format),
                           padding=padding, data_format=data_format)
    else:
        assert False, ('Invalid pooling type:' + mtype)
    return x
if __name__ == '__main__':
    x = tf.placeholder(tf.float32, shape=[1, 3, 32, 32])
    x_conv = conv(x, 3, 5)
    x_depth_conv = depth_conv(x, 3, 4, 5)
    x_batch_norm = batch_norm(x, True)
    x_actv = activation(x)
    y = tf.placeholder(tf.float32, shape=[1, 32])
    y_fc = fc(y, 64)
|
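Note that the file above only collects the weight clip/quantize assignments in W_clip_op and W_q_op; nothing in it ever runs them. A minimal sketch of how a TF 1.x training loop might apply them after each optimizer step (train_op and num_steps are hypothetical names built elsewhere; Option and Quantize are assumed to be configured as in the imports above):

import tensorflow as tf
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(num_steps):   # num_steps: hypothetical step count
        sess.run(train_op)          # hypothetical optimizer step defined elsewhere
        sess.run(W_clip_op)         # clip stored weights back into the representable range
        sess.run(W_q_op)            # re-quantize the stored weights in place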
py | 7dfd21ffb4e9bf63274b295bf658fe013cf2203d | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mlagents/envs/communicator_objects/demonstration_meta_proto.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='mlagents/envs/communicator_objects/demonstration_meta_proto.proto',
package='communicator_objects',
syntax='proto3',
serialized_options=_b('\252\002\034MLAgents.CommunicatorObjects'),
serialized_pb=_b('\nAmlagents/envs/communicator_objects/demonstration_meta_proto.proto\x12\x14\x63ommunicator_objects\"\x8d\x01\n\x16\x44\x65monstrationMetaProto\x12\x13\n\x0b\x61pi_version\x18\x01 \x01(\x05\x12\x1a\n\x12\x64\x65monstration_name\x18\x02 \x01(\t\x12\x14\n\x0cnumber_steps\x18\x03 \x01(\x05\x12\x17\n\x0fnumber_episodes\x18\x04 \x01(\x05\x12\x13\n\x0bmean_reward\x18\x05 \x01(\x02\x42\x1f\xaa\x02\x1cMLAgents.CommunicatorObjectsb\x06proto3')
)
_DEMONSTRATIONMETAPROTO = _descriptor.Descriptor(
name='DemonstrationMetaProto',
full_name='communicator_objects.DemonstrationMetaProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='api_version', full_name='communicator_objects.DemonstrationMetaProto.api_version', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='demonstration_name', full_name='communicator_objects.DemonstrationMetaProto.demonstration_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='number_steps', full_name='communicator_objects.DemonstrationMetaProto.number_steps', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='number_episodes', full_name='communicator_objects.DemonstrationMetaProto.number_episodes', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mean_reward', full_name='communicator_objects.DemonstrationMetaProto.mean_reward', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=92,
serialized_end=233,
)
DESCRIPTOR.message_types_by_name['DemonstrationMetaProto'] = _DEMONSTRATIONMETAPROTO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DemonstrationMetaProto = _reflection.GeneratedProtocolMessageType('DemonstrationMetaProto', (_message.Message,), dict(
DESCRIPTOR = _DEMONSTRATIONMETAPROTO,
__module__ = 'mlagents.envs.communicator_objects.demonstration_meta_proto_pb2'
# @@protoc_insertion_point(class_scope:communicator_objects.DemonstrationMetaProto)
))
_sym_db.RegisterMessage(DemonstrationMetaProto)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
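The generated DemonstrationMetaProto behaves like any other protobuf message class. A small, hedged usage sketch (the import path mirrors the __module__ string set above):

from mlagents.envs.communicator_objects.demonstration_meta_proto_pb2 import DemonstrationMetaProto
meta = DemonstrationMetaProto(api_version=1, demonstration_name='demo',
                              number_steps=100, number_episodes=2, mean_reward=0.5)
payload = meta.SerializeToString()                     # compact binary encoding
restored = DemonstrationMetaProto.FromString(payload)
assert restored.demonstration_name == 'demo'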
py | 7dfd22f23f8de8db589304c9e288561feac56479 | """project_name URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf import settings
from django.urls import path
from django.urls import include
from django.conf.urls.static import static
# Admin customization.
admin.site.site_header = 'project_name'
admin.site.site_title = 'project_name'
admin.site.index_title = 'Administração'
urlpatterns = [
    path('admin/', admin.site.urls),
    # API
    path('api/v1/', include('project_name.api.v1.urls', namespace='api-v1')),
    # Home
    path('', include('project_name.core.urls', namespace='core')),
]
if settings.DEFAULT_FILE_STORAGE == 'django.core.files.storage.FileSystemStorage':
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
py | 7dfd2337e2c1b984c9b599543a20525e7b218b49 | # put your solution here
def consec(a, n):
    leng = 1
    li = []
    if n == 0:
        return li
    for i in range(1, n + 1):
        if i == n or a[i] - a[i - 1] != 1:
            if leng == 1:
                li.append(str(a[i - leng]))
            elif leng == 2:
                temp = (str(a[i - leng]) + " " +
                        str(a[i - 1]))
                li.append(temp)
            else:
                temp = (str(a[i - leng]) +
                        "-" + str(a[i - 1]))
                li.append(temp)
            leng = 1
        else:
            leng += 1
    return li
if __name__ == "__main__":
    n = int(input())
    l = list(map(int, input().split()))
    an = sorted(l)
    ans = consec(an, n)
    for i in range(len(ans)):
        if i == len(ans) - 1:
            print(ans[i])
        else:
            print(ans[i], end=" ")
|
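As a quick illustration of the grouping logic (not part of the submitted solution): given the input

7
1 2 3 5 7 8 9

the list sorts to [1, 2, 3, 5, 7, 8, 9]; consec collapses runs of three or more consecutive values into "start-end", prints a pair as two space-separated numbers, and leaves singletons as-is, so the program prints:

1-3 5 7-9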
py | 7dfd235e4fc636972da7bf4c7ec923c095df98a5 | import random
import os
from io import open
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import torch
import numpy as np
class Partition(object):
    def __init__(self, data, index):
        self.data = data
        self.index = index
    def __len__(self):
        return len(self.index)
    def __getitem__(self, index):
        data_idx = self.index[index]
        return self.data[data_idx]
    def get_assigned_data(self):
        return self.data[self.index]
class DataPartitioner(object):
    def __init__(self, data, batch_size, sizes=None, seed=1234, shuffle=True):
        if sizes is None:
            sizes = [0.7, 0.2, 0.1]
        self.data = data
        self.partitions = []
        self.bsz = []
        data_len = len(data)
        indexes = [x for x in range(0, data_len)]
        if shuffle:
            rng = random.Random()
            rng.seed(seed)
            rng.shuffle(indexes)
        for frac in sizes:
            part_len = int(frac * data_len)
            self.partitions.append(indexes[0: part_len])
            self.bsz.append(batch_size * frac)
            indexes = indexes[part_len:]
    def use(self, partition):
        return Partition(self.data, self.partitions[partition]), self.bsz[partition]
def partition_dataset(dataset, partition_sizes, rank, batch_size, seed):
    if dataset == "wikitext2":
        rnn = True
    else:
        rnn = False
    if dataset == "mnist":
        dataset = datasets.FashionMNIST('./data', train=True, download=True,
                                        transform=transforms.Compose([
                                            transforms.ToTensor(),
                                            transforms.Normalize((0.1307,), (0.3081,))
                                        ]))
        testset = datasets.FashionMNIST('./data', train=False, download=True,
                                        transform=transforms.Compose([
                                            transforms.ToTensor(),
                                            transforms.Normalize((0.1307,), (0.3081,))
                                        ]))
    elif dataset == "cifar10":
        dataset = datasets.CIFAR10('./data', train=True, download=True,
                                   transform=transforms.Compose([
                                       transforms.RandomCrop(32, padding=4),
                                       transforms.RandomHorizontalFlip(),
                                       transforms.ToTensor(),
                                       transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
                                   ]))
        testset = datasets.CIFAR10('./data', train=False, download=True,
                                   transform=transforms.Compose([
                                       transforms.RandomCrop(32, padding=4),
                                       transforms.RandomHorizontalFlip(),
                                       transforms.ToTensor(),
                                       transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
                                   ]))
    elif dataset == "cifar100":
        dataset = datasets.CIFAR100('./data', train=True, download=True,
                                    transform=transforms.Compose([
                                        transforms.RandomCrop(32, padding=4),
                                        transforms.RandomHorizontalFlip(),
                                        transforms.ToTensor(),
                                        transforms.Normalize((0.5071, 0.4865, 0.4409), (0.2673, 0.2564, 0.2762)),
                                    ]))
        testset = datasets.CIFAR100('./data', train=False, download=True,
                                    transform=transforms.Compose([
                                        transforms.RandomCrop(32, padding=4),
                                        transforms.RandomHorizontalFlip(),
                                        transforms.ToTensor(),
                                        transforms.Normalize((0.5071, 0.4865, 0.4409), (0.2673, 0.2564, 0.2762)),
                                    ]))
    elif dataset == "wikitext2":
        corpus = Corpus("rnn_data/wikitext-2")
        dataset = corpus.train
        testset = corpus.test
    if rnn:
        partition = DataPartitioner(dataset, batch_size, partition_sizes, shuffle=False)
        partition, bsz = partition.use(rank)
        train_set = batchify(partition.get_assigned_data(), bsz)
        eval_batch_size = 10
        val_set = batchify(testset, eval_batch_size)
    else:
        partition = DataPartitioner(dataset, batch_size, partition_sizes, seed=seed)
        partition, bsz = partition.use(rank)
        train_set = DataLoader(partition, batch_size=int(bsz), shuffle=True)
        val_set = DataLoader(testset, batch_size=int(bsz), shuffle=False)
    return train_set, val_set, bsz
class Dictionary(object):
    def __init__(self):
        self.word2idx = {}
        self.idx2word = []
    def add_word(self, word):
        if word not in self.word2idx:
            self.idx2word.append(word)
            self.word2idx[word] = len(self.idx2word) - 1
        return self.word2idx[word]
    def __len__(self):
        return len(self.idx2word)
class Corpus(object):
    def __init__(self, path):
        self.dictionary = Dictionary()
        self.train = self.tokenize(os.path.join(path, 'train.txt'))
        self.valid = self.tokenize(os.path.join(path, 'valid.txt'))
        self.test = self.tokenize(os.path.join(path, 'test.txt'))
    def tokenize(self, path):
        """Tokenizes a text file."""
        assert os.path.exists(path)
        # Add words to the dictionary
        with open(path, 'r', encoding="utf8") as f:
            for line in f:
                words = line.split() + ['<eos>']
                for word in words:
                    self.dictionary.add_word(word)
        # Tokenize file content
        with open(path, 'r', encoding="utf8") as f:
            idss = []
            for line in f:
                words = line.split() + ['<eos>']
                ids = []
                for word in words:
                    ids.append(self.dictionary.word2idx[word])
                idss.append(torch.tensor(ids).type(torch.int64))
            ids = torch.cat(idss)
        return ids
def batchify(data, bsz):
    # Work out how cleanly we can divide the dataset into bsz parts.
    nbatch = len(data) // int(bsz)
    # Trim off any extra elements that wouldn't cleanly fit (remainders).
    data = data.narrow(0, 0, nbatch * int(bsz))
    # Evenly divide the data across the bsz batches.
    data = data.view(int(bsz), -1).t().contiguous()
    return data
|
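To make the reshaping in batchify concrete (an illustration that assumes the file above has been executed; it is not part of the original source): with bsz = 4 and a 1-D tensor of 10 token ids, nbatch = 10 // 4 = 2, narrow keeps the first 8 ids, view(4, -1) produces a 4 x 2 tensor, and the transpose yields shape (2, 4) in which each column is one contiguous token stream.

ids = torch.arange(10)
batches = batchify(ids, 4)
print(batches.shape)   # torch.Size([2, 4])
print(batches[:, 0])   # tensor([0, 1]) -- the start of the first stream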
py | 7dfd24d721bceb0f7d701f176405afd264c4fe2d | from __future__ import unicode_literals
from django.http import HttpRequest, HttpResponse, StreamingHttpResponse
from django.http.utils import conditional_content_removal
from django.test import TestCase
class HttpUtilTests(TestCase):
    def test_conditional_content_removal(self):
        """
        Tests that content is removed from regular and streaming responses with
        a status_code of 100-199, 204, 304 or a method of "HEAD".
        """
        req = HttpRequest()
        # Do nothing for 200 responses.
        res = HttpResponse('abc')
        conditional_content_removal(req, res)
        self.assertEqual(res.content, b'abc')
        res = StreamingHttpResponse(['abc'])
        conditional_content_removal(req, res)
        self.assertEqual(b''.join(res), b'abc')
        # Strip content for some status codes.
        for status_code in (100, 150, 199, 204, 304):
            res = HttpResponse('abc', status=status_code)
            conditional_content_removal(req, res)
            self.assertEqual(res.content, b'')
            res = StreamingHttpResponse(['abc'], status=status_code)
            conditional_content_removal(req, res)
            self.assertEqual(b''.join(res), b'')
        # Strip content for HEAD requests.
        req.method = 'HEAD'
        res = HttpResponse('abc')
        conditional_content_removal(req, res)
        self.assertEqual(res.content, b'')
        res = StreamingHttpResponse(['abc'])
        conditional_content_removal(req, res)
        self.assertEqual(b''.join(res), b'')
|
py | 7dfd2646d18a81e875eed753f9e4ce21073c2c8e | """
pygments.unistring
~~~~~~~~~~~~~~~~~~
Strings of all Unicode characters of a certain category.
Used for matching in Unicode-aware languages. Run to regenerate.
Inspired by chartypes_create.py from the MoinMoin project.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
Cc = '\x00-\x1f\x7f-\x9f'
Cf = '\xad\u0600-\u0605\u061c\u06dd\u070f\u08e2\u180e\u200b-\u200f\u202a-\u202e\u2060-\u2064\u2066-\u206f\ufeff\ufff9-\ufffb\U000110bd\U000110cd\U0001bca0-\U0001bca3\U0001d173-\U0001d17a\U000e0001\U000e0020-\U000e007f'
Cn = '\u0378-\u0379\u0380-\u0383\u038b\u038d\u03a2\u0530\u0557-\u0558\u058b-\u058c\u0590\u05c8-\u05cf\u05eb-\u05ee\u05f5-\u05ff\u061d\u070e\u074b-\u074c\u07b2-\u07bf\u07fb-\u07fc\u082e-\u082f\u083f\u085c-\u085d\u085f\u086b-\u089f\u08b5\u08be-\u08d2\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09c5-\u09c6\u09c9-\u09ca\u09cf-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09ff-\u0a00\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a50\u0a52-\u0a58\u0a5d\u0a5f-\u0a65\u0a77-\u0a80\u0a84\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0acf\u0ad1-\u0adf\u0ae4-\u0ae5\u0af2-\u0af8\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34\u0b3a-\u0b3b\u0b45-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b64-\u0b65\u0b78-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bcf\u0bd1-\u0bd6\u0bd8-\u0be5\u0bfb-\u0bff\u0c0d\u0c11\u0c29\u0c3a-\u0c3c\u0c45\u0c49\u0c4e-\u0c54\u0c57\u0c5b-\u0c5f\u0c64-\u0c65\u0c70-\u0c77\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbb\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce4-\u0ce5\u0cf0\u0cf3-\u0cff\u0d04\u0d0d\u0d11\u0d45\u0d49\u0d50-\u0d53\u0d64-\u0d65\u0d80-\u0d81\u0d84\u0d97-\u0d99\u0db2\u0dbc\u0dbe-\u0dbf\u0dc7-\u0dc9\u0dcb-\u0dce\u0dd5\u0dd7\u0de0-\u0de5\u0df0-\u0df1\u0df5-\u0e00\u0e3b-\u0e3e\u0e5c-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0edb\u0ee0-\u0eff\u0f48\u0f6d-\u0f70\u0f98\u0fbd\u0fcd\u0fdb-\u0fff\u10c6\u10c8-\u10cc\u10ce-\u10cf\u1249\u124e-\u124f\u1257\u1259\u125e-\u125f\u1289\u128e-\u128f\u12b1\u12b6-\u12b7\u12bf\u12c1\u12c6-\u12c7\u12d7\u1311\u1316-\u1317\u135b-\u135c\u137d-\u137f\u139a-\u139f\u13f6-\u13f7\u13fe-\u13ff\u169d-\u169f\u16f9-\u16ff\u170d\u1715-\u171f\u1737-\u173f\u1754-\u175f\u176d\u1771\u1774-\u177f\u17de-\u17df\u17ea-\u17ef\u17fa-\u17ff\u180f\u181a-\u181f\u1879-\u187f\u18ab-\u18af\u18f6-\u18ff\u191f\u192c-\u192f\u193c-\u193f\u1941-\u1943\u196e-\u196f\u1975-\u197f\u19ac-\u19af\u19ca-\u19cf\u19db-\u19dd\u1a1c-\u1a1d\u1a5f\u1a7d-\u1a7e\u1a8a-\u1a8f\u1a9a-\u1a9f\u1aae-\u1aaf\u1abf-\u1aff\u1b4c-\u1b4f\u1b7d-\u1b7f\u1bf4-\u1bfb\u1c38-\u1c3a\u1c4a-\u1c4c\u1c89-\u1c8f\u1cbb-\u1cbc\u1cc8-\u1ccf\u1cfa-\u1cff\u1dfa\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fc5\u1fd4-\u1fd5\u1fdc\u1ff0-\u1ff1\u1ff5\u1fff\u2065\u2072-\u2073\u208f\u209d-\u209f\u20c0-\u20cf\u20f1-\u20ff\u218c-\u218f\u2427-\u243f\u244b-\u245f\u2b74-\u2b75\u2b96-\u2b97\u2bc9\u2bff\u2c2f\u2c5f\u2cf4-\u2cf8\u2d26\u2d28-\u2d2c\u2d2e-\u2d2f\u2d68-\u2d6e\u2d71-\u2d7e\u2d97-\u2d9f\u2da7\u2daf\u2db7\u2dbf\u2dc7\u2dcf\u2dd7\u2ddf\u2e4f-\u2e7f\u2e9a\u2ef4-\u2eff\u2fd6-\u2fef\u2ffc-\u2fff\u3040\u3097-\u3098\u3100-\u3104\u3130\u318f\u31bb-\u31bf\u31e4-\u31ef\u321f\u32ff\u4db6-\u4dbf\u9ff0-\u9fff\ua48d-\ua48f\ua4c7-\ua4cf\ua62c-\ua63f\ua6f8-\ua6ff\ua7ba-\ua7f6\ua82c-\ua82f\ua83a-\ua83f\ua878-\ua87f\ua8c6-\ua8cd\ua8da-\ua8df\ua954-\ua95e\ua97d-\ua97f\ua9ce\ua9da-\ua9dd\ua9ff\uaa37-\uaa3f\uaa4e-\uaa4f\uaa5a-\uaa5b\uaac3-\uaada\uaaf7-\uab00\uab07-\uab08\uab0f-\uab10\uab17-\uab1f\uab27\uab2f\uab66-\uab6f\uabee-\uabef\uabfa-\uabff\ud7a4-\ud7af\ud7c7-\ud7ca\ud7fc-\ud7ff\ufa6e-\ufa6f\ufada-\ufaff\ufb07-\ufb12\ufb18-\ufb1c\ufb37\ufb3d\ufb3f\ufb42\ufb45\ufbc2-\ufbd2\ufd40-\ufd4f\ufd90-\ufd91\ufdc8-\ufdef\ufdfe-\ufdff\ufe1a-\ufe1f\ufe53\ufe67\ufe6c-\ufe6f\ufe75\ufefd-\uf
efe\uff00\uffbf-\uffc1\uffc8-\uffc9\uffd0-\uffd1\uffd8-\uffd9\uffdd-\uffdf\uffe7\uffef-\ufff8\ufffe-\uffff\U0001000c\U00010027\U0001003b\U0001003e\U0001004e-\U0001004f\U0001005e-\U0001007f\U000100fb-\U000100ff\U00010103-\U00010106\U00010134-\U00010136\U0001018f\U0001019c-\U0001019f\U000101a1-\U000101cf\U000101fe-\U0001027f\U0001029d-\U0001029f\U000102d1-\U000102df\U000102fc-\U000102ff\U00010324-\U0001032c\U0001034b-\U0001034f\U0001037b-\U0001037f\U0001039e\U000103c4-\U000103c7\U000103d6-\U000103ff\U0001049e-\U0001049f\U000104aa-\U000104af\U000104d4-\U000104d7\U000104fc-\U000104ff\U00010528-\U0001052f\U00010564-\U0001056e\U00010570-\U000105ff\U00010737-\U0001073f\U00010756-\U0001075f\U00010768-\U000107ff\U00010806-\U00010807\U00010809\U00010836\U00010839-\U0001083b\U0001083d-\U0001083e\U00010856\U0001089f-\U000108a6\U000108b0-\U000108df\U000108f3\U000108f6-\U000108fa\U0001091c-\U0001091e\U0001093a-\U0001093e\U00010940-\U0001097f\U000109b8-\U000109bb\U000109d0-\U000109d1\U00010a04\U00010a07-\U00010a0b\U00010a14\U00010a18\U00010a36-\U00010a37\U00010a3b-\U00010a3e\U00010a49-\U00010a4f\U00010a59-\U00010a5f\U00010aa0-\U00010abf\U00010ae7-\U00010aea\U00010af7-\U00010aff\U00010b36-\U00010b38\U00010b56-\U00010b57\U00010b73-\U00010b77\U00010b92-\U00010b98\U00010b9d-\U00010ba8\U00010bb0-\U00010bff\U00010c49-\U00010c7f\U00010cb3-\U00010cbf\U00010cf3-\U00010cf9\U00010d28-\U00010d2f\U00010d3a-\U00010e5f\U00010e7f-\U00010eff\U00010f28-\U00010f2f\U00010f5a-\U00010fff\U0001104e-\U00011051\U00011070-\U0001107e\U000110c2-\U000110cc\U000110ce-\U000110cf\U000110e9-\U000110ef\U000110fa-\U000110ff\U00011135\U00011147-\U0001114f\U00011177-\U0001117f\U000111ce-\U000111cf\U000111e0\U000111f5-\U000111ff\U00011212\U0001123f-\U0001127f\U00011287\U00011289\U0001128e\U0001129e\U000112aa-\U000112af\U000112eb-\U000112ef\U000112fa-\U000112ff\U00011304\U0001130d-\U0001130e\U00011311-\U00011312\U00011329\U00011331\U00011334\U0001133a\U00011345-\U00011346\U00011349-\U0001134a\U0001134e-\U0001134f\U00011351-\U00011356\U00011358-\U0001135c\U00011364-\U00011365\U0001136d-\U0001136f\U00011375-\U000113ff\U0001145a\U0001145c\U0001145f-\U0001147f\U000114c8-\U000114cf\U000114da-\U0001157f\U000115b6-\U000115b7\U000115de-\U000115ff\U00011645-\U0001164f\U0001165a-\U0001165f\U0001166d-\U0001167f\U000116b8-\U000116bf\U000116ca-\U000116ff\U0001171b-\U0001171c\U0001172c-\U0001172f\U00011740-\U000117ff\U0001183c-\U0001189f\U000118f3-\U000118fe\U00011900-\U000119ff\U00011a48-\U00011a4f\U00011a84-\U00011a85\U00011aa3-\U00011abf\U00011af9-\U00011bff\U00011c09\U00011c37\U00011c46-\U00011c4f\U00011c6d-\U00011c6f\U00011c90-\U00011c91\U00011ca8\U00011cb7-\U00011cff\U00011d07\U00011d0a\U00011d37-\U00011d39\U00011d3b\U00011d3e\U00011d48-\U00011d4f\U00011d5a-\U00011d5f\U00011d66\U00011d69\U00011d8f\U00011d92\U00011d99-\U00011d9f\U00011daa-\U00011edf\U00011ef9-\U00011fff\U0001239a-\U000123ff\U0001246f\U00012475-\U0001247f\U00012544-\U00012fff\U0001342f-\U000143ff\U00014647-\U000167ff\U00016a39-\U00016a3f\U00016a5f\U00016a6a-\U00016a6d\U00016a70-\U00016acf\U00016aee-\U00016aef\U00016af6-\U00016aff\U00016b46-\U00016b4f\U00016b5a\U00016b62\U00016b78-\U00016b7c\U00016b90-\U00016e3f\U00016e9b-\U00016eff\U00016f45-\U00016f4f\U00016f7f-\U00016f8e\U00016fa0-\U00016fdf\U00016fe2-\U00016fff\U000187f2-\U000187ff\U00018af3-\U0001afff\U0001b11f-\U0001b16f\U0001b2fc-\U0001bbff\U0001bc6b-\U0001bc6f\U0001bc7d-\U0001bc7f\U0001bc89-\U0001bc8f\U0001bc9a-\U0001bc9b\U0001bca4-\U0001cfff\U0001d0f6-\U0001d0ff\U0001d127-\U0001d128\U0001d1e9-\U0001d1ff\U0001d246-\U0001d2df\U000
1d2f4-\U0001d2ff\U0001d357-\U0001d35f\U0001d379-\U0001d3ff\U0001d455\U0001d49d\U0001d4a0-\U0001d4a1\U0001d4a3-\U0001d4a4\U0001d4a7-\U0001d4a8\U0001d4ad\U0001d4ba\U0001d4bc\U0001d4c4\U0001d506\U0001d50b-\U0001d50c\U0001d515\U0001d51d\U0001d53a\U0001d53f\U0001d545\U0001d547-\U0001d549\U0001d551\U0001d6a6-\U0001d6a7\U0001d7cc-\U0001d7cd\U0001da8c-\U0001da9a\U0001daa0\U0001dab0-\U0001dfff\U0001e007\U0001e019-\U0001e01a\U0001e022\U0001e025\U0001e02b-\U0001e7ff\U0001e8c5-\U0001e8c6\U0001e8d7-\U0001e8ff\U0001e94b-\U0001e94f\U0001e95a-\U0001e95d\U0001e960-\U0001ec70\U0001ecb5-\U0001edff\U0001ee04\U0001ee20\U0001ee23\U0001ee25-\U0001ee26\U0001ee28\U0001ee33\U0001ee38\U0001ee3a\U0001ee3c-\U0001ee41\U0001ee43-\U0001ee46\U0001ee48\U0001ee4a\U0001ee4c\U0001ee50\U0001ee53\U0001ee55-\U0001ee56\U0001ee58\U0001ee5a\U0001ee5c\U0001ee5e\U0001ee60\U0001ee63\U0001ee65-\U0001ee66\U0001ee6b\U0001ee73\U0001ee78\U0001ee7d\U0001ee7f\U0001ee8a\U0001ee9c-\U0001eea0\U0001eea4\U0001eeaa\U0001eebc-\U0001eeef\U0001eef2-\U0001efff\U0001f02c-\U0001f02f\U0001f094-\U0001f09f\U0001f0af-\U0001f0b0\U0001f0c0\U0001f0d0\U0001f0f6-\U0001f0ff\U0001f10d-\U0001f10f\U0001f16c-\U0001f16f\U0001f1ad-\U0001f1e5\U0001f203-\U0001f20f\U0001f23c-\U0001f23f\U0001f249-\U0001f24f\U0001f252-\U0001f25f\U0001f266-\U0001f2ff\U0001f6d5-\U0001f6df\U0001f6ed-\U0001f6ef\U0001f6fa-\U0001f6ff\U0001f774-\U0001f77f\U0001f7d9-\U0001f7ff\U0001f80c-\U0001f80f\U0001f848-\U0001f84f\U0001f85a-\U0001f85f\U0001f888-\U0001f88f\U0001f8ae-\U0001f8ff\U0001f90c-\U0001f90f\U0001f93f\U0001f971-\U0001f972\U0001f977-\U0001f979\U0001f97b\U0001f9a3-\U0001f9af\U0001f9ba-\U0001f9bf\U0001f9c3-\U0001f9cf\U0001fa00-\U0001fa5f\U0001fa6e-\U0001ffff\U0002a6d7-\U0002a6ff\U0002b735-\U0002b73f\U0002b81e-\U0002b81f\U0002cea2-\U0002ceaf\U0002ebe1-\U0002f7ff\U0002fa1e-\U000e0000\U000e0002-\U000e001f\U000e0080-\U000e00ff\U000e01f0-\U000effff\U000ffffe-\U000fffff\U0010fffe-\U0010ffff'
Co = '\ue000-\uf8ff\U000f0000-\U000ffffd\U00100000-\U0010fffd'
Cs = '\ud800-\udbff\\\udc00\udc01-\udfff'
Ll = 'a-z\xb5\xdf-\xf6\xf8-\xff\u0101\u0103\u0105\u0107\u0109\u010b\u010d\u010f\u0111\u0113\u0115\u0117\u0119\u011b\u011d\u011f\u0121\u0123\u0125\u0127\u0129\u012b\u012d\u012f\u0131\u0133\u0135\u0137-\u0138\u013a\u013c\u013e\u0140\u0142\u0144\u0146\u0148-\u0149\u014b\u014d\u014f\u0151\u0153\u0155\u0157\u0159\u015b\u015d\u015f\u0161\u0163\u0165\u0167\u0169\u016b\u016d\u016f\u0171\u0173\u0175\u0177\u017a\u017c\u017e-\u0180\u0183\u0185\u0188\u018c-\u018d\u0192\u0195\u0199-\u019b\u019e\u01a1\u01a3\u01a5\u01a8\u01aa-\u01ab\u01ad\u01b0\u01b4\u01b6\u01b9-\u01ba\u01bd-\u01bf\u01c6\u01c9\u01cc\u01ce\u01d0\u01d2\u01d4\u01d6\u01d8\u01da\u01dc-\u01dd\u01df\u01e1\u01e3\u01e5\u01e7\u01e9\u01eb\u01ed\u01ef-\u01f0\u01f3\u01f5\u01f9\u01fb\u01fd\u01ff\u0201\u0203\u0205\u0207\u0209\u020b\u020d\u020f\u0211\u0213\u0215\u0217\u0219\u021b\u021d\u021f\u0221\u0223\u0225\u0227\u0229\u022b\u022d\u022f\u0231\u0233-\u0239\u023c\u023f-\u0240\u0242\u0247\u0249\u024b\u024d\u024f-\u0293\u0295-\u02af\u0371\u0373\u0377\u037b-\u037d\u0390\u03ac-\u03ce\u03d0-\u03d1\u03d5-\u03d7\u03d9\u03db\u03dd\u03df\u03e1\u03e3\u03e5\u03e7\u03e9\u03eb\u03ed\u03ef-\u03f3\u03f5\u03f8\u03fb-\u03fc\u0430-\u045f\u0461\u0463\u0465\u0467\u0469\u046b\u046d\u046f\u0471\u0473\u0475\u0477\u0479\u047b\u047d\u047f\u0481\u048b\u048d\u048f\u0491\u0493\u0495\u0497\u0499\u049b\u049d\u049f\u04a1\u04a3\u04a5\u04a7\u04a9\u04ab\u04ad\u04af\u04b1\u04b3\u04b5\u04b7\u04b9\u04bb\u04bd\u04bf\u04c2\u04c4\u04c6\u04c8\u04ca\u04cc\u04ce-\u04cf\u04d1\u04d3\u04d5\u04d7\u04d9\u04db\u04dd\u04df\u04e1\u04e3\u04e5\u04e7\u04e9\u04eb\u04ed\u04ef\u04f1\u04f3\u04f5\u04f7\u04f9\u04fb\u04fd\u04ff\u0501\u0503\u0505\u0507\u0509\u050b\u050d\u050f\u0511\u0513\u0515\u0517\u0519\u051b\u051d\u051f\u0521\u0523\u0525\u0527\u0529\u052b\u052d\u052f\u0560-\u0588\u10d0-\u10fa\u10fd-\u10ff\u13f8-\u13fd\u1c80-\u1c88\u1d00-\u1d2b\u1d6b-\u1d77\u1d79-\u1d9a\u1e01\u1e03\u1e05\u1e07\u1e09\u1e0b\u1e0d\u1e0f\u1e11\u1e13\u1e15\u1e17\u1e19\u1e1b\u1e1d\u1e1f\u1e21\u1e23\u1e25\u1e27\u1e29\u1e2b\u1e2d\u1e2f\u1e31\u1e33\u1e35\u1e37\u1e39\u1e3b\u1e3d\u1e3f\u1e41\u1e43\u1e45\u1e47\u1e49\u1e4b\u1e4d\u1e4f\u1e51\u1e53\u1e55\u1e57\u1e59\u1e5b\u1e5d\u1e5f\u1e61\u1e63\u1e65\u1e67\u1e69\u1e6b\u1e6d\u1e6f\u1e71\u1e73\u1e75\u1e77\u1e79\u1e7b\u1e7d\u1e7f\u1e81\u1e83\u1e85\u1e87\u1e89\u1e8b\u1e8d\u1e8f\u1e91\u1e93\u1e95-\u1e9d\u1e9f\u1ea1\u1ea3\u1ea5\u1ea7\u1ea9\u1eab\u1ead\u1eaf\u1eb1\u1eb3\u1eb5\u1eb7\u1eb9\u1ebb\u1ebd\u1ebf\u1ec1\u1ec3\u1ec5\u1ec7\u1ec9\u1ecb\u1ecd\u1ecf\u1ed1\u1ed3\u1ed5\u1ed7\u1ed9\u1edb\u1edd\u1edf\u1ee1\u1ee3\u1ee5\u1ee7\u1ee9\u1eeb\u1eed\u1eef\u1ef1\u1ef3\u1ef5\u1ef7\u1ef9\u1efb\u1efd\u1eff-\u1f07\u1f10-\u1f15\u1f20-\u1f27\u1f30-\u1f37\u1f40-\u1f45\u1f50-\u1f57\u1f60-\u1f67\u1f70-\u1f7d\u1f80-\u1f87\u1f90-\u1f97\u1fa0-\u1fa7\u1fb0-\u1fb4\u1fb6-\u1fb7\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fc7\u1fd0-\u1fd3\u1fd6-\u1fd7\u1fe0-\u1fe7\u1ff2-\u1ff4\u1ff6-\u1ff7\u210a\u210e-\u210f\u2113\u212f\u2134\u2139\u213c-\u213d\u2146-\u2149\u214e\u2184\u2c30-\u2c5e\u2c61\u2c65-\u2c66\u2c68\u2c6a\u2c6c\u2c71\u2c73-\u2c74\u2c76-\u2c7b\u2c81\u2c83\u2c85\u2c87\u2c89\u2c8b\u2c8d\u2c8f\u2c91\u2c93\u2c95\u2c97\u2c99\u2c9b\u2c9d\u2c9f\u2ca1\u2ca3\u2ca5\u2ca7\u2ca9\u2cab\u2cad\u2caf\u2cb1\u2cb3\u2cb5\u2cb7\u2cb9\u2cbb\u2cbd\u2cbf\u2cc1\u2cc3\u2cc5\u2cc7\u2cc9\u2ccb\u2ccd\u2ccf\u2cd1\u2cd3\u2cd5\u2cd7\u2cd9\u2cdb\u2cdd\u2cdf\u2ce1\u2ce3-\u2ce4\u2cec\u2cee\u2cf3\u2d00-\u2d25\u2d27\u2d2d\ua641\ua643\ua645\ua647\ua649\ua64b\ua64d\ua64f\ua651\ua653\ua655\ua657\ua659\ua65b\ua65d\ua65f\ua661\ua663\ua665\ua667\ua669\ua66b\ua66d\ua681\ua683\u
a685\ua687\ua689\ua68b\ua68d\ua68f\ua691\ua693\ua695\ua697\ua699\ua69b\ua723\ua725\ua727\ua729\ua72b\ua72d\ua72f-\ua731\ua733\ua735\ua737\ua739\ua73b\ua73d\ua73f\ua741\ua743\ua745\ua747\ua749\ua74b\ua74d\ua74f\ua751\ua753\ua755\ua757\ua759\ua75b\ua75d\ua75f\ua761\ua763\ua765\ua767\ua769\ua76b\ua76d\ua76f\ua771-\ua778\ua77a\ua77c\ua77f\ua781\ua783\ua785\ua787\ua78c\ua78e\ua791\ua793-\ua795\ua797\ua799\ua79b\ua79d\ua79f\ua7a1\ua7a3\ua7a5\ua7a7\ua7a9\ua7af\ua7b5\ua7b7\ua7b9\ua7fa\uab30-\uab5a\uab60-\uab65\uab70-\uabbf\ufb00-\ufb06\ufb13-\ufb17\uff41-\uff5a\U00010428-\U0001044f\U000104d8-\U000104fb\U00010cc0-\U00010cf2\U000118c0-\U000118df\U00016e60-\U00016e7f\U0001d41a-\U0001d433\U0001d44e-\U0001d454\U0001d456-\U0001d467\U0001d482-\U0001d49b\U0001d4b6-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d4cf\U0001d4ea-\U0001d503\U0001d51e-\U0001d537\U0001d552-\U0001d56b\U0001d586-\U0001d59f\U0001d5ba-\U0001d5d3\U0001d5ee-\U0001d607\U0001d622-\U0001d63b\U0001d656-\U0001d66f\U0001d68a-\U0001d6a5\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6e1\U0001d6fc-\U0001d714\U0001d716-\U0001d71b\U0001d736-\U0001d74e\U0001d750-\U0001d755\U0001d770-\U0001d788\U0001d78a-\U0001d78f\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7c9\U0001d7cb\U0001e922-\U0001e943'
Lm = '\u02b0-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0374\u037a\u0559\u0640\u06e5-\u06e6\u07f4-\u07f5\u07fa\u081a\u0824\u0828\u0971\u0e46\u0ec6\u10fc\u17d7\u1843\u1aa7\u1c78-\u1c7d\u1d2c-\u1d6a\u1d78\u1d9b-\u1dbf\u2071\u207f\u2090-\u209c\u2c7c-\u2c7d\u2d6f\u2e2f\u3005\u3031-\u3035\u303b\u309d-\u309e\u30fc-\u30fe\ua015\ua4f8-\ua4fd\ua60c\ua67f\ua69c-\ua69d\ua717-\ua71f\ua770\ua788\ua7f8-\ua7f9\ua9cf\ua9e6\uaa70\uaadd\uaaf3-\uaaf4\uab5c-\uab5f\uff70\uff9e-\uff9f\U00016b40-\U00016b43\U00016f93-\U00016f9f\U00016fe0-\U00016fe1'
Lo = '\xaa\xba\u01bb\u01c0-\u01c3\u0294\u05d0-\u05ea\u05ef-\u05f2\u0620-\u063f\u0641-\u064a\u066e-\u066f\u0671-\u06d3\u06d5\u06ee-\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u0800-\u0815\u0840-\u0858\u0860-\u086a\u08a0-\u08b4\u08b6-\u08bd\u0904-\u0939\u093d\u0950\u0958-\u0961\u0972-\u0980\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc-\u09dd\u09df-\u09e1\u09f0-\u09f1\u09fc\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0-\u0ae1\u0af9\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3d\u0b5c-\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d\u0c58-\u0c5a\u0c60-\u0c61\u0c80\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0-\u0ce1\u0cf1-\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d54-\u0d56\u0d5f-\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32-\u0e33\u0e40-\u0e45\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb0\u0eb2-\u0eb3\u0ebd\u0ec0-\u0ec4\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065-\u1066\u106e-\u1070\u1075-\u1081\u108e\u1100-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16f1-\u16f8\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17dc\u1820-\u1842\u1844-\u1878\u1880-\u1884\u1887-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191e\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u1a00-\u1a16\u1a20-\u1a54\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae-\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c77\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5-\u1cf6\u2135-\u2138\u2d30-\u2d67\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u3006\u303c\u3041-\u3096\u309f\u30a1-\u30fa\u30ff\u3105-\u312f\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fef\ua000-\ua014\ua016-\ua48c\ua4d0-\ua4f7\ua500-\ua60b\ua610-\ua61f\ua62a-\ua62b\ua66e\ua6a0-\ua6e5\ua78f\ua7f7\ua7fb-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua8fd-\ua8fe\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9e0-\ua9e4\ua9e7-\ua9ef\ua9fa-\ua9fe\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa6f\uaa71-\uaa76\uaa7a\uaa7e-\uaaaf\uaab1\uaab5-\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadc\uaae0-\uaaea\uaaf2\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe70-\ufe74\ufe76-\ufefc\uff66-\uff6f\uff71-\uff9d\uffa0-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U000
10080-\U000100fa\U00010280-\U0001029c\U000102a0-\U000102d0\U00010300-\U0001031f\U0001032d-\U00010340\U00010342-\U00010349\U00010350-\U00010375\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U00010450-\U0001049d\U00010500-\U00010527\U00010530-\U00010563\U00010600-\U00010736\U00010740-\U00010755\U00010760-\U00010767\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010860-\U00010876\U00010880-\U0001089e\U000108e0-\U000108f2\U000108f4-\U000108f5\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00\U00010a10-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a35\U00010a60-\U00010a7c\U00010a80-\U00010a9c\U00010ac0-\U00010ac7\U00010ac9-\U00010ae4\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010b80-\U00010b91\U00010c00-\U00010c48\U00010d00-\U00010d23\U00010f00-\U00010f1c\U00010f27\U00010f30-\U00010f45\U00011003-\U00011037\U00011083-\U000110af\U000110d0-\U000110e8\U00011103-\U00011126\U00011144\U00011150-\U00011172\U00011176\U00011183-\U000111b2\U000111c1-\U000111c4\U000111da\U000111dc\U00011200-\U00011211\U00011213-\U0001122b\U00011280-\U00011286\U00011288\U0001128a-\U0001128d\U0001128f-\U0001129d\U0001129f-\U000112a8\U000112b0-\U000112de\U00011305-\U0001130c\U0001130f-\U00011310\U00011313-\U00011328\U0001132a-\U00011330\U00011332-\U00011333\U00011335-\U00011339\U0001133d\U00011350\U0001135d-\U00011361\U00011400-\U00011434\U00011447-\U0001144a\U00011480-\U000114af\U000114c4-\U000114c5\U000114c7\U00011580-\U000115ae\U000115d8-\U000115db\U00011600-\U0001162f\U00011644\U00011680-\U000116aa\U00011700-\U0001171a\U00011800-\U0001182b\U000118ff\U00011a00\U00011a0b-\U00011a32\U00011a3a\U00011a50\U00011a5c-\U00011a83\U00011a86-\U00011a89\U00011a9d\U00011ac0-\U00011af8\U00011c00-\U00011c08\U00011c0a-\U00011c2e\U00011c40\U00011c72-\U00011c8f\U00011d00-\U00011d06\U00011d08-\U00011d09\U00011d0b-\U00011d30\U00011d46\U00011d60-\U00011d65\U00011d67-\U00011d68\U00011d6a-\U00011d89\U00011d98\U00011ee0-\U00011ef2\U00012000-\U00012399\U00012480-\U00012543\U00013000-\U0001342e\U00014400-\U00014646\U00016800-\U00016a38\U00016a40-\U00016a5e\U00016ad0-\U00016aed\U00016b00-\U00016b2f\U00016b63-\U00016b77\U00016b7d-\U00016b8f\U00016f00-\U00016f44\U00016f50\U00017000-\U000187f1\U00018800-\U00018af2\U0001b000-\U0001b11e\U0001b170-\U0001b2fb\U0001bc00-\U0001bc6a\U0001bc70-\U0001bc7c\U0001bc80-\U0001bc88\U0001bc90-\U0001bc99\U0001e800-\U0001e8c4\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002b820-\U0002cea1\U0002ceb0-\U0002ebe0\U0002f800-\U0002fa1d'
Lt = '\u01c5\u01c8\u01cb\u01f2\u1f88-\u1f8f\u1f98-\u1f9f\u1fa8-\u1faf\u1fbc\u1fcc\u1ffc'
Lu = 'A-Z\xc0-\xd6\xd8-\xde\u0100\u0102\u0104\u0106\u0108\u010a\u010c\u010e\u0110\u0112\u0114\u0116\u0118\u011a\u011c\u011e\u0120\u0122\u0124\u0126\u0128\u012a\u012c\u012e\u0130\u0132\u0134\u0136\u0139\u013b\u013d\u013f\u0141\u0143\u0145\u0147\u014a\u014c\u014e\u0150\u0152\u0154\u0156\u0158\u015a\u015c\u015e\u0160\u0162\u0164\u0166\u0168\u016a\u016c\u016e\u0170\u0172\u0174\u0176\u0178-\u0179\u017b\u017d\u0181-\u0182\u0184\u0186-\u0187\u0189-\u018b\u018e-\u0191\u0193-\u0194\u0196-\u0198\u019c-\u019d\u019f-\u01a0\u01a2\u01a4\u01a6-\u01a7\u01a9\u01ac\u01ae-\u01af\u01b1-\u01b3\u01b5\u01b7-\u01b8\u01bc\u01c4\u01c7\u01ca\u01cd\u01cf\u01d1\u01d3\u01d5\u01d7\u01d9\u01db\u01de\u01e0\u01e2\u01e4\u01e6\u01e8\u01ea\u01ec\u01ee\u01f1\u01f4\u01f6-\u01f8\u01fa\u01fc\u01fe\u0200\u0202\u0204\u0206\u0208\u020a\u020c\u020e\u0210\u0212\u0214\u0216\u0218\u021a\u021c\u021e\u0220\u0222\u0224\u0226\u0228\u022a\u022c\u022e\u0230\u0232\u023a-\u023b\u023d-\u023e\u0241\u0243-\u0246\u0248\u024a\u024c\u024e\u0370\u0372\u0376\u037f\u0386\u0388-\u038a\u038c\u038e-\u038f\u0391-\u03a1\u03a3-\u03ab\u03cf\u03d2-\u03d4\u03d8\u03da\u03dc\u03de\u03e0\u03e2\u03e4\u03e6\u03e8\u03ea\u03ec\u03ee\u03f4\u03f7\u03f9-\u03fa\u03fd-\u042f\u0460\u0462\u0464\u0466\u0468\u046a\u046c\u046e\u0470\u0472\u0474\u0476\u0478\u047a\u047c\u047e\u0480\u048a\u048c\u048e\u0490\u0492\u0494\u0496\u0498\u049a\u049c\u049e\u04a0\u04a2\u04a4\u04a6\u04a8\u04aa\u04ac\u04ae\u04b0\u04b2\u04b4\u04b6\u04b8\u04ba\u04bc\u04be\u04c0-\u04c1\u04c3\u04c5\u04c7\u04c9\u04cb\u04cd\u04d0\u04d2\u04d4\u04d6\u04d8\u04da\u04dc\u04de\u04e0\u04e2\u04e4\u04e6\u04e8\u04ea\u04ec\u04ee\u04f0\u04f2\u04f4\u04f6\u04f8\u04fa\u04fc\u04fe\u0500\u0502\u0504\u0506\u0508\u050a\u050c\u050e\u0510\u0512\u0514\u0516\u0518\u051a\u051c\u051e\u0520\u0522\u0524\u0526\u0528\u052a\u052c\u052e\u0531-\u0556\u10a0-\u10c5\u10c7\u10cd\u13a0-\u13f5\u1c90-\u1cba\u1cbd-\u1cbf\u1e00\u1e02\u1e04\u1e06\u1e08\u1e0a\u1e0c\u1e0e\u1e10\u1e12\u1e14\u1e16\u1e18\u1e1a\u1e1c\u1e1e\u1e20\u1e22\u1e24\u1e26\u1e28\u1e2a\u1e2c\u1e2e\u1e30\u1e32\u1e34\u1e36\u1e38\u1e3a\u1e3c\u1e3e\u1e40\u1e42\u1e44\u1e46\u1e48\u1e4a\u1e4c\u1e4e\u1e50\u1e52\u1e54\u1e56\u1e58\u1e5a\u1e5c\u1e5e\u1e60\u1e62\u1e64\u1e66\u1e68\u1e6a\u1e6c\u1e6e\u1e70\u1e72\u1e74\u1e76\u1e78\u1e7a\u1e7c\u1e7e\u1e80\u1e82\u1e84\u1e86\u1e88\u1e8a\u1e8c\u1e8e\u1e90\u1e92\u1e94\u1e9e\u1ea0\u1ea2\u1ea4\u1ea6\u1ea8\u1eaa\u1eac\u1eae\u1eb0\u1eb2\u1eb4\u1eb6\u1eb8\u1eba\u1ebc\u1ebe\u1ec0\u1ec2\u1ec4\u1ec6\u1ec8\u1eca\u1ecc\u1ece\u1ed0\u1ed2\u1ed4\u1ed6\u1ed8\u1eda\u1edc\u1ede\u1ee0\u1ee2\u1ee4\u1ee6\u1ee8\u1eea\u1eec\u1eee\u1ef0\u1ef2\u1ef4\u1ef6\u1ef8\u1efa\u1efc\u1efe\u1f08-\u1f0f\u1f18-\u1f1d\u1f28-\u1f2f\u1f38-\u1f3f\u1f48-\u1f4d\u1f59\u1f5b\u1f5d\u1f5f\u1f68-\u1f6f\u1fb8-\u1fbb\u1fc8-\u1fcb\u1fd8-\u1fdb\u1fe8-\u1fec\u1ff8-\u1ffb\u2102\u2107\u210b-\u210d\u2110-\u2112\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u2130-\u2133\u213e-\u213f\u2145\u2183\u2c00-\u2c2e\u2c60\u2c62-\u2c64\u2c67\u2c69\u2c6b\u2c6d-\u2c70\u2c72\u2c75\u2c7e-\u2c80\u2c82\u2c84\u2c86\u2c88\u2c8a\u2c8c\u2c8e\u2c90\u2c92\u2c94\u2c96\u2c98\u2c9a\u2c9c\u2c9e\u2ca0\u2ca2\u2ca4\u2ca6\u2ca8\u2caa\u2cac\u2cae\u2cb0\u2cb2\u2cb4\u2cb6\u2cb8\u2cba\u2cbc\u2cbe\u2cc0\u2cc2\u2cc4\u2cc6\u2cc8\u2cca\u2ccc\u2cce\u2cd0\u2cd2\u2cd4\u2cd6\u2cd8\u2cda\u2cdc\u2cde\u2ce0\u2ce2\u2ceb\u2ced\u2cf2\ua640\ua642\ua644\ua646\ua648\ua64a\ua64c\ua64e\ua650\ua652\ua654\ua656\ua658\ua65a\ua65c\ua65e\ua660\ua662\ua664\ua666\ua668\ua66a\ua66c\ua680\ua682\ua684\ua686\ua688\ua68a\ua68c\ua68e\ua690\ua692\ua694\ua696\ua698\ua69a\ua722\ua
724\ua726\ua728\ua72a\ua72c\ua72e\ua732\ua734\ua736\ua738\ua73a\ua73c\ua73e\ua740\ua742\ua744\ua746\ua748\ua74a\ua74c\ua74e\ua750\ua752\ua754\ua756\ua758\ua75a\ua75c\ua75e\ua760\ua762\ua764\ua766\ua768\ua76a\ua76c\ua76e\ua779\ua77b\ua77d-\ua77e\ua780\ua782\ua784\ua786\ua78b\ua78d\ua790\ua792\ua796\ua798\ua79a\ua79c\ua79e\ua7a0\ua7a2\ua7a4\ua7a6\ua7a8\ua7aa-\ua7ae\ua7b0-\ua7b4\ua7b6\ua7b8\uff21-\uff3a\U00010400-\U00010427\U000104b0-\U000104d3\U00010c80-\U00010cb2\U000118a0-\U000118bf\U00016e40-\U00016e5f\U0001d400-\U0001d419\U0001d434-\U0001d44d\U0001d468-\U0001d481\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b5\U0001d4d0-\U0001d4e9\U0001d504-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d538-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d56c-\U0001d585\U0001d5a0-\U0001d5b9\U0001d5d4-\U0001d5ed\U0001d608-\U0001d621\U0001d63c-\U0001d655\U0001d670-\U0001d689\U0001d6a8-\U0001d6c0\U0001d6e2-\U0001d6fa\U0001d71c-\U0001d734\U0001d756-\U0001d76e\U0001d790-\U0001d7a8\U0001d7ca\U0001e900-\U0001e921'
Mc = '\u0903\u093b\u093e-\u0940\u0949-\u094c\u094e-\u094f\u0982-\u0983\u09be-\u09c0\u09c7-\u09c8\u09cb-\u09cc\u09d7\u0a03\u0a3e-\u0a40\u0a83\u0abe-\u0ac0\u0ac9\u0acb-\u0acc\u0b02-\u0b03\u0b3e\u0b40\u0b47-\u0b48\u0b4b-\u0b4c\u0b57\u0bbe-\u0bbf\u0bc1-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcc\u0bd7\u0c01-\u0c03\u0c41-\u0c44\u0c82-\u0c83\u0cbe\u0cc0-\u0cc4\u0cc7-\u0cc8\u0cca-\u0ccb\u0cd5-\u0cd6\u0d02-\u0d03\u0d3e-\u0d40\u0d46-\u0d48\u0d4a-\u0d4c\u0d57\u0d82-\u0d83\u0dcf-\u0dd1\u0dd8-\u0ddf\u0df2-\u0df3\u0f3e-\u0f3f\u0f7f\u102b-\u102c\u1031\u1038\u103b-\u103c\u1056-\u1057\u1062-\u1064\u1067-\u106d\u1083-\u1084\u1087-\u108c\u108f\u109a-\u109c\u17b6\u17be-\u17c5\u17c7-\u17c8\u1923-\u1926\u1929-\u192b\u1930-\u1931\u1933-\u1938\u1a19-\u1a1a\u1a55\u1a57\u1a61\u1a63-\u1a64\u1a6d-\u1a72\u1b04\u1b35\u1b3b\u1b3d-\u1b41\u1b43-\u1b44\u1b82\u1ba1\u1ba6-\u1ba7\u1baa\u1be7\u1bea-\u1bec\u1bee\u1bf2-\u1bf3\u1c24-\u1c2b\u1c34-\u1c35\u1ce1\u1cf2-\u1cf3\u1cf7\u302e-\u302f\ua823-\ua824\ua827\ua880-\ua881\ua8b4-\ua8c3\ua952-\ua953\ua983\ua9b4-\ua9b5\ua9ba-\ua9bb\ua9bd-\ua9c0\uaa2f-\uaa30\uaa33-\uaa34\uaa4d\uaa7b\uaa7d\uaaeb\uaaee-\uaaef\uaaf5\uabe3-\uabe4\uabe6-\uabe7\uabe9-\uabea\uabec\U00011000\U00011002\U00011082\U000110b0-\U000110b2\U000110b7-\U000110b8\U0001112c\U00011145-\U00011146\U00011182\U000111b3-\U000111b5\U000111bf-\U000111c0\U0001122c-\U0001122e\U00011232-\U00011233\U00011235\U000112e0-\U000112e2\U00011302-\U00011303\U0001133e-\U0001133f\U00011341-\U00011344\U00011347-\U00011348\U0001134b-\U0001134d\U00011357\U00011362-\U00011363\U00011435-\U00011437\U00011440-\U00011441\U00011445\U000114b0-\U000114b2\U000114b9\U000114bb-\U000114be\U000114c1\U000115af-\U000115b1\U000115b8-\U000115bb\U000115be\U00011630-\U00011632\U0001163b-\U0001163c\U0001163e\U000116ac\U000116ae-\U000116af\U000116b6\U00011720-\U00011721\U00011726\U0001182c-\U0001182e\U00011838\U00011a39\U00011a57-\U00011a58\U00011a97\U00011c2f\U00011c3e\U00011ca9\U00011cb1\U00011cb4\U00011d8a-\U00011d8e\U00011d93-\U00011d94\U00011d96\U00011ef5-\U00011ef6\U00016f51-\U00016f7e\U0001d165-\U0001d166\U0001d16d-\U0001d172'
Me = '\u0488-\u0489\u1abe\u20dd-\u20e0\u20e2-\u20e4\ua670-\ua672'
Mn = '\u0300-\u036f\u0483-\u0487\u0591-\u05bd\u05bf\u05c1-\u05c2\u05c4-\u05c5\u05c7\u0610-\u061a\u064b-\u065f\u0670\u06d6-\u06dc\u06df-\u06e4\u06e7-\u06e8\u06ea-\u06ed\u0711\u0730-\u074a\u07a6-\u07b0\u07eb-\u07f3\u07fd\u0816-\u0819\u081b-\u0823\u0825-\u0827\u0829-\u082d\u0859-\u085b\u08d3-\u08e1\u08e3-\u0902\u093a\u093c\u0941-\u0948\u094d\u0951-\u0957\u0962-\u0963\u0981\u09bc\u09c1-\u09c4\u09cd\u09e2-\u09e3\u09fe\u0a01-\u0a02\u0a3c\u0a41-\u0a42\u0a47-\u0a48\u0a4b-\u0a4d\u0a51\u0a70-\u0a71\u0a75\u0a81-\u0a82\u0abc\u0ac1-\u0ac5\u0ac7-\u0ac8\u0acd\u0ae2-\u0ae3\u0afa-\u0aff\u0b01\u0b3c\u0b3f\u0b41-\u0b44\u0b4d\u0b56\u0b62-\u0b63\u0b82\u0bc0\u0bcd\u0c00\u0c04\u0c3e-\u0c40\u0c46-\u0c48\u0c4a-\u0c4d\u0c55-\u0c56\u0c62-\u0c63\u0c81\u0cbc\u0cbf\u0cc6\u0ccc-\u0ccd\u0ce2-\u0ce3\u0d00-\u0d01\u0d3b-\u0d3c\u0d41-\u0d44\u0d4d\u0d62-\u0d63\u0dca\u0dd2-\u0dd4\u0dd6\u0e31\u0e34-\u0e3a\u0e47-\u0e4e\u0eb1\u0eb4-\u0eb9\u0ebb-\u0ebc\u0ec8-\u0ecd\u0f18-\u0f19\u0f35\u0f37\u0f39\u0f71-\u0f7e\u0f80-\u0f84\u0f86-\u0f87\u0f8d-\u0f97\u0f99-\u0fbc\u0fc6\u102d-\u1030\u1032-\u1037\u1039-\u103a\u103d-\u103e\u1058-\u1059\u105e-\u1060\u1071-\u1074\u1082\u1085-\u1086\u108d\u109d\u135d-\u135f\u1712-\u1714\u1732-\u1734\u1752-\u1753\u1772-\u1773\u17b4-\u17b5\u17b7-\u17bd\u17c6\u17c9-\u17d3\u17dd\u180b-\u180d\u1885-\u1886\u18a9\u1920-\u1922\u1927-\u1928\u1932\u1939-\u193b\u1a17-\u1a18\u1a1b\u1a56\u1a58-\u1a5e\u1a60\u1a62\u1a65-\u1a6c\u1a73-\u1a7c\u1a7f\u1ab0-\u1abd\u1b00-\u1b03\u1b34\u1b36-\u1b3a\u1b3c\u1b42\u1b6b-\u1b73\u1b80-\u1b81\u1ba2-\u1ba5\u1ba8-\u1ba9\u1bab-\u1bad\u1be6\u1be8-\u1be9\u1bed\u1bef-\u1bf1\u1c2c-\u1c33\u1c36-\u1c37\u1cd0-\u1cd2\u1cd4-\u1ce0\u1ce2-\u1ce8\u1ced\u1cf4\u1cf8-\u1cf9\u1dc0-\u1df9\u1dfb-\u1dff\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2cef-\u2cf1\u2d7f\u2de0-\u2dff\u302a-\u302d\u3099-\u309a\ua66f\ua674-\ua67d\ua69e-\ua69f\ua6f0-\ua6f1\ua802\ua806\ua80b\ua825-\ua826\ua8c4-\ua8c5\ua8e0-\ua8f1\ua8ff\ua926-\ua92d\ua947-\ua951\ua980-\ua982\ua9b3\ua9b6-\ua9b9\ua9bc\ua9e5\uaa29-\uaa2e\uaa31-\uaa32\uaa35-\uaa36\uaa43\uaa4c\uaa7c\uaab0\uaab2-\uaab4\uaab7-\uaab8\uaabe-\uaabf\uaac1\uaaec-\uaaed\uaaf6\uabe5\uabe8\uabed\ufb1e\ufe00-\ufe0f\ufe20-\ufe2f\U000101fd\U000102e0\U00010376-\U0001037a\U00010a01-\U00010a03\U00010a05-\U00010a06\U00010a0c-\U00010a0f\U00010a38-\U00010a3a\U00010a3f\U00010ae5-\U00010ae6\U00010d24-\U00010d27\U00010f46-\U00010f50\U00011001\U00011038-\U00011046\U0001107f-\U00011081\U000110b3-\U000110b6\U000110b9-\U000110ba\U00011100-\U00011102\U00011127-\U0001112b\U0001112d-\U00011134\U00011173\U00011180-\U00011181\U000111b6-\U000111be\U000111c9-\U000111cc\U0001122f-\U00011231\U00011234\U00011236-\U00011237\U0001123e\U000112df\U000112e3-\U000112ea\U00011300-\U00011301\U0001133b-\U0001133c\U00011340\U00011366-\U0001136c\U00011370-\U00011374\U00011438-\U0001143f\U00011442-\U00011444\U00011446\U0001145e\U000114b3-\U000114b8\U000114ba\U000114bf-\U000114c0\U000114c2-\U000114c3\U000115b2-\U000115b5\U000115bc-\U000115bd\U000115bf-\U000115c0\U000115dc-\U000115dd\U00011633-\U0001163a\U0001163d\U0001163f-\U00011640\U000116ab\U000116ad\U000116b0-\U000116b5\U000116b7\U0001171d-\U0001171f\U00011722-\U00011725\U00011727-\U0001172b\U0001182f-\U00011837\U00011839-\U0001183a\U00011a01-\U00011a0a\U00011a33-\U00011a38\U00011a3b-\U00011a3e\U00011a47\U00011a51-\U00011a56\U00011a59-\U00011a5b\U00011a8a-\U00011a96\U00011a98-\U00011a99\U00011c30-\U00011c36\U00011c38-\U00011c3d\U00011c3f\U00011c92-\U00011ca7\U00011caa-\U00011cb0\U00011cb2-\U00011cb3\U00011cb5-\U00011cb6\U00011d31-\U00011d36\U00011d3a\U00011d3c-\U00011d3d\U00011d3f-
\U00011d45\U00011d47\U00011d90-\U00011d91\U00011d95\U00011d97\U00011ef3-\U00011ef4\U00016af0-\U00016af4\U00016b30-\U00016b36\U00016f8f-\U00016f92\U0001bc9d-\U0001bc9e\U0001d167-\U0001d169\U0001d17b-\U0001d182\U0001d185-\U0001d18b\U0001d1aa-\U0001d1ad\U0001d242-\U0001d244\U0001da00-\U0001da36\U0001da3b-\U0001da6c\U0001da75\U0001da84\U0001da9b-\U0001da9f\U0001daa1-\U0001daaf\U0001e000-\U0001e006\U0001e008-\U0001e018\U0001e01b-\U0001e021\U0001e023-\U0001e024\U0001e026-\U0001e02a\U0001e8d0-\U0001e8d6\U0001e944-\U0001e94a\U000e0100-\U000e01ef'
Nd = '0-9\u0660-\u0669\u06f0-\u06f9\u07c0-\u07c9\u0966-\u096f\u09e6-\u09ef\u0a66-\u0a6f\u0ae6-\u0aef\u0b66-\u0b6f\u0be6-\u0bef\u0c66-\u0c6f\u0ce6-\u0cef\u0d66-\u0d6f\u0de6-\u0def\u0e50-\u0e59\u0ed0-\u0ed9\u0f20-\u0f29\u1040-\u1049\u1090-\u1099\u17e0-\u17e9\u1810-\u1819\u1946-\u194f\u19d0-\u19d9\u1a80-\u1a89\u1a90-\u1a99\u1b50-\u1b59\u1bb0-\u1bb9\u1c40-\u1c49\u1c50-\u1c59\ua620-\ua629\ua8d0-\ua8d9\ua900-\ua909\ua9d0-\ua9d9\ua9f0-\ua9f9\uaa50-\uaa59\uabf0-\uabf9\uff10-\uff19\U000104a0-\U000104a9\U00010d30-\U00010d39\U00011066-\U0001106f\U000110f0-\U000110f9\U00011136-\U0001113f\U000111d0-\U000111d9\U000112f0-\U000112f9\U00011450-\U00011459\U000114d0-\U000114d9\U00011650-\U00011659\U000116c0-\U000116c9\U00011730-\U00011739\U000118e0-\U000118e9\U00011c50-\U00011c59\U00011d50-\U00011d59\U00011da0-\U00011da9\U00016a60-\U00016a69\U00016b50-\U00016b59\U0001d7ce-\U0001d7ff\U0001e950-\U0001e959'
Nl = '\u16ee-\u16f0\u2160-\u2182\u2185-\u2188\u3007\u3021-\u3029\u3038-\u303a\ua6e6-\ua6ef\U00010140-\U00010174\U00010341\U0001034a\U000103d1-\U000103d5\U00012400-\U0001246e'
No = '\xb2-\xb3\xb9\xbc-\xbe\u09f4-\u09f9\u0b72-\u0b77\u0bf0-\u0bf2\u0c78-\u0c7e\u0d58-\u0d5e\u0d70-\u0d78\u0f2a-\u0f33\u1369-\u137c\u17f0-\u17f9\u19da\u2070\u2074-\u2079\u2080-\u2089\u2150-\u215f\u2189\u2460-\u249b\u24ea-\u24ff\u2776-\u2793\u2cfd\u3192-\u3195\u3220-\u3229\u3248-\u324f\u3251-\u325f\u3280-\u3289\u32b1-\u32bf\ua830-\ua835\U00010107-\U00010133\U00010175-\U00010178\U0001018a-\U0001018b\U000102e1-\U000102fb\U00010320-\U00010323\U00010858-\U0001085f\U00010879-\U0001087f\U000108a7-\U000108af\U000108fb-\U000108ff\U00010916-\U0001091b\U000109bc-\U000109bd\U000109c0-\U000109cf\U000109d2-\U000109ff\U00010a40-\U00010a48\U00010a7d-\U00010a7e\U00010a9d-\U00010a9f\U00010aeb-\U00010aef\U00010b58-\U00010b5f\U00010b78-\U00010b7f\U00010ba9-\U00010baf\U00010cfa-\U00010cff\U00010e60-\U00010e7e\U00010f1d-\U00010f26\U00010f51-\U00010f54\U00011052-\U00011065\U000111e1-\U000111f4\U0001173a-\U0001173b\U000118ea-\U000118f2\U00011c5a-\U00011c6c\U00016b5b-\U00016b61\U00016e80-\U00016e96\U0001d2e0-\U0001d2f3\U0001d360-\U0001d378\U0001e8c7-\U0001e8cf\U0001ec71-\U0001ecab\U0001ecad-\U0001ecaf\U0001ecb1-\U0001ecb4\U0001f100-\U0001f10c'
Pc = '_\u203f-\u2040\u2054\ufe33-\ufe34\ufe4d-\ufe4f\uff3f'
Pd = '\\-\u058a\u05be\u1400\u1806\u2010-\u2015\u2e17\u2e1a\u2e3a-\u2e3b\u2e40\u301c\u3030\u30a0\ufe31-\ufe32\ufe58\ufe63\uff0d'
Pe = ')\\]}\u0f3b\u0f3d\u169c\u2046\u207e\u208e\u2309\u230b\u232a\u2769\u276b\u276d\u276f\u2771\u2773\u2775\u27c6\u27e7\u27e9\u27eb\u27ed\u27ef\u2984\u2986\u2988\u298a\u298c\u298e\u2990\u2992\u2994\u2996\u2998\u29d9\u29db\u29fd\u2e23\u2e25\u2e27\u2e29\u3009\u300b\u300d\u300f\u3011\u3015\u3017\u3019\u301b\u301e-\u301f\ufd3e\ufe18\ufe36\ufe38\ufe3a\ufe3c\ufe3e\ufe40\ufe42\ufe44\ufe48\ufe5a\ufe5c\ufe5e\uff09\uff3d\uff5d\uff60\uff63'
Pf = '\xbb\u2019\u201d\u203a\u2e03\u2e05\u2e0a\u2e0d\u2e1d\u2e21'
Pi = '\xab\u2018\u201b-\u201c\u201f\u2039\u2e02\u2e04\u2e09\u2e0c\u2e1c\u2e20'
Po = "!-#%-'*,.-/:-;?-@\\\\\xa1\xa7\xb6-\xb7\xbf\u037e\u0387\u055a-\u055f\u0589\u05c0\u05c3\u05c6\u05f3-\u05f4\u0609-\u060a\u060c-\u060d\u061b\u061e-\u061f\u066a-\u066d\u06d4\u0700-\u070d\u07f7-\u07f9\u0830-\u083e\u085e\u0964-\u0965\u0970\u09fd\u0a76\u0af0\u0c84\u0df4\u0e4f\u0e5a-\u0e5b\u0f04-\u0f12\u0f14\u0f85\u0fd0-\u0fd4\u0fd9-\u0fda\u104a-\u104f\u10fb\u1360-\u1368\u166d-\u166e\u16eb-\u16ed\u1735-\u1736\u17d4-\u17d6\u17d8-\u17da\u1800-\u1805\u1807-\u180a\u1944-\u1945\u1a1e-\u1a1f\u1aa0-\u1aa6\u1aa8-\u1aad\u1b5a-\u1b60\u1bfc-\u1bff\u1c3b-\u1c3f\u1c7e-\u1c7f\u1cc0-\u1cc7\u1cd3\u2016-\u2017\u2020-\u2027\u2030-\u2038\u203b-\u203e\u2041-\u2043\u2047-\u2051\u2053\u2055-\u205e\u2cf9-\u2cfc\u2cfe-\u2cff\u2d70\u2e00-\u2e01\u2e06-\u2e08\u2e0b\u2e0e-\u2e16\u2e18-\u2e19\u2e1b\u2e1e-\u2e1f\u2e2a-\u2e2e\u2e30-\u2e39\u2e3c-\u2e3f\u2e41\u2e43-\u2e4e\u3001-\u3003\u303d\u30fb\ua4fe-\ua4ff\ua60d-\ua60f\ua673\ua67e\ua6f2-\ua6f7\ua874-\ua877\ua8ce-\ua8cf\ua8f8-\ua8fa\ua8fc\ua92e-\ua92f\ua95f\ua9c1-\ua9cd\ua9de-\ua9df\uaa5c-\uaa5f\uaade-\uaadf\uaaf0-\uaaf1\uabeb\ufe10-\ufe16\ufe19\ufe30\ufe45-\ufe46\ufe49-\ufe4c\ufe50-\ufe52\ufe54-\ufe57\ufe5f-\ufe61\ufe68\ufe6a-\ufe6b\uff01-\uff03\uff05-\uff07\uff0a\uff0c\uff0e-\uff0f\uff1a-\uff1b\uff1f-\uff20\uff3c\uff61\uff64-\uff65\U00010100-\U00010102\U0001039f\U000103d0\U0001056f\U00010857\U0001091f\U0001093f\U00010a50-\U00010a58\U00010a7f\U00010af0-\U00010af6\U00010b39-\U00010b3f\U00010b99-\U00010b9c\U00010f55-\U00010f59\U00011047-\U0001104d\U000110bb-\U000110bc\U000110be-\U000110c1\U00011140-\U00011143\U00011174-\U00011175\U000111c5-\U000111c8\U000111cd\U000111db\U000111dd-\U000111df\U00011238-\U0001123d\U000112a9\U0001144b-\U0001144f\U0001145b\U0001145d\U000114c6\U000115c1-\U000115d7\U00011641-\U00011643\U00011660-\U0001166c\U0001173c-\U0001173e\U0001183b\U00011a3f-\U00011a46\U00011a9a-\U00011a9c\U00011a9e-\U00011aa2\U00011c41-\U00011c45\U00011c70-\U00011c71\U00011ef7-\U00011ef8\U00012470-\U00012474\U00016a6e-\U00016a6f\U00016af5\U00016b37-\U00016b3b\U00016b44\U00016e97-\U00016e9a\U0001bc9f\U0001da87-\U0001da8b\U0001e95e-\U0001e95f"
Ps = '(\\[{\u0f3a\u0f3c\u169b\u201a\u201e\u2045\u207d\u208d\u2308\u230a\u2329\u2768\u276a\u276c\u276e\u2770\u2772\u2774\u27c5\u27e6\u27e8\u27ea\u27ec\u27ee\u2983\u2985\u2987\u2989\u298b\u298d\u298f\u2991\u2993\u2995\u2997\u29d8\u29da\u29fc\u2e22\u2e24\u2e26\u2e28\u2e42\u3008\u300a\u300c\u300e\u3010\u3014\u3016\u3018\u301a\u301d\ufd3f\ufe17\ufe35\ufe37\ufe39\ufe3b\ufe3d\ufe3f\ufe41\ufe43\ufe47\ufe59\ufe5b\ufe5d\uff08\uff3b\uff5b\uff5f\uff62'
Sc = '$\xa2-\xa5\u058f\u060b\u07fe-\u07ff\u09f2-\u09f3\u09fb\u0af1\u0bf9\u0e3f\u17db\u20a0-\u20bf\ua838\ufdfc\ufe69\uff04\uffe0-\uffe1\uffe5-\uffe6\U0001ecb0'
Sk = '\\^`\xa8\xaf\xb4\xb8\u02c2-\u02c5\u02d2-\u02df\u02e5-\u02eb\u02ed\u02ef-\u02ff\u0375\u0384-\u0385\u1fbd\u1fbf-\u1fc1\u1fcd-\u1fcf\u1fdd-\u1fdf\u1fed-\u1fef\u1ffd-\u1ffe\u309b-\u309c\ua700-\ua716\ua720-\ua721\ua789-\ua78a\uab5b\ufbb2-\ufbc1\uff3e\uff40\uffe3\U0001f3fb-\U0001f3ff'
Sm = '+<->|~\xac\xb1\xd7\xf7\u03f6\u0606-\u0608\u2044\u2052\u207a-\u207c\u208a-\u208c\u2118\u2140-\u2144\u214b\u2190-\u2194\u219a-\u219b\u21a0\u21a3\u21a6\u21ae\u21ce-\u21cf\u21d2\u21d4\u21f4-\u22ff\u2320-\u2321\u237c\u239b-\u23b3\u23dc-\u23e1\u25b7\u25c1\u25f8-\u25ff\u266f\u27c0-\u27c4\u27c7-\u27e5\u27f0-\u27ff\u2900-\u2982\u2999-\u29d7\u29dc-\u29fb\u29fe-\u2aff\u2b30-\u2b44\u2b47-\u2b4c\ufb29\ufe62\ufe64-\ufe66\uff0b\uff1c-\uff1e\uff5c\uff5e\uffe2\uffe9-\uffec\U0001d6c1\U0001d6db\U0001d6fb\U0001d715\U0001d735\U0001d74f\U0001d76f\U0001d789\U0001d7a9\U0001d7c3\U0001eef0-\U0001eef1'
So = '\xa6\xa9\xae\xb0\u0482\u058d-\u058e\u060e-\u060f\u06de\u06e9\u06fd-\u06fe\u07f6\u09fa\u0b70\u0bf3-\u0bf8\u0bfa\u0c7f\u0d4f\u0d79\u0f01-\u0f03\u0f13\u0f15-\u0f17\u0f1a-\u0f1f\u0f34\u0f36\u0f38\u0fbe-\u0fc5\u0fc7-\u0fcc\u0fce-\u0fcf\u0fd5-\u0fd8\u109e-\u109f\u1390-\u1399\u1940\u19de-\u19ff\u1b61-\u1b6a\u1b74-\u1b7c\u2100-\u2101\u2103-\u2106\u2108-\u2109\u2114\u2116-\u2117\u211e-\u2123\u2125\u2127\u2129\u212e\u213a-\u213b\u214a\u214c-\u214d\u214f\u218a-\u218b\u2195-\u2199\u219c-\u219f\u21a1-\u21a2\u21a4-\u21a5\u21a7-\u21ad\u21af-\u21cd\u21d0-\u21d1\u21d3\u21d5-\u21f3\u2300-\u2307\u230c-\u231f\u2322-\u2328\u232b-\u237b\u237d-\u239a\u23b4-\u23db\u23e2-\u2426\u2440-\u244a\u249c-\u24e9\u2500-\u25b6\u25b8-\u25c0\u25c2-\u25f7\u2600-\u266e\u2670-\u2767\u2794-\u27bf\u2800-\u28ff\u2b00-\u2b2f\u2b45-\u2b46\u2b4d-\u2b73\u2b76-\u2b95\u2b98-\u2bc8\u2bca-\u2bfe\u2ce5-\u2cea\u2e80-\u2e99\u2e9b-\u2ef3\u2f00-\u2fd5\u2ff0-\u2ffb\u3004\u3012-\u3013\u3020\u3036-\u3037\u303e-\u303f\u3190-\u3191\u3196-\u319f\u31c0-\u31e3\u3200-\u321e\u322a-\u3247\u3250\u3260-\u327f\u328a-\u32b0\u32c0-\u32fe\u3300-\u33ff\u4dc0-\u4dff\ua490-\ua4c6\ua828-\ua82b\ua836-\ua837\ua839\uaa77-\uaa79\ufdfd\uffe4\uffe8\uffed-\uffee\ufffc-\ufffd\U00010137-\U0001013f\U00010179-\U00010189\U0001018c-\U0001018e\U00010190-\U0001019b\U000101a0\U000101d0-\U000101fc\U00010877-\U00010878\U00010ac8\U0001173f\U00016b3c-\U00016b3f\U00016b45\U0001bc9c\U0001d000-\U0001d0f5\U0001d100-\U0001d126\U0001d129-\U0001d164\U0001d16a-\U0001d16c\U0001d183-\U0001d184\U0001d18c-\U0001d1a9\U0001d1ae-\U0001d1e8\U0001d200-\U0001d241\U0001d245\U0001d300-\U0001d356\U0001d800-\U0001d9ff\U0001da37-\U0001da3a\U0001da6d-\U0001da74\U0001da76-\U0001da83\U0001da85-\U0001da86\U0001ecac\U0001f000-\U0001f02b\U0001f030-\U0001f093\U0001f0a0-\U0001f0ae\U0001f0b1-\U0001f0bf\U0001f0c1-\U0001f0cf\U0001f0d1-\U0001f0f5\U0001f110-\U0001f16b\U0001f170-\U0001f1ac\U0001f1e6-\U0001f202\U0001f210-\U0001f23b\U0001f240-\U0001f248\U0001f250-\U0001f251\U0001f260-\U0001f265\U0001f300-\U0001f3fa\U0001f400-\U0001f6d4\U0001f6e0-\U0001f6ec\U0001f6f0-\U0001f6f9\U0001f700-\U0001f773\U0001f780-\U0001f7d8\U0001f800-\U0001f80b\U0001f810-\U0001f847\U0001f850-\U0001f859\U0001f860-\U0001f887\U0001f890-\U0001f8ad\U0001f900-\U0001f90b\U0001f910-\U0001f93e\U0001f940-\U0001f970\U0001f973-\U0001f976\U0001f97a\U0001f97c-\U0001f9a2\U0001f9b0-\U0001f9b9\U0001f9c0-\U0001f9c2\U0001f9d0-\U0001f9ff\U0001fa60-\U0001fa6d'
Zl = '\u2028'
Zp = '\u2029'
Zs = ' \xa0\u1680\u2000-\u200a\u202f\u205f\u3000'
xid_continue = '0-9A-Z_a-z\xaa\xb5\xb7\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0300-\u0374\u0376-\u0377\u037b-\u037d\u037f\u0386-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u0483-\u0487\u048a-\u052f\u0531-\u0556\u0559\u0560-\u0588\u0591-\u05bd\u05bf\u05c1-\u05c2\u05c4-\u05c5\u05c7\u05d0-\u05ea\u05ef-\u05f2\u0610-\u061a\u0620-\u0669\u066e-\u06d3\u06d5-\u06dc\u06df-\u06e8\u06ea-\u06fc\u06ff\u0710-\u074a\u074d-\u07b1\u07c0-\u07f5\u07fa\u07fd\u0800-\u082d\u0840-\u085b\u0860-\u086a\u08a0-\u08b4\u08b6-\u08bd\u08d3-\u08e1\u08e3-\u0963\u0966-\u096f\u0971-\u0983\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bc-\u09c4\u09c7-\u09c8\u09cb-\u09ce\u09d7\u09dc-\u09dd\u09df-\u09e3\u09e6-\u09f1\u09fc\u09fe\u0a01-\u0a03\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a3c\u0a3e-\u0a42\u0a47-\u0a48\u0a4b-\u0a4d\u0a51\u0a59-\u0a5c\u0a5e\u0a66-\u0a75\u0a81-\u0a83\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abc-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ad0\u0ae0-\u0ae3\u0ae6-\u0aef\u0af9-\u0aff\u0b01-\u0b03\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3c-\u0b44\u0b47-\u0b48\u0b4b-\u0b4d\u0b56-\u0b57\u0b5c-\u0b5d\u0b5f-\u0b63\u0b66-\u0b6f\u0b71\u0b82-\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd0\u0bd7\u0be6-\u0bef\u0c00-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d-\u0c44\u0c46-\u0c48\u0c4a-\u0c4d\u0c55-\u0c56\u0c58-\u0c5a\u0c60-\u0c63\u0c66-\u0c6f\u0c80-\u0c83\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbc-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5-\u0cd6\u0cde\u0ce0-\u0ce3\u0ce6-\u0cef\u0cf1-\u0cf2\u0d00-\u0d03\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d44\u0d46-\u0d48\u0d4a-\u0d4e\u0d54-\u0d57\u0d5f-\u0d63\u0d66-\u0d6f\u0d7a-\u0d7f\u0d82-\u0d83\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0de6-\u0def\u0df2-\u0df3\u0e01-\u0e3a\u0e40-\u0e4e\u0e50-\u0e59\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb9\u0ebb-\u0ebd\u0ec0-\u0ec4\u0ec6\u0ec8-\u0ecd\u0ed0-\u0ed9\u0edc-\u0edf\u0f00\u0f18-\u0f19\u0f20-\u0f29\u0f35\u0f37\u0f39\u0f3e-\u0f47\u0f49-\u0f6c\u0f71-\u0f84\u0f86-\u0f97\u0f99-\u0fbc\u0fc6\u1000-\u1049\u1050-\u109d\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u135d-\u135f\u1369-\u1371\u1380-\u138f\u13a0-\u13f5\u13f8-\u13fd\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f8\u1700-\u170c\u170e-\u1714\u1720-\u1734\u1740-\u1753\u1760-\u176c\u176e-\u1770\u1772-\u1773\u1780-\u17d3\u17d7\u17dc-\u17dd\u17e0-\u17e9\u180b-\u180d\u1810-\u1819\u1820-\u1878\u1880-\u18aa\u18b0-\u18f5\u1900-\u191e\u1920-\u192b\u1930-\u193b\u1946-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u19d0-\u19da\u1a00-\u1a1b\u1a20-\u1a5e\u1a60-\u1a7c\u1a7f-\u1a89\u1a90-\u1a99\u1aa7\u1ab0-\u1abd\u1b00-\u1b4b\u1b50-\u1b59\u1b6b-\u1b73\u1b80-\u1bf3\u1c00-\u1c37\u1c40-\u1c49\u1c4d-\u1c7d\u1c80-\u1c88\u1c90-\u1cba\u1cbd-\u1cbf\u1cd0-\u1cd2\u1cd4-\u1cf9\u1d00-\u1df9\u1dfb-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1f
d6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u203f-\u2040\u2054\u2071\u207f\u2090-\u209c\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2102\u2107\u210a-\u2113\u2115\u2118-\u211d\u2124\u2126\u2128\u212a-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d7f-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u2de0-\u2dff\u3005-\u3007\u3021-\u302f\u3031-\u3035\u3038-\u303c\u3041-\u3096\u3099-\u309a\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312f\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fef\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua62b\ua640-\ua66f\ua674-\ua67d\ua67f-\ua6f1\ua717-\ua71f\ua722-\ua788\ua78b-\ua7b9\ua7f7-\ua827\ua840-\ua873\ua880-\ua8c5\ua8d0-\ua8d9\ua8e0-\ua8f7\ua8fb\ua8fd-\ua92d\ua930-\ua953\ua960-\ua97c\ua980-\ua9c0\ua9cf-\ua9d9\ua9e0-\ua9fe\uaa00-\uaa36\uaa40-\uaa4d\uaa50-\uaa59\uaa60-\uaa76\uaa7a-\uaac2\uaadb-\uaadd\uaae0-\uaaef\uaaf2-\uaaf6\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uab30-\uab5a\uab5c-\uab65\uab70-\uabea\uabec-\uabed\uabf0-\uabf9\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufc5d\ufc64-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdf9\ufe00-\ufe0f\ufe20-\ufe2f\ufe33-\ufe34\ufe4d-\ufe4f\ufe71\ufe73\ufe77\ufe79\ufe7b\ufe7d\ufe7f-\ufefc\uff10-\uff19\uff21-\uff3a\uff3f\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010140-\U00010174\U000101fd\U00010280-\U0001029c\U000102a0-\U000102d0\U000102e0\U00010300-\U0001031f\U0001032d-\U0001034a\U00010350-\U0001037a\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U000103d1-\U000103d5\U00010400-\U0001049d\U000104a0-\U000104a9\U000104b0-\U000104d3\U000104d8-\U000104fb\U00010500-\U00010527\U00010530-\U00010563\U00010600-\U00010736\U00010740-\U00010755\U00010760-\U00010767\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010860-\U00010876\U00010880-\U0001089e\U000108e0-\U000108f2\U000108f4-\U000108f5\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00-\U00010a03\U00010a05-\U00010a06\U00010a0c-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a35\U00010a38-\U00010a3a\U00010a3f\U00010a60-\U00010a7c\U00010a80-\U00010a9c\U00010ac0-\U00010ac7\U00010ac9-\U00010ae6\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010b80-\U00010b91\U00010c00-\U00010c48\U00010c80-\U00010cb2\U00010cc0-\U00010cf2\U00010d00-\U00010d27\U00010d30-\U00010d39\U00010f00-\U00010f1c\U00010f27\U00010f30-\U00010f50\U00011000-\U00011046\U00011066-\U0001106f\U0001107f-\U000110ba\U000110d0-\U000110e8\U000110f0-\U000110f9\U00011100-\U00011134\U00011136-\U0001113f\U00011144-\U00011146\U00011150-\U00011173\U00011176\U00011180-\U000111c4\U000111c9-\U000111cc\U000111d0-\U000111da\U000111dc\U00011200-\U00011211\U00011213-\U00011237\U0001123e\U00011280-\U00011286\U00011288\U0001128a-\U0001128d\U0001128f-\U0001129d\U0001129f-\U000112a8\U000112b0-\U000112ea\U000112f0-\U000112f9\U00011300-\U00011303\U00011305-\U0001130c\U0001130f-\U00011310\U00011313-\U00011328\U0001132a-\U00011330\U00011332-\U00011333\U00011335-\U00011339\U0001133b-\U0001134
4\U00011347-\U00011348\U0001134b-\U0001134d\U00011350\U00011357\U0001135d-\U00011363\U00011366-\U0001136c\U00011370-\U00011374\U00011400-\U0001144a\U00011450-\U00011459\U0001145e\U00011480-\U000114c5\U000114c7\U000114d0-\U000114d9\U00011580-\U000115b5\U000115b8-\U000115c0\U000115d8-\U000115dd\U00011600-\U00011640\U00011644\U00011650-\U00011659\U00011680-\U000116b7\U000116c0-\U000116c9\U00011700-\U0001171a\U0001171d-\U0001172b\U00011730-\U00011739\U00011800-\U0001183a\U000118a0-\U000118e9\U000118ff\U00011a00-\U00011a3e\U00011a47\U00011a50-\U00011a83\U00011a86-\U00011a99\U00011a9d\U00011ac0-\U00011af8\U00011c00-\U00011c08\U00011c0a-\U00011c36\U00011c38-\U00011c40\U00011c50-\U00011c59\U00011c72-\U00011c8f\U00011c92-\U00011ca7\U00011ca9-\U00011cb6\U00011d00-\U00011d06\U00011d08-\U00011d09\U00011d0b-\U00011d36\U00011d3a\U00011d3c-\U00011d3d\U00011d3f-\U00011d47\U00011d50-\U00011d59\U00011d60-\U00011d65\U00011d67-\U00011d68\U00011d6a-\U00011d8e\U00011d90-\U00011d91\U00011d93-\U00011d98\U00011da0-\U00011da9\U00011ee0-\U00011ef6\U00012000-\U00012399\U00012400-\U0001246e\U00012480-\U00012543\U00013000-\U0001342e\U00014400-\U00014646\U00016800-\U00016a38\U00016a40-\U00016a5e\U00016a60-\U00016a69\U00016ad0-\U00016aed\U00016af0-\U00016af4\U00016b00-\U00016b36\U00016b40-\U00016b43\U00016b50-\U00016b59\U00016b63-\U00016b77\U00016b7d-\U00016b8f\U00016e40-\U00016e7f\U00016f00-\U00016f44\U00016f50-\U00016f7e\U00016f8f-\U00016f9f\U00016fe0-\U00016fe1\U00017000-\U000187f1\U00018800-\U00018af2\U0001b000-\U0001b11e\U0001b170-\U0001b2fb\U0001bc00-\U0001bc6a\U0001bc70-\U0001bc7c\U0001bc80-\U0001bc88\U0001bc90-\U0001bc99\U0001bc9d-\U0001bc9e\U0001d165-\U0001d169\U0001d16d-\U0001d172\U0001d17b-\U0001d182\U0001d185-\U0001d18b\U0001d1aa-\U0001d1ad\U0001d242-\U0001d244\U0001d400-\U0001d454\U0001d456-\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d51e-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d552-\U0001d6a5\U0001d6a8-\U0001d6c0\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6fa\U0001d6fc-\U0001d714\U0001d716-\U0001d734\U0001d736-\U0001d74e\U0001d750-\U0001d76e\U0001d770-\U0001d788\U0001d78a-\U0001d7a8\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7cb\U0001d7ce-\U0001d7ff\U0001da00-\U0001da36\U0001da3b-\U0001da6c\U0001da75\U0001da84\U0001da9b-\U0001da9f\U0001daa1-\U0001daaf\U0001e000-\U0001e006\U0001e008-\U0001e018\U0001e01b-\U0001e021\U0001e023-\U0001e024\U0001e026-\U0001e02a\U0001e800-\U0001e8c4\U0001e8d0-\U0001e8d6\U0001e900-\U0001e94a\U0001e950-\U0001e959\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002b820-\U0002cea1\U0002ceb0-\U0002ebe0\U0002f800-\U0002fa1d\U000e0100-\U000e01ef'
xid_start = 'A-Z_a-z\xaa\xb5\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0370-\u0374\u0376-\u0377\u037b-\u037d\u037f\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u048a-\u052f\u0531-\u0556\u0559\u0560-\u0588\u05d0-\u05ea\u05ef-\u05f2\u0620-\u064a\u066e-\u066f\u0671-\u06d3\u06d5\u06e5-\u06e6\u06ee-\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u07f4-\u07f5\u07fa\u0800-\u0815\u081a\u0824\u0828\u0840-\u0858\u0860-\u086a\u08a0-\u08b4\u08b6-\u08bd\u0904-\u0939\u093d\u0950\u0958-\u0961\u0971-\u0980\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc-\u09dd\u09df-\u09e1\u09f0-\u09f1\u09fc\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0-\u0ae1\u0af9\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3d\u0b5c-\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d\u0c58-\u0c5a\u0c60-\u0c61\u0c80\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0-\u0ce1\u0cf1-\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d54-\u0d56\u0d5f-\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32\u0e40-\u0e46\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb0\u0eb2\u0ebd\u0ec0-\u0ec4\u0ec6\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065-\u1066\u106e-\u1070\u1075-\u1081\u108e\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u13a0-\u13f5\u13f8-\u13fd\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f8\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17d7\u17dc\u1820-\u1878\u1880-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191e\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u1a00-\u1a16\u1a20-\u1a54\u1aa7\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae-\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c7d\u1c80-\u1c88\u1c90-\u1cba\u1cbd-\u1cbf\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5-\u1cf6\u1d00-\u1dbf\u1e00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u2071\u207f\u2090-\u209c\u2102\u2107\u210a-\u2113\u2115\u2118-\u211d\u2124\u2126\u2128\u212a-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cee\u2cf2-\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u3005-\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303c\u3041-\u3096\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312f\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fef\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua61f\ua62a-\ua62b\ua640-\ua66e\ua67f-\ua69d\ua6a0-\ua6ef\ua717-\ua71f\ua722-\ua788\ua78b-\ua7b9\ua7f7-\ua801\
ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua8fd-\ua8fe\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9cf\ua9e0-\ua9e4\ua9e6-\ua9ef\ua9fa-\ua9fe\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa76\uaa7a\uaa7e-\uaaaf\uaab1\uaab5-\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadd\uaae0-\uaaea\uaaf2-\uaaf4\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uab30-\uab5a\uab5c-\uab65\uab70-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufc5d\ufc64-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdf9\ufe71\ufe73\ufe77\ufe79\ufe7b\ufe7d\ufe7f-\ufefc\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d\uffa0-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010140-\U00010174\U00010280-\U0001029c\U000102a0-\U000102d0\U00010300-\U0001031f\U0001032d-\U0001034a\U00010350-\U00010375\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U000103d1-\U000103d5\U00010400-\U0001049d\U000104b0-\U000104d3\U000104d8-\U000104fb\U00010500-\U00010527\U00010530-\U00010563\U00010600-\U00010736\U00010740-\U00010755\U00010760-\U00010767\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010860-\U00010876\U00010880-\U0001089e\U000108e0-\U000108f2\U000108f4-\U000108f5\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00\U00010a10-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a35\U00010a60-\U00010a7c\U00010a80-\U00010a9c\U00010ac0-\U00010ac7\U00010ac9-\U00010ae4\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010b80-\U00010b91\U00010c00-\U00010c48\U00010c80-\U00010cb2\U00010cc0-\U00010cf2\U00010d00-\U00010d23\U00010f00-\U00010f1c\U00010f27\U00010f30-\U00010f45\U00011003-\U00011037\U00011083-\U000110af\U000110d0-\U000110e8\U00011103-\U00011126\U00011144\U00011150-\U00011172\U00011176\U00011183-\U000111b2\U000111c1-\U000111c4\U000111da\U000111dc\U00011200-\U00011211\U00011213-\U0001122b\U00011280-\U00011286\U00011288\U0001128a-\U0001128d\U0001128f-\U0001129d\U0001129f-\U000112a8\U000112b0-\U000112de\U00011305-\U0001130c\U0001130f-\U00011310\U00011313-\U00011328\U0001132a-\U00011330\U00011332-\U00011333\U00011335-\U00011339\U0001133d\U00011350\U0001135d-\U00011361\U00011400-\U00011434\U00011447-\U0001144a\U00011480-\U000114af\U000114c4-\U000114c5\U000114c7\U00011580-\U000115ae\U000115d8-\U000115db\U00011600-\U0001162f\U00011644\U00011680-\U000116aa\U00011700-\U0001171a\U00011800-\U0001182b\U000118a0-\U000118df\U000118ff\U00011a00\U00011a0b-\U00011a32\U00011a3a\U00011a50\U00011a5c-\U00011a83\U00011a86-\U00011a89\U00011a9d\U00011ac0-\U00011af8\U00011c00-\U00011c08\U00011c0a-\U00011c2e\U00011c40\U00011c72-\U00011c8f\U00011d00-\U00011d06\U00011d08-\U00011d09\U00011d0b-\U00011d30\U00011d46\U00011d60-\U00011d65\U00011d67-\U00011d68\U00011d6a-\U00011d89\U00011d98\U00011ee0-\U00011ef2\U00012000-\U00012399\U00012400-\U0001246e\U00012480-\U00012543\U00013000-\U0001342e\U00014400-\U00014646\U00016800-\U00016a38\U00016a40-\U00016a5e\U00016ad0-\U00016aed\U00016b00-\U00016b2f\U00016b40-\U00016b43\U00016b63-\U00016b77\U00016b7d-\U00016b8f\U00016e40-\U00016e7f\U00016f00-\U00016f44\U00016f50\U00016f93-\U00016f9f\U00016fe0-\U00016fe1\U00017000-\U000187f1\U00018800-\U00018af2\U0001b
000-\U0001b11e\U0001b170-\U0001b2fb\U0001bc00-\U0001bc6a\U0001bc70-\U0001bc7c\U0001bc80-\U0001bc88\U0001bc90-\U0001bc99\U0001d400-\U0001d454\U0001d456-\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d51e-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d552-\U0001d6a5\U0001d6a8-\U0001d6c0\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6fa\U0001d6fc-\U0001d714\U0001d716-\U0001d734\U0001d736-\U0001d74e\U0001d750-\U0001d76e\U0001d770-\U0001d788\U0001d78a-\U0001d7a8\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7cb\U0001e800-\U0001e8c4\U0001e900-\U0001e943\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002b820-\U0002cea1\U0002ceb0-\U0002ebe0\U0002f800-\U0002fa1d'
cats = ['Cc', 'Cf', 'Cn', 'Co', 'Cs', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu', 'Mc', 'Me', 'Mn', 'Nd', 'Nl', 'No', 'Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps', 'Sc', 'Sk', 'Sm', 'So', 'Zl', 'Zp', 'Zs']
# Generated from unidata 11.0.0
def combine(*args):
return ''.join(globals()[cat] for cat in args)
def allexcept(*args):
newcats = cats[:]
for arg in args:
newcats.remove(arg)
return ''.join(globals()[cat] for cat in newcats)
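# Example (editor's sketch, not part of the generated data): the category
# strings above are meant to be embedded in regex character classes. Assuming
# this module is importable as `unistring`, one might write:
#
#     import re
#     import unistring as uni
#     letters = re.compile('[%s]' % uni.combine('Lu', 'Ll', 'Lt', 'Lm', 'Lo'))
#     not_letters = re.compile('[%s]' % uni.allexcept('Lu', 'Ll', 'Lt', 'Lm', 'Lo'))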
def _handle_runs(char_list): # pragma: no cover
buf = []
for c in char_list:
if len(c) == 1:
if buf and buf[-1][1] == chr(ord(c)-1):
buf[-1] = (buf[-1][0], c)
else:
buf.append((c, c))
else:
buf.append((c, c))
for a, b in buf:
if a == b:
yield a
else:
yield '%s-%s' % (a, b)
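# Example of what _handle_runs yields (editor's illustration): consecutive
# single characters are collapsed into regex-style ranges, e.g.
#
#     list(_handle_runs(['a', 'b', 'c', 'x']))  # -> ['a-c', 'x']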
if __name__ == '__main__': # pragma: no cover
import unicodedata
categories = {'xid_start': [], 'xid_continue': []}
with open(__file__) as fp:
content = fp.read()
header = content[:content.find('Cc =')]
footer = content[content.find("def combine("):]
for code in range(0x110000):
c = chr(code)
cat = unicodedata.category(c)
if ord(c) == 0xdc00:
# Hack to avoid combining this combining with the preceding high
# surrogate, 0xdbff, when doing a repr.
c = '\\' + c
elif ord(c) in (0x2d, 0x5b, 0x5c, 0x5d, 0x5e):
# Escape regex metachars.
c = '\\' + c
categories.setdefault(cat, []).append(c)
# XID_START and XID_CONTINUE are special categories used for matching
# identifiers in Python 3.
if c.isidentifier():
categories['xid_start'].append(c)
if ('a' + c).isidentifier():
categories['xid_continue'].append(c)
with open(__file__, 'w') as fp:
fp.write(header)
for cat in sorted(categories):
val = ''.join(_handle_runs(categories[cat]))
fp.write('%s = %a\n\n' % (cat, val))
cats = sorted(categories)
cats.remove('xid_start')
cats.remove('xid_continue')
fp.write('cats = %r\n\n' % cats)
fp.write('# Generated from unidata %s\n\n' % (unicodedata.unidata_version,))
fp.write(footer)
|
py | 7dfd26547a7373ec6d750105a7c4d863206fb560 | # Copyright 2014 Hewlett-Packard Development Company, L.P
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pytest
from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.regions import messages
class TestKeypair(helpers.TestCase):
"""Checks that the user is able to create/delete keypair."""
KEYPAIR_NAME = helpers.gen_random_resource_name("keypair")
@pytest.mark.skip(reason="Bug 1774697")
def test_keypair(self):
keypair_page = self.home_pg.\
go_to_project_compute_keypairspage()
keypair_page.create_keypair(self.KEYPAIR_NAME)
self.assertFalse(keypair_page.find_message_and_dismiss(messages.ERROR))
keypair_page = self.home_pg.\
go_to_project_compute_keypairspage()
self.assertTrue(keypair_page.is_keypair_present(self.KEYPAIR_NAME))
keypair_page.delete_keypair(self.KEYPAIR_NAME)
self.assertTrue(
keypair_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(keypair_page.find_message_and_dismiss(messages.ERROR))
self.assertFalse(keypair_page.is_keypair_present(self.KEYPAIR_NAME))
|
py | 7dfd26b10970e80fcddbc88b5ccd4e6304bd469f | # This script invokes the module for executing the algorithm. By default, if ExecuteAlgorithmWrapITK.py is available, it is used. Otherwise ExecuteAlgorithmCSwig.py is used.
import sys
try:
from ExecuteAlgorithmWrapITK import *
except:
try:
from ExecuteAlgorithmCSwig import *
except:
print "Could not load module for executing. Please Check if ExecuteAlgorithmWrapITK.py or ExecuteAlgorithmCSwig.py is reachable by Python."
|
py | 7dfd26b548dc6fb2d69b27f771f47db418fb2ac7 | # MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2018
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
This module implements the base class `DetectorClassifier` for classifier and detector combinations.
Paper link:
https://arxiv.org/abs/1705.07263
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from typing import List, Optional, Union, Tuple, TYPE_CHECKING
import numpy as np
from art.estimators.classification.classifier import ClassifierNeuralNetwork
if TYPE_CHECKING:
from art.utils import PREPROCESSING_TYPE
from art.data_generators import DataGenerator
from art.defences.preprocessor import Preprocessor
from art.defences.postprocessor import Postprocessor
logger = logging.getLogger(__name__)
class DetectorClassifier(ClassifierNeuralNetwork):
"""
This class implements a Classifier extension that wraps a classifier and a detector.
More details in https://arxiv.org/abs/1705.07263
"""
estimator_params = ClassifierNeuralNetwork.estimator_params + ["classifier", "detector"]
def __init__(
self,
classifier: ClassifierNeuralNetwork,
detector: ClassifierNeuralNetwork,
preprocessing_defences: Union["Preprocessor", List["Preprocessor"], None] = None,
postprocessing_defences: Union["Postprocessor", List["Postprocessor"], None] = None,
preprocessing: "PREPROCESSING_TYPE" = (0.0, 1.0),
) -> None:
"""
Initialization for the DetectorClassifier.
:param classifier: A trained classifier.
:param detector: A trained detector applied for the binary classification.
:param preprocessing_defences: Preprocessing defence(s) to be applied by the classifier. Not applicable
in this classifier.
:param postprocessing_defences: Postprocessing defence(s) to be applied by the classifier.
:param preprocessing: Tuple of the form `(subtrahend, divisor)` of floats or `np.ndarray` of values to be
used for data preprocessing. The first value will be subtracted from the input. The input will then
be divided by the second one. Not applicable in this classifier.
"""
if preprocessing_defences is not None:
raise NotImplementedError("Preprocessing is not applicable in this classifier.")
super().__init__(
model=None,
clip_values=classifier.clip_values,
preprocessing=preprocessing,
channels_first=classifier.channels_first,
preprocessing_defences=preprocessing_defences,
postprocessing_defences=postprocessing_defences,
)
self.classifier = classifier
self.detector = detector
self._nb_classes = classifier.nb_classes + 1
self._input_shape = classifier.input_shape
@property
def input_shape(self) -> Tuple[int, ...]:
"""
Return the shape of one input sample.
:return: Shape of one input sample.
"""
return self._input_shape # type: ignore
def predict(self, x: np.ndarray, batch_size: int = 128, **kwargs) -> np.ndarray:
"""
Perform prediction for a batch of inputs.
:param x: Input samples.
:param batch_size: Size of batches.
:return: Array of predictions of shape `(nb_inputs, nb_classes)`.
"""
# Compute the prediction logits
classifier_outputs = self.classifier.predict(x=x, batch_size=batch_size)
detector_outputs = self.detector.predict(x=x, batch_size=batch_size)
detector_outputs = (np.reshape(detector_outputs, [-1]) + 1) * np.max(classifier_outputs, axis=1)
detector_outputs = np.reshape(detector_outputs, [-1, 1])
combined_outputs = np.concatenate([classifier_outputs, detector_outputs], axis=1)
# Apply postprocessing
predictions = self._apply_postprocessing(preds=combined_outputs, fit=False)
return predictions
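    # Worked example of the combination above (editor's illustration): if the
    # classifier outputs logits [2.0, 5.0] for a sample and the detector
    # outputs 0.5, the extra logit is (0.5 + 1) * max([2.0, 5.0]) = 7.5 and the
    # combined prediction is [2.0, 5.0, 7.5]; with a detector output of -0.5
    # the extra logit would be 2.5 and the original class keeps the argmax.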
def fit(self, x: np.ndarray, y: np.ndarray, batch_size: int = 128, nb_epochs: int = 10, **kwargs) -> None:
"""
Fit the classifier on the training set `(x, y)`.
:param x: Training data.
:param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes).
:param batch_size: Size of batches.
:param nb_epochs: Number of epochs to use for training.
        :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for
                       PyTorch and providing it has no effect.
:raises `NotImplementedException`: This method is not supported for detector-classifiers.
"""
raise NotImplementedError
def fit_generator(self, generator: "DataGenerator", nb_epochs: int = 20, **kwargs) -> None:
"""
Fit the classifier using the generator that yields batches as specified.
:param generator: Batch generator providing `(x, y)` for each epoch.
:param nb_epochs: Number of epochs to use for training.
        :param kwargs: Dictionary of framework-specific arguments. This parameter is not currently supported for
                       PyTorch and providing it has no effect.
:raises `NotImplementedException`: This method is not supported for detector-classifiers.
"""
raise NotImplementedError
def class_gradient( # pylint: disable=W0221
self,
x: np.ndarray,
label: Union[int, List[int], np.ndarray, None] = None,
training_mode: bool = False,
**kwargs
) -> np.ndarray:
"""
Compute per-class derivatives w.r.t. `x`.
:param x: Sample input with shape as expected by the model.
:param label: Index of a specific per-class derivative. If an integer is provided, the gradient of that class
output is computed for all samples. If multiple values as provided, the first dimension should
match the batch size of `x`, and each value will be used as target for its corresponding sample in
`x`. If `None`, then gradients for all classes will be computed for each sample.
        :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode.
:return: Array of gradients of input features w.r.t. each class in the form
`(batch_size, nb_classes, input_shape)` when computing for all classes, otherwise shape becomes
`(batch_size, 1, input_shape)` when `label` parameter is specified.
"""
if not ( # pragma: no cover
(label is None)
or (isinstance(label, (int, np.integer)) and label in range(self.nb_classes))
or (
isinstance(label, np.ndarray)
and len(label.shape) == 1
and (label < self.nb_classes).all()
and label.shape[0] == x.shape[0]
)
):
raise ValueError("Label %s is out of range." % label)
# Compute the gradient and return
if label is None:
combined_grads = self._compute_combined_grads(x, label=None)
        elif isinstance(label, (int, np.integer)):
if label < self.nb_classes - 1:
# Compute and return from the classifier gradients
combined_grads = self.classifier.class_gradient(x=x, label=label, training_mode=training_mode, **kwargs)
else:
# First compute the classifier gradients
classifier_grads = self.classifier.class_gradient(
x=x, label=None, training_mode=training_mode, **kwargs
)
# Then compute the detector gradients
detector_grads = self.detector.class_gradient(x=x, label=0, training_mode=training_mode, **kwargs)
# Chain the detector gradients for the first component
classifier_preds = self.classifier.predict(x=x)
maxind_classifier_preds = np.argmax(classifier_preds, axis=1)
max_classifier_preds = classifier_preds[np.arange(x.shape[0]), maxind_classifier_preds]
first_detector_grads = max_classifier_preds[:, None, None, None, None] * detector_grads
# Chain the detector gradients for the second component
max_classifier_grads = classifier_grads[np.arange(len(classifier_grads)), maxind_classifier_preds]
detector_preds = self.detector.predict(x=x)
second_detector_grads = max_classifier_grads * (detector_preds + 1)[:, None, None]
second_detector_grads = second_detector_grads[None, ...]
second_detector_grads = np.swapaxes(second_detector_grads, 0, 1)
# Update detector gradients
combined_grads = first_detector_grads + second_detector_grads
else:
# Compute indexes for classifier labels and detector labels
classifier_idx = np.where(label < self.nb_classes - 1)
detector_idx = np.where(label == self.nb_classes - 1)
# Initialize the combined gradients
combined_grads = np.zeros(shape=(x.shape[0], 1, x.shape[1], x.shape[2], x.shape[3]))
# First compute the classifier gradients for classifier_idx
            # np.where returns a tuple of index arrays, so test the selected indices explicitly
            if classifier_idx[0].size > 0:
combined_grads[classifier_idx] = self.classifier.class_gradient(
x=x[classifier_idx], label=label[classifier_idx], training_mode=training_mode, **kwargs
)
# Then compute the detector gradients for detector_idx
            if detector_idx[0].size > 0:
# First compute the classifier gradients for detector_idx
classifier_grads = self.classifier.class_gradient(
x=x[detector_idx], label=None, training_mode=training_mode, **kwargs
)
# Then compute the detector gradients for detector_idx
detector_grads = self.detector.class_gradient(
x=x[detector_idx], label=0, training_mode=training_mode, **kwargs
)
# Chain the detector gradients for the first component
classifier_preds = self.classifier.predict(x=x[detector_idx])
maxind_classifier_preds = np.argmax(classifier_preds, axis=1)
                max_classifier_preds = classifier_preds[np.arange(classifier_preds.shape[0]), maxind_classifier_preds]
first_detector_grads = max_classifier_preds[:, None, None, None, None] * detector_grads
# Chain the detector gradients for the second component
max_classifier_grads = classifier_grads[np.arange(len(classifier_grads)), maxind_classifier_preds]
detector_preds = self.detector.predict(x=x[detector_idx])
second_detector_grads = max_classifier_grads * (detector_preds + 1)[:, None, None]
second_detector_grads = second_detector_grads[None, ...]
second_detector_grads = np.swapaxes(second_detector_grads, 0, 1)
# Update detector gradients
detector_grads = first_detector_grads + second_detector_grads
# Reassign the combined gradients
combined_grads[detector_idx] = detector_grads
return combined_grads
def compute_loss(self, x: np.ndarray, y: np.ndarray, **kwargs) -> np.ndarray:
"""
Compute the loss of the neural network for samples `x`.
:param x: Samples of shape (nb_samples, nb_features) or (nb_samples, nb_pixels_1, nb_pixels_2,
nb_channels) or (nb_samples, nb_channels, nb_pixels_1, nb_pixels_2).
:param y: Target values (class labels) one-hot-encoded of shape `(nb_samples, nb_classes)` or indices
of shape `(nb_samples,)`.
:return: Loss values.
:rtype: Format as expected by the `model`
"""
raise NotImplementedError
def loss_gradient( # pylint: disable=W0221
self, x: np.ndarray, y: np.ndarray, training_mode: bool = False, **kwargs
) -> np.ndarray:
"""
Compute the gradient of the loss function w.r.t. `x`.
:param x: Sample input with shape as expected by the model.
:param y: Target values (class labels) one-hot-encoded of shape (nb_samples, nb_classes) or indices of shape
(nb_samples,).
        :param training_mode: `True` for model set to training mode and `False` for model set to evaluation mode.
:return: Array of gradients of the same shape as `x`.
:raises `NotImplementedException`: This method is not supported for detector-classifiers.
"""
raise NotImplementedError
@property
def layer_names(self) -> List[str]:
"""
Return the hidden layers in the model, if applicable. This function is not supported for the
Classifier and Detector classes.
:return: The hidden layers in the model, input and output layers excluded.
:raises `NotImplementedException`: This method is not supported for detector-classifiers.
"""
raise NotImplementedError
def get_activations(
self, x: np.ndarray, layer: Union[int, str], batch_size: int = 128, framework: bool = False
) -> np.ndarray:
"""
Return the output of the specified layer for input `x`. `layer` is specified by layer index (between 0 and
`nb_layers - 1`) or by name. The number of layers can be determined by counting the results returned by
calling `layer_names`.
:param x: Input for computing the activations.
:param layer: Layer for computing the activations.
:param batch_size: Size of batches.
:param framework: If true, return the intermediate tensor representation of the activation.
:return: The output of `layer`, where the first dimension is the batch size corresponding to `x`.
:raises `NotImplementedException`: This method is not supported for detector-classifiers.
"""
raise NotImplementedError
def save(self, filename: str, path: Optional[str] = None) -> None:
"""
Save a model to file in the format specific to the backend framework.
:param filename: Name of the file where to store the model.
:param path: Path of the folder where to store the model. If no path is specified, the model will be stored in
the default data location of the library `ART_DATA_PATH`.
"""
self.classifier.save(filename=filename + "_classifier", path=path)
self.detector.save(filename=filename + "_detector", path=path)
def __repr__(self):
repr_ = "%s(classifier=%r, detector=%r, postprocessing_defences=%r, " "preprocessing=%r)" % (
self.__module__ + "." + self.__class__.__name__,
self.classifier,
self.detector,
self.postprocessing_defences,
self.preprocessing,
)
return repr_
def _compute_combined_grads(
self, x: np.ndarray, label: Union[int, List[int], np.ndarray, None] = None
) -> np.ndarray:
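        # Editor's note (derived from reading the code below, not from the
        # original source): the extra output is
        #     g(x) = (detector(x) + 1) * max_j classifier_j(x)
        # so by the product rule its gradient has two terms,
        #     dg/dx = max_j classifier_j(x) * d detector(x)/dx
        #           + (detector(x) + 1) * d classifier_argmax(x)/dx
        # which correspond to the "first component" and "second component"
        # blocks computed here.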
# Compute the classifier gradients
classifier_grads = self.classifier.class_gradient(x=x, label=label)
# Then compute the detector gradients
detector_grads = self.detector.class_gradient(x=x, label=label)
# Chain the detector gradients for the first component
classifier_preds = self.classifier.predict(x=x)
maxind_classifier_preds = np.argmax(classifier_preds, axis=1)
max_classifier_preds = classifier_preds[np.arange(classifier_preds.shape[0]), maxind_classifier_preds]
first_detector_grads = max_classifier_preds[:, None, None, None, None] * detector_grads
# Chain the detector gradients for the second component
max_classifier_grads = classifier_grads[np.arange(len(classifier_grads)), maxind_classifier_preds]
detector_preds = self.detector.predict(x=x)
second_detector_grads = max_classifier_grads * (detector_preds + 1)[:, None, None]
second_detector_grads = second_detector_grads[None, ...]
second_detector_grads = np.swapaxes(second_detector_grads, 0, 1)
# Update detector gradients
detector_grads = first_detector_grads + second_detector_grads
# Combine the gradients
combined_logits_grads = np.concatenate([classifier_grads, detector_grads], axis=1)
return combined_logits_grads
|
py | 7dfd270606ec4cba68a27d6569dad49736474f05 | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 22 13:11:38 2012
@author: proto
"""
from pyparsing import Word, Suppress,Optional,alphanums,Group,ZeroOrMore
import numpy as np
import json
import itertools
import utils.structures as st
from copy import deepcopy,copy
import detectOntology
import re
import difflib
from utils.util import logMess
from collections import defaultdict
import itertools
import math
from collections import Counter
import re
from utils.util import pmemoize as memoize
'''
This file classifies rules according to the information contained in the JSON
config file, based on their reactants and products.
'''
@memoize
def get_close_matches(match, dataset, cutoff=0.6):
return difflib.get_close_matches(match, dataset, cutoff=cutoff)
@memoize
def sequenceMatcher(a,b):
'''
compares two strings ignoring underscores
'''
return difflib.SequenceMatcher(lambda x:x == '_',a,b).ratio()
name = Word(alphanums + '_-') + ':'
species = (Word(alphanums + "_" + ":#-")
+ Suppress('()') + Optional(Suppress('@' + Word(alphanums + '_-')))) + ZeroOrMore(Suppress('+') + Word(alphanums + "_" + ":#-")
+ Suppress("()") + Optional(Suppress('@' + Word(alphanums + '_-'))))
rate = Word(alphanums + "()")
grammar = Suppress(Optional(name)) + ((Group(species) | '0') + Suppress(Optional("<") + "->") + (Group(species) | '0') + Suppress(rate))
@memoize
def parseReactions(reaction, specialSymbols=''):
if reaction.startswith('#'):
return None
result = grammar.parseString(reaction).asList()
if len(result) < 2:
result = [result, []]
if '<->' in reaction and len(result[0]) == 1 and len(result[1]) == 2:
result.reverse()
return result
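# Example (editor's illustration of the grammar above):
#
#     parseReactions('EGF() + EGFR() -> EGF_EGFR() k1')
#     # -> [['EGF', 'EGFR'], ['EGF_EGFR']]
#
# i.e. a list of reactant names followed by a list of product names; the rate
# expression and the '()' markers are suppressed.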
def addToDependencyGraph(dependencyGraph, label, value):
if label not in dependencyGraph:
dependencyGraph[label] = []
if value not in dependencyGraph[label] and value != []:
dependencyGraph[label].append(value)
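# Example (editor's illustration): the dependency graph maps a species name to
# a list of candidate decompositions, each itself a list of component names:
#
#     graph = {}
#     addToDependencyGraph(graph, 'EGF_EGFR', ['EGF', 'EGFR'])
#     # graph == {'EGF_EGFR': [['EGF', 'EGFR']]}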
class SBMLAnalyzer:
def __init__(self, modelParser, configurationFile, namingConventions, speciesEquivalences=None, conservationOfMass = True):
self.modelParser = modelParser
self.configurationFile = configurationFile
self.namingConventions = detectOntology.loadOntology(namingConventions)
self.userNamingConventions = copy(self.namingConventions)
self.speciesEquivalences = speciesEquivalences
self.userEquivalencesDict = None
self.lexicalSpecies = []
self.conservationOfMass = conservationOfMass
def distanceToModification(self, particle, modifiedElement, translationKeys):
posparticlePos = [m.start() + len(particle) for m in re.finditer(particle, modifiedElement)]
preparticlePos = [m.start() for m in re.finditer(particle, modifiedElement)]
keyPos = [m.start() for m in re.finditer(translationKeys, modifiedElement)]
distance = [abs(y-x) for x in posparticlePos for y in keyPos]
distance.extend([abs(y-x) for x in preparticlePos for y in keyPos])
distance.append(9999)
return min(distance)
def fuzzyArtificialReaction(self,baseElements,modifiedElement,molecules):
'''
in case we don't know how a species is composed but we know its base
elements, try to get it by concatenating its basic reactants
'''
import collections
compare = lambda x, y: collections.Counter(x) == collections.Counter(y)
equivalenceTranslator,translationKeys,conventionDict = self.processNamingConventions2(molecules)
indirectEquivalenceTranslator= {x:[] for x in equivalenceTranslator}
self.processFuzzyReaction([baseElements,modifiedElement],translationKeys,conventionDict,indirectEquivalenceTranslator)
newBaseElements = baseElements
for modification in indirectEquivalenceTranslator:
for element in indirectEquivalenceTranslator[modification]:
newBaseElements = [element[2][1] if x==element[2][0] else x for x in newBaseElements]
if compare(baseElements,newBaseElements):
return None
return newBaseElements
def analyzeSpeciesModification2(self, baseElement, modifiedElement, partialAnalysis):
"""
A method to read modifications within complexes.
"""
def index_min(values):
return min(xrange(len(values)), key=values.__getitem__)
equivalenceTranslator, translationKeys, conventionDict = self.processNamingConventions2([baseElement, modifiedElement])
differencePosition = [(i, x) for i, x in enumerate(difflib.ndiff(baseElement, modifiedElement)) if x.startswith('+')]
tmp = ''
lastIdx = 0
newDifferencePosition = []
for i in range(len(differencePosition)):
tmp += differencePosition[i][1][-1]
if tmp in translationKeys:
newDifferencePosition.append(((differencePosition[lastIdx][0] + differencePosition[i][0]) / 2, tmp))
tmp = ''
lastIdx = i
differencePosition = newDifferencePosition
if len(differencePosition) == 0:
return None, None, None
sortedPartialAnalysis = sorted(partialAnalysis, key=len, reverse=True)
tokenPosition = []
tmpModifiedElement = modifiedElement
for token in sortedPartialAnalysis:
sequenceMatcher = difflib.SequenceMatcher(None, token, tmpModifiedElement)
#sequenceMatcher2 = difflib.SequenceMatcher(None,token,baseElement)
modifiedMatchingBlocks = [m.span() for m in re.finditer(token, tmpModifiedElement)]
baseMatchingBlocks = [m.span() for m in re.finditer(token, baseElement)]
#matchingBlocks = [x for x in modifiedMatchingBlocks for y in baseMatching Blocks if ]
if len(modifiedMatchingBlocks) > 0 and len(baseMatchingBlocks) > 0:
#select the matching block with the lowest distance to the base matching block
matchingBlockIdx = index_min([min([abs((y[1]+y[0])/2 - (x[1]+x[0])/2) for y in baseMatchingBlocks]) for x in modifiedMatchingBlocks])
matchingBlock = modifiedMatchingBlocks[matchingBlockIdx]
tmpModifiedElement = list(tmpModifiedElement)
for idx in range(matchingBlock[0],matchingBlock[1]):
tmpModifiedElement[idx] = '_'
tmpModifiedElement = ''.join(tmpModifiedElement)
tokenPosition.append((matchingBlock[0],matchingBlock[1]-1))
else:
#try fuzzy search
sequenceMatcher = difflib.SequenceMatcher(None,token,tmpModifiedElement)
match = ''.join(tmpModifiedElement[j:j+n] for i, j, n in sequenceMatcher.get_matching_blocks() if n)
if (len(match)) / float(len(token)) < 0.8:
tokenPosition.append([999999999])
else:
tmp = [i for i, y in enumerate(difflib.ndiff(token, tmpModifiedElement)) if not y.startswith('+')]
if tmp[-1] - tmp[0] > len(token) + 5:
tokenPosition.append([999999999])
continue
tmpModifiedElement = list(tmpModifiedElement)
for idx in tmp:
if idx< len(tmpModifiedElement):
tmpModifiedElement[idx] = '_'
tmpModifiedElement = ''.join(tmpModifiedElement)
tmp = [tmp[0],tmp[-1]-1]
tokenPosition.append(tmp)
intersection = []
for difference in differencePosition:
distance = []
for token in tokenPosition:
distance.append(min([abs(difference[0] - subtoken) for subtoken in token]))
closestToken = sortedPartialAnalysis[index_min(distance)]
#if difference[1] in conventionDict:
intersection.append([difference[1],closestToken,min(distance)])
        if intersection:
            # guard the min() call so an empty intersection cannot raise a ValueError
            minimumToken = min(intersection, key=lambda x: x[2])
            return minimumToken[1], translationKeys, equivalenceTranslator
return None, None, None
def analyzeSpeciesModification(self, baseElement, modifiedElement, partialAnalysis):
'''
        A method for trying to read modifications within complexes.
        This is only possible once we know their internal structure
        (this method is called after the creation and resolution of the
        dependency graph).
'''
equivalenceTranslator, translationKeys, conventionDict = self.processNamingConventions2([baseElement, modifiedElement])
scores = []
if len(translationKeys) == 0:
'''
there's no clear lexical path between reactant and product
'''
return None, None, None
for particle in partialAnalysis:
distance = 9999
comparisonElement = max(baseElement, modifiedElement, key=len)
if re.search('(_|^){0}(_|$)'.format(particle), comparisonElement) == None:
distance = self.distanceToModification(particle, comparisonElement, translationKeys[0])
score = difflib.ndiff(particle, modifiedElement)
else:
# FIXME: make sure we only do a search on those variables that are viable
                # candidates. this is once again fuzzy string matching. there should
# be a better way of doing this with difflib
permutations = set(['_'.join(x) for x in itertools.permutations(partialAnalysis, 2) if x[0] == particle])
if all([x not in modifiedElement for x in permutations]):
distance = self.distanceToModification(particle, comparisonElement, translationKeys[0])
score = difflib.ndiff(particle, modifiedElement)
            # FIXME: this is just an ad-hoc parameter in terms of how far a mod is from a species name
# use something better
if distance < 4:
scores.append([particle, distance])
if len(scores) > 0:
winner = scores[[x[1] for x in scores].index(min([x[1] for x in scores]))][0]
else:
winner = None
if winner:
return winner, translationKeys, equivalenceTranslator
return None, None, None
def findMatchingModification(self, particle, species):
@memoize
def findMatchingModificationHelper(particle, species):
difference = difflib.ndiff(species,particle)
differenceList = tuple([x for x in difference if '+' in x])
if differenceList in self.namingConventions['patterns']:
return [self.namingConventions['patterns'][differenceList]]
fuzzyKey = ''.join([x[2:] for x in differenceList])
differenceList = self.testAgainstExistingConventions(fuzzyKey,self.namingConventions['modificationList'])
#can we state the modification as the combination of multiple modifications
if differenceList:
classificationList = []
for x in differenceList[0]:
differenceKey = tuple(['+ {0}'.format(letter) for letter in x])
classificationList.append(self.namingConventions['patterns'][differenceKey])
return classificationList
return None
return findMatchingModificationHelper(particle,species)
def greedyModificationMatching(self,speciesString, referenceSpecies):
'''
recursive function trying to map a given species string to a string permutation of the strings in reference species
>>> sa = SBMLAnalyzer(None,'./config/reactionDefinitions.json','./config/namingConventions.json')
>>> sorted(sa.greedyModificationMatching('EGF_EGFR',['EGF','EGFR']))
['EGF', 'EGFR']
>>> sorted(sa.greedyModificationMatching('EGF_EGFR_2_P_Grb2',['EGF','EGFR','EGF_EGFR_2_P','Grb2']))
['EGF_EGFR_2_P', 'Grb2']
>>> sorted(sa.greedyModificationMatching('A_B_C_D',['A','B','C','C_D','A_B_C','A_B']))
['A_B', 'C_D']
'''
bestMatch = ['', 0]
finalMatches = []
blacklist = []
while(len(blacklist)< len(referenceSpecies)):
localReferenceSpecies = [x for x in referenceSpecies if x not in blacklist and len(x) <= len(speciesString)]
for species in localReferenceSpecies:
if species in speciesString and len(species) > bestMatch[1] and species != speciesString:
bestMatch = [species,len(species)]
if bestMatch != ['', 0]:
result = self.greedyModificationMatching(speciesString.replace(bestMatch[0],''), referenceSpecies)
finalMatches = [bestMatch[0]]
if result == -1:
finalMatches = []
blacklist.append(bestMatch[0])
bestMatch = ['',0]
continue
elif result != -2:
finalMatches.extend(result)
break
elif len([x for x in speciesString if x != '_']) > 0:
return -1
else:
return -2
return finalMatches
def findClosestModification(self, particles, species, annotationDict, originalDependencyGraph):
'''
maps a set of particles to the complete set of species using lexical analysis. This step is done
        independently of the reaction network.
'''
equivalenceTranslator = {}
dependencyGraph = {}
localSpeciesDict = defaultdict(lambda : defaultdict(list))
def analyzeByParticle(splitparticle,species,
equivalenceTranslator=equivalenceTranslator,
dependencyGraph=dependencyGraph):
basicElements = []
composingElements = []
splitpindex = -1
#for splitpindex in range(0,len(splitparticle)):
while (splitpindex + 1)< len(splitparticle):
splitpindex += 1
splitp = splitparticle[splitpindex]
if splitp in species:
closestList = [splitp]
similarList = get_close_matches(splitp,species)
similarList = [x for x in similarList if x != splitp and len(x) < len(splitp)]
similarList = [[x,splitp] for x in similarList]
if len(similarList) > 0:
for similarity in similarList:
#compare close lexical proximity
fuzzyList = self.processAdHocNamingConventions(similarity[0],
similarity[1],localSpeciesDict,False,species)
for reaction,tag,modifier in fuzzyList:
if modifier != None and all(['-' not in x for x in modifier]):
logMess('INFO:LAE001','Lexical relationship inferred between \
{0}, user information confirming it is required'.format(similarity))
else:
closestList = get_close_matches(splitp,species)
closestList = [x for x in closestList if len(x) < len(splitp)]
                    #if there's nothing in the species list i can find a lexical
#neighbor from, then try to create one based on my two
#positional neighbors
if closestList == []:
flag= True
#do i get something by merging with the previous component?
if len(composingElements) > 0:
tmp,tmp2 = analyzeByParticle([composingElements[-1] + '_' + splitp], species)
if tmp != [] and tmp2 != []:
flag = False
splitp = composingElements[-1] + '_' + splitp
composingElements.pop()
closestList = tmp
localEquivalenceTranslator,_,_ = self.processNamingConventions2([tmp[0],tmp2[0]])
for element in localEquivalenceTranslator:
if element not in equivalenceTranslator:
equivalenceTranslator[element] = []
equivalenceTranslator[element].extend(localEquivalenceTranslator[element])
for instance in localEquivalenceTranslator[element]:
addToDependencyGraph(dependencyGraph,instance[1],[instance[0]])
#do i get something by merging with the next component?
if flag and splitpindex + 1 != len(splitparticle):
tmp,tmp2 = analyzeByParticle([splitp+ '_' + splitparticle[splitpindex+1]],species)
if tmp!= [] and tmp2 != []:
splitp = splitp+ '_' + splitparticle[splitpindex+1]
splitpindex += 1
closestList = tmp
localEquivalenceTranslator,_,_ = self.processNamingConventions2([tmp[0],tmp2[0]])
for element in localEquivalenceTranslator:
if element not in equivalenceTranslator:
equivalenceTranslator[element] = []
equivalenceTranslator[element].append(localEquivalenceTranslator[element])
for instance in localEquivalenceTranslator[element]:
addToDependencyGraph(dependencyGraph,instance[1],[instance[0]])
else:
return [],[]
elif flag:
return [],[]
basicElements.append(min(closestList,key=len))
#if what i have is a known compound just add it
if splitp in species:
composingElements.append(splitp)
#if not create it
else:
closestList = get_close_matches(splitp,species)
closestList = [x for x in closestList if len(x) < len(splitp)]
flag = False
for element in closestList:
localEquivalenceTranslator,_,_ = self.processNamingConventions2([element,splitp])
if len(localEquivalenceTranslator.keys()) == 0:
basicElements = []
composingElements = []
for element in localEquivalenceTranslator:
if element not in equivalenceTranslator:
equivalenceTranslator[element] = []
equivalenceTranslator[element].append(localEquivalenceTranslator[element])
for instance in localEquivalenceTranslator[element]:
addToDependencyGraph(dependencyGraph,instance[1],[instance[0]])
flag = True
if flag:
composingElements.append(splitp)
return basicElements,composingElements
additionalHandling = []
#lexical handling
for particle in sorted(particles, key=len):
composingElements = []
basicElements = []
# can you break it down into small bites?
if '_' in particle:
splitparticle = particle.split('_')
#print '---',splitparticle
splitparticle = [x for x in splitparticle if x]
#print splitparticle
basicElements,composingElements = analyzeByParticle(splitparticle,species)
if basicElements == composingElements and basicElements:
closeMatches = get_close_matches(particle,species)
matches = [x for x in closeMatches if len(x) < len(particle) and len(x) >= 3]
for match in matches:
difference = difflib.ndiff(match,particle)
differenceList = tuple([x for x in difference if '+' in x])
if differenceList in self.namingConventions['patterns']:
logMess('INFO:LAE005', 'matching {0}={1}'.format(particle, [match]))
addToDependencyGraph(dependencyGraph,particle,[match])
if len(matches) > 0:
continue
elif particle not in composingElements and composingElements != [] and all([x in species for x in composingElements]):
addToDependencyGraph(dependencyGraph, particle, composingElements)
for element in composingElements:
if element not in dependencyGraph:
addToDependencyGraph(dependencyGraph, element, [])
if element not in particles:
additionalHandling.append(element)
continue
else:
for basicElement in basicElements:
if basicElement in particle and basicElement != particle:
fuzzyList = self.processAdHocNamingConventions(basicElement, particle, localSpeciesDict, False, species)
if self.testAgainstExistingConventions(fuzzyList[0][1], self.namingConventions['modificationList']):
addToDependencyGraph(dependencyGraph, particle, [basicElement])
logMess('INFO:LAE005', '{0} can be mapped to {1} through existing naming conventions'.format(particle, [basicElement]))
break
continue
# if bottom up doesn't work try a top down approach
for comparisonParticle in particles:
if particle == comparisonParticle:
continue
# try to map remaining orphaned molecules to each other based on simple, but known modifications
if comparisonParticle in particle:
fuzzyList = self.processAdHocNamingConventions(particle,comparisonParticle,localSpeciesDict, False, species)
if self.testAgainstExistingConventions(fuzzyList[0][1],self.namingConventions['modificationList']):
if particle in annotationDict and comparisonParticle in annotationDict:
baseSet = set([y for x in annotationDict[particle] for y in annotationDict[particle][x]])
modSet = set([y for x in annotationDict[comparisonParticle] for y in annotationDict[comparisonParticle][x]])
if len(baseSet.intersection(modSet)) == 0:
baseDB = set([x.split('/')[-2] for x in baseSet if 'identifiers.org' in x])
modDB = set([x.split('/')[-2] for x in modSet if 'identifiers.org' in x])
                                #we still have to check that they both reference the same database
if len(baseDB.intersection(modDB)) > 0:
logMess('ERROR:ANN202', '{0}:{1}:can be mapped through naming conventions but the annotation information does not match'.format(particle, comparisonParticle))
continue
addToDependencyGraph(dependencyGraph,particle,[comparisonParticle])
logMess('INFO:LAE005', '{0} can be mapped to {1} through existing naming conventions'.format(particle, [comparisonParticle]))
break
else:
common_root = detectOntology.findLongestSubstring(particle, comparisonParticle)
                    # some arbitrary threshold of what makes a good minimum length for the common root
if len(common_root) > 0 and common_root not in originalDependencyGraph:
fuzzyList = self.processAdHocNamingConventions(common_root,comparisonParticle,localSpeciesDict, False, species)
fuzzyList2 = self.processAdHocNamingConventions(common_root,particle,localSpeciesDict, False, species)
particleMap = self.testAgainstExistingConventions(fuzzyList[0][1], self.namingConventions['modificationList'])
                        compParticleMap = self.testAgainstExistingConventions(fuzzyList2[0][1], self.namingConventions['modificationList'])
if particleMap and compParticleMap:
if particle in annotationDict and comparisonParticle in annotationDict:
baseSet = set([y for x in annotationDict[particle] for y in annotationDict[particle][x]])
modSet = set([y for x in annotationDict[comparisonParticle] for y in annotationDict[comparisonParticle][x]])
if len(baseSet.intersection(modSet)) == 0:
logMess('ERROR:ANN202', '{0}:{1}:can be mapped through naming conventions but the annotation information does not match'.format(particle,comparisonParticle))
break
addToDependencyGraph(dependencyGraph, particle, [common_root])
addToDependencyGraph(dependencyGraph, comparisonParticle, [common_root])
addToDependencyGraph(dependencyGraph, common_root, [])
logMess('INFO:LAE006', '{0}:{1}:can be mapped together through new common molecule {2} by existing naming conventions'.format(particle, comparisonParticle, common_root))
break
#if len(additionalHandling) > 0:
#print self.findClosestModification(set(additionalHandling),species)
return dependencyGraph,equivalenceTranslator
def loadConfigFiles(self,fileName):
'''
the reactionDefinition file must contain the definitions of the basic reaction types
we want to parse and what the requirements are for a given reaction type to be considered
as such
'''
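# illustrative shape of the expected JSON (the field names 'reactions', 'definitions' and
# 'reactionsNames' are the ones consumed below; the concrete contents come from the
# atomizer config files and may differ):
# {"reactions": [[["S0"], ["S1"]], ...],
#  "definitions": [[{"r": [0]}], ...],
#  "reactionsNames": ["Binding", ...]}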
reactionDefinition = ''
if fileName == '':
return []
with open(fileName,'r') as fp:
reactionDefinition = json.load(fp)
return reactionDefinition
def identifyReactions2(self,rule,reactionDefinition):
'''
This method goes through the list of common reactions listed in ruleDictionary
and tries to find how are they related according to the information in reactionDefinition
'''
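# returns a 0/1 vector with one entry per template in reactionDefinition['reactions'];
# an entry is 1 when the rule has the same number of reactants and products as that
# template (e.g. a rule like [['A', 'B'], ['A_B']] matches any 2->1 template)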
result = []
for idx,element in enumerate(reactionDefinition['reactions']):
tmp1 = rule[0] if rule[0] not in ['0',['0']] else []
tmp2 = rule[1] if rule[1] not in ['0',['0']] else []
if(len(tmp1) == len(element[0]) and len(tmp2) == len(element[1])):
result.append(1)
# for (el1,el2) in (element[0],rule[0]):
# if element[0].count(el1) == element[]
else:
result.append(0)
return result
def species2Rules(self,rules):
'''
This method goes through the rule list and classifies species tuples in a dictionary
according to the reactions they appear in.
'''
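# groups rule indices by the tuple of all species participating in them, e.g. a rule
# [['A', 'B'], ['A_B']] is stored under the key ('A', 'B', 'A_B')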
ruleDictionary = {}
for idx,rule in enumerate(rules):
reaction2 = rule #list(parseReactions(rule))
totalElements = [item for sublist in reaction2 for item in sublist]
if tuple(totalElements) in ruleDictionary:
ruleDictionary[tuple(totalElements)].append(idx)
else:
ruleDictionary[tuple(totalElements)] = [idx]
return ruleDictionary
def checkCompliance(self,ruleCompliance,tupleCompliance,ruleBook):
'''
This method is mainly useful when a single reaction can be possibly classified
in different ways, but in the context of its tuple partners it can only be classified
as one
'''
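# ruleBook is reactionDefinition['definitions']; a rule is flagged for definition index i
# when one of its alternatives holds: an 'r' entry requires the rule itself to satisfy one
# of the listed stoichiometric templates, an 'n' entry accepts on naming conventions alone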
ruleResult = np.zeros(len(ruleBook))
for validTupleIndex in np.nonzero(tupleCompliance):
for index in validTupleIndex:
for alternative in ruleBook[index]:
if 'r' in alternative and np.any([ruleCompliance[temp] for temp in alternative['r']]):
ruleResult[index] = 1
break
#check if just this is enough
if 'n' in alternative:
ruleResult[index] = 1
break
return ruleResult
def levenshtein(self,s1, s2):
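# classic dynamic-programming edit distance between s1 and s2; used to compare how strongly
# two species names differ (e.g. 'Ras_GDP' vs 'Ras_GTP' gives a distance of 1)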
l1 = len(s1)
l2 = len(s2)
matrix = [range(l1 + 1)] * (l2 + 1)
for zz in range(l2 + 1):
matrix[zz] = range(zz,zz + l1 + 1)
for zz in range(0,l2):
for sz in range(0,l1):
if s1[sz] == s2[zz]:
matrix[zz+1][sz+1] = min(matrix[zz+1][sz] + 1, matrix[zz][sz+1] + 1, matrix[zz][sz])
else:
matrix[zz+1][sz+1] = min(matrix[zz+1][sz] + 1, matrix[zz][sz+1] + 1, matrix[zz][sz] + 1)
return matrix[l2][l1]
def analyzeUserDefinedEquivalences(self,molecules,conventions):
equivalences = {}
smolecules = [x.strip('()') for x in molecules]
modifiedElement = {}
for convention in conventions:
baseMol = []
modMol = []
for molecule in smolecules:
if convention[0] in molecule and convention[1] not in molecule:
baseMol.append(molecule)
elif convention[1] in molecule:
modMol.append(molecule)
if convention[2] not in equivalences:
equivalences[convention[2]] = []
equivalences[convention[2]].append((convention[0],convention[1]))
if convention[0] not in modifiedElement:
modifiedElement[convention[0]] = []
modifiedElement[convention[0]].append((convention[0],convention[1]))
'''
for mol1 in baseMol:
for mol2 in modMol:
score = self.levenshtein(mol1,mol2)
if score == self.levenshtein(convention[0],convention[1]):
equivalences[convention[2]].append((mol1,mol2))
modifiedElement[convention[0]].append((mol1,mol2))
break
'''
return equivalences,modifiedElement
def processNamingConventions2(self, molecules, threshold=4, onlyUser=False):
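# detects modification-style naming relationships among the given species names; returns a
# translator roughly of the form {modification: [(species, modified species), ...]}, the
# list of detected modification keys, and a {lexical difference: modification} lookup,
# with any user-defined equivalences merged in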
# normal naming conventions
strippedMolecules = [x.strip('()') for x in molecules]
tmpTranslator = {}
translationKeys = []
conventionDict = {}
# FIXME: This line contains the single biggest execution bottleneck in the code
# we should be able to delete it
# user defined equivalence
if not onlyUser:
tmpTranslator, translationKeys, conventionDict = detectOntology.analyzeNamingConventions(strippedMolecules,
self.namingConventions, similarityThreshold=threshold)
# user defined naming convention
if self.userEquivalencesDict is None and hasattr(self, 'userEquivalences'):
self.userEquivalencesDict, self.modifiedElementDictionary = self.analyzeUserDefinedEquivalences(molecules, self.userEquivalences)
else:
if self.userEquivalencesDict is None:
self.userEquivalencesDict = {}
'''
for name in self.userEquivalencesDict:
equivalenceTranslator[name] = self.userEquivalencesDict[name]
'''
# add stuff to the main translator
for element in self.userEquivalencesDict:
if element not in tmpTranslator:
tmpTranslator[element] = []
tmpTranslator[element].extend(self.userEquivalencesDict[element])
return tmpTranslator, translationKeys, conventionDict
def processAdHocNamingConventions(self, reactant, product,
localSpeciesDict, compartmentChangeFlag, moleculeSet):
'''
1-1 string comparison. This method will attempt to detect if there's
a modification relationship between string <reactant> and <product>
>>> sa = SBMLAnalyzer(None,'./config/reactionDefinitions.json','./config/namingConventions.json')
>>> sa.processAdHocNamingConventions('EGF_EGFR_2','EGF_EGFR_2_P', {}, False, ['EGF','EGFR', 'EGF_EGFR_2'])
[[[['EGF_EGFR_2'], ['EGF_EGFR_2_P']], '_p', ('+ _', '+ p')]]
>>> sa.processAdHocNamingConventions('A', 'A_P', {}, False,['A','A_P']) #changes need to be at least 3 characters long
[[[['A'], ['A_P']], None, None]]
>>> sa.processAdHocNamingConventions('Ras_GDP', 'Ras_GTP', {}, False,['Ras_GDP','Ras_GTP', 'Ras'])
[[[['Ras'], ['Ras_GDP']], '_gdp', ('+ _', '+ g', '+ d', '+ p')], [[['Ras'], ['Ras_GTP']], '_gtp', ('+ _', '+ g', '+ t', '+ p')]]
>>> sa.processAdHocNamingConventions('cRas_GDP', 'cRas_GTP', {}, False,['cRas_GDP','cRas_GTP'])
[[[['cRas'], ['cRas_GDP']], '_gdp', ('+ _', '+ g', '+ d', '+ p')], [[['cRas'], ['cRas_GTP']], '_gtp', ('+ _', '+ g', '+ t', '+ p')]]
'''
#strippedMolecules = [x.strip('()') for x in molecules]
molecules = [reactant, product] if len(reactant) < len(product) else [product, reactant]
similarityThreshold = 10
if reactant == product:
return [[[[reactant], [product]], None, None]]
namePairs, differenceList, _ = detectOntology.defineEditDistanceMatrix(molecules, similarityThreshold=similarityThreshold)
#print '+++',namePairs,differenceList
#print '---',detectOntology.defineEditDistanceMatrix2(molecules,similarityThreshold=similarityThreshold)
# FIXME:in here we need a smarter heuristic to detect actual modifications
# for now I'm just going with a simple heuristic: if the species name
# is long enough, and the changes from a to b are all about modification
longEnough = 3
if len(differenceList) > 0 and ((len(reactant) >= longEnough and len(reactant) >= len(differenceList[0])) or reactant in moleculeSet):
# one is strictly a subset of the other a,a_b
if len([x for x in differenceList[0] if '-' in x]) == 0:
return [[[[reactant], [product]], ''.join([x[-1] for x in differenceList[0]]), differenceList[0]]]
# string share a common subset but they contain mutually exclusive appendixes: a_b,a_c
else:
commonRoot = detectOntology.findLongestSubstring(reactant, product)
if len(commonRoot) > longEnough or commonRoot in moleculeSet:
#find if we can find a commonRoot from existing molecules
mostSimilarRealMolecules = get_close_matches(commonRoot, [x for x in moleculeSet if x not in [reactant, product]])
for commonMolecule in mostSimilarRealMolecules:
if commonMolecule in reactant and commonMolecule in product:
commonRoot = commonMolecule
logMess('DEBUG:LAE003', 'common root {0}={1}:{2}'.format(commonRoot, reactant, product))
#if commonMolecule == commonRoot.strip('_'):
# commonRoot= commonMolecule
# break
molecules = [commonRoot, reactant, product]
namePairs, differenceList, _ = detectOntology.defineEditDistanceMatrix([commonRoot, reactant], similarityThreshold=10)
namePairs2, differenceList2, _ = detectOntology.defineEditDistanceMatrix([commonRoot, product], similarityThreshold=10)
namePairs.extend(namePairs2)
#print namePairs, reactant, product
#XXX: this was just turning the heuristic off
#for element in namePairs:
# supposed modification is actually a pre-existing species. if that happens then refuse to proceeed
# if element[1] in moleculeSet:
# return [[[[reactant],[product]],None,None]]
differenceList.extend(differenceList2)
# obtain the name of the component from an anagram using the modification letters
validDifferences = [''.join([x[-1]
for x in difference])
for difference in differenceList if '-' not in [y[0]
for y in difference]]
validDifferences.sort()
# avoid trivial differences
if len(validDifferences) < 2 or any([x in moleculeSet for x in validDifferences]):
return [[[[reactant],[product]],None,None]]
# FIXME:here it'd be helpful to come up with a better heuristic
# for inferred component names
# componentName = ''.join([x[0:max(1,int(math.ceil(len(x)/2.0)))] for x in validDifferences])
#for namePair,difference in zip(namePairs,differenceList):
# if len([x for x in difference if '-' in x]) == 0:
# tag = ''.join([x[-1] for x in difference])
# if [namePair[0],tag] not in localSpeciesDict[commonRoot][componentName]:
# localSpeciesDict[namePair[0]][componentName].append([namePair[0],tag,compartmentChangeFlag])
# localSpeciesDict[namePair[1]][componentName].append([namePair[0],tag,compartmentChangeFlag])
#namePairs,differenceList,_ = detectOntology.defineEditDistanceMatrix([commonRoot,product],
# similarityThreshold=similarityThreshold)
return [[[[namePairs[y][0]],[namePairs[y][1]]],''.join([x[-1] for x in differenceList[y]]),differenceList[y]] for y in range(len(differenceList))]
return [[[[reactant],[product]],None,None]]
def compareStrings(self,reactant,product,strippedMolecules):
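# tries to pair one reactant token against the product token list: returns
# (reactant, [match]) when a direct or fuzzy (difflib) match is found, and
# (None, candidates) when the reactant cannot be matched confidently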
if reactant in strippedMolecules:
if reactant in product:
return reactant,[reactant]
#pairedMolecules.append((reactant[idx],reactant[idx]))
#product.remove(reactant[idx])
#reactant.remove(reactant[idx])
else:
closeMatch = get_close_matches(reactant,product)
if len(closeMatch) == 1:
#pairedMolecules.append((reactant[idx],closeMatch[0]))
#product.remove(closeMatch[0])
#reactant.remove(reactant[idx])
return (reactant,closeMatch)
elif len(closeMatch) > 0:
s = difflib.SequenceMatcher()
s.set_seq1(reactant)
scoreDictionary = []
for match in closeMatch:
s.set_seq2(match)
scoreDictionary.append((s.ratio(),match))
scoreDictionary.sort(reverse=True)
return reactant,[closeMatch[0]]
else:
return None,[]
else:
if reactant not in product:
closeMatch = get_close_matches(reactant,product)
if len(closeMatch) == 1:
if closeMatch[0] in strippedMolecules:
return reactant,closeMatch
else:
closeMatchToBaseMolecules = get_close_matches(closeMatch[0],strippedMolecules)
if len(closeMatchToBaseMolecules) == 1:
return reactant,closeMatch
return None,closeMatch
#pairedMolecules.append((reactant[idx],closeMatch[0]))
#product.remove(closeMatch[0])
#reactant.remove(reactant[idx])
else:
return None,closeMatch
#print '****',reactant[idx],closeMatch,difflib.get_close_matches(reactant[idx],strippedMolecules)
else:
mcloseMatch = get_close_matches(reactant,strippedMolecules)
#for close in mcloseMatch:
# if close in [x for x in reaction[0]]:
# return None,[close]
return None,[reactant]
def growString(self, reactant, product, rp, pp, idx, strippedMolecules,continuityFlag):
'''
currently this is the slowest method in the system because of all those calls to difflib
'''
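# starting from the single tokens rp/pp, greedily extend the candidate reactant and product
# name windows (joined with '_') while the grown strings still resemble known molecules,
# returning the longest supported fragments on each side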
idx2 = 2
treactant = [rp]
tproduct = pp
pidx = product.index(pp[0])
#print reactant,rself.breakByActionableUnit([reactant,product],strippedMolecules)
while idx + idx2 <= len(reactant):
treactant2 = reactant[idx:min(len(reactant), idx + idx2)]
#if treactant2 != tproduct2:
if treactant2[-1] in strippedMolecules and continuityFlag:
break
else:
if len(reactant) > idx + idx2:
tailDifferences = get_close_matches(treactant2[-1], strippedMolecules)
if len(tailDifferences) > 0:
tdr = max([0] + [sequenceMatcher('_'.join(treactant2), x) for x in tailDifferences])
hdr = max([0] + [sequenceMatcher('_'.join(reactant[idx + idx2 - 1:idx + idx2 + 1]), x) for x in tailDifferences])
if tdr > hdr and tdr > 0.8:
treactant = treactant2
else:
tailDifferences = get_close_matches('_'.join(treactant2), strippedMolecules)
headDifferences = get_close_matches('_'.join(reactant[idx + idx2 - 1:idx + idx2 + 1]), strippedMolecules)
if len(tailDifferences) == 0:
break
elif len(headDifferences) == 0:
treactant = treactant2
break
elif len(reactant) == idx + idx2:
tailDifferences = get_close_matches('_'.join(treactant2), strippedMolecules)
if len(tailDifferences) > 0:
tdr = max([0] + [sequenceMatcher('_'.join(treactant2), x) for x in tailDifferences])
if tdr > 0.8:
treactant = treactant2
else:
break
else:
break
else:
treactant = treactant2
break
idx2 += 1
idx2 = 2
while pidx + idx2 <= len(product):
tproduct2 = product[pidx:min(len(product), pidx + idx2)]
if tproduct2[-1] in strippedMolecules and continuityFlag:
break
else:
if len(product) > pidx + idx2:
tailDifferences = get_close_matches(tproduct2[-1], strippedMolecules)
if len(tailDifferences) > 0:
tdr = max([0] + [sequenceMatcher('_'.join(tproduct2), x) for x in tailDifferences])
hdr = max([0] + [sequenceMatcher('_'.join(product[pidx + idx2 - 1:pidx + idx2 + 1]), x) for x in tailDifferences])
if tdr > hdr and tdr > 0.8:
tproduct = tproduct2
else:
tailDifferences = get_close_matches('_'.join(tproduct2), strippedMolecules, cutoff=0.8)
headDifferences = get_close_matches('_'.join(product[pidx + idx2 - 1:pidx + idx2 + 1]), strippedMolecules, cutoff=0.8)
if len(tailDifferences) == 0:
break
elif len(headDifferences) == 0 or '_'.join(tproduct2) in tailDifferences:
tproduct = tproduct2
elif len(product) == pidx + idx2:
tailDifferences = get_close_matches('_'.join(tproduct2), strippedMolecules)
if len(tailDifferences) > 0:
tdr = max([0] + [sequenceMatcher('_'.join(tproduct2), x) for x in tailDifferences])
if tdr > 0.8:
tproduct = tproduct2
else:
break
else:
break
else:
tproduct = tproduct2
break
#if '_'.join(tproduct2) in strippedMolecules and '_'.join(treactant2) in strippedMolecules:
# tproduct = tproduct2
# treactant = treactant2
#else:
idx2 += 1
return treactant, tproduct
def approximateMatching2(self, reactantString, productString, strippedMolecules, differenceParameter):
"""
The meat of the naming convention matching between reactant and product is done here
tl;dr naming conventions are hard
"""
#reactantString = [x.split('_') for x in reaction[0]]
#reactantString = [[y for y in x if y!=''] for x in reactantString]
#productString = [x.split('_') for x in reaction[1]]
#productString = [[y for y in x if y!=''] for x in productString]
pairedMolecules = [[] for _ in range(len(productString))]
pairedMolecules2 = [[] for _ in range(len(reactantString))]
for stoch, reactant in enumerate(reactantString):
idx = -1
while idx + 1 < len(reactant):
idx += 1
for stoch2, product in enumerate(productString):
#print idx2,product in enumerate(element3):
rp, pp = self.compareStrings(reactant[idx], product, strippedMolecules)
if rp and rp != pp[0]:
pairedMolecules[stoch2].append((rp, pp[0]))
pairedMolecules2[stoch].append((pp[0], rp))
product.remove(pp[0])
reactant.remove(rp)
#product.remove(pp)
#reactant.remove(rp)
idx = -1
break
elif rp:
treactant, tproduct = self.growString(reactant, product,
rp, pp, idx, strippedMolecules,continuityFlag=True)
if '_'.join(treactant) in strippedMolecules:
finalReactant = '_'.join(treactant)
else:
reactantMatches = get_close_matches('_'.join(treactant), strippedMolecules)
if len(reactantMatches) > 0:
reactantScore = [sequenceMatcher(''.join(treactant), x.replace('_','')) for x in reactantMatches]
finalReactant = reactantMatches[reactantScore.index(max(reactantScore))]
else:
finalReactant = '_'.join(treactant)
if '_'.join(tproduct) in strippedMolecules:
finalProduct = '_'.join(tproduct)
else:
productMatches = get_close_matches('_'.join(tproduct), strippedMolecules)
if len(productMatches) > 0:
productScore = [sequenceMatcher(''.join(tproduct), x.replace('_', '')) for x in productMatches]
finalProduct = productMatches[productScore.index(max(productScore))]
else:
finalProduct = '_'.join(tproduct)
pairedMolecules[stoch2].append((finalReactant, finalProduct))
pairedMolecules2[stoch].append((finalProduct, finalReactant))
for x in treactant:
reactant.remove(x)
for x in tproduct:
product.remove(x)
idx = -1
break
else:
flag = False
if pp not in [[], None]:
#if reactant[idx] == pp[0]:
treactant, tproduct = self.growString(reactant, product,
reactant[idx], pp, idx, strippedMolecules,continuityFlag=False)
#FIXME: this comparison is pretty nonsensical. treactant and tproduct are not
#guaranteed to be in the right order. why are we comparing them both at the same time
if (len(treactant) > 1 and '_'.join(treactant) in strippedMolecules) or (len(tproduct)>1 and '_'.join(tproduct) in strippedMolecules):
pairedMolecules[stoch2].append(('_'.join(treactant), '_'.join(tproduct)))
pairedMolecules2[stoch].append(('_'.join(tproduct), '_'.join(treactant)))
for x in treactant:
reactant.remove(x)
for x in tproduct:
product.remove(x)
idx = -1
break
else:
rclose = get_close_matches('_'.join(treactant),strippedMolecules)
pclose = get_close_matches('_'.join(tproduct),strippedMolecules)
rclose2 = [x.split('_') for x in rclose]
rclose2 = ['_'.join([y for y in x if y != '']) for x in rclose2]
pclose2 = [x.split('_') for x in pclose]
pclose2 = ['_'.join([y for y in x if y != '']) for x in pclose2]
trueReactant = None
trueProduct = None
try:
trueReactant = rclose[rclose2.index('_'.join(treactant))]
trueProduct = pclose[pclose2.index('_'.join(tproduct))]
except:
pass
if trueReactant and trueProduct:
pairedMolecules[stoch2].append((trueReactant,trueProduct))
pairedMolecules2[stoch].append((trueProduct,trueReactant))
for x in treactant:
reactant.remove(x)
for x in tproduct:
product.remove(x)
idx = -1
break
if sum(len(x) for x in reactantString+productString)> 0 and self.conservationOfMass:
return None,None
else:
return pairedMolecules,pairedMolecules2
def approximateMatching(self,ruleList,differenceParameter=[]):
def curateString(element,differences,symbolList = ['#','&',';','@','!','?'],equivalenceDict={}):
'''
remove compound differences (>2 characters) and instead represent them with symbols
returns the transformed string, an equivalence dictionary and unused symbols
'''
tmp = element
for difference in differences:
if difference in element:
if difference.startswith('_'):
if difference not in equivalenceDict:
symbol = symbolList.pop()
equivalenceDict[difference] = symbol
else:
symbol = equivalenceDict[difference]
tmp = re.sub(r'{0}(_|$)'.format(difference),r'{0}\1'.format(symbol),tmp)
elif difference.endswith('_'):
if difference not in equivalenceDict:
symbol = symbolList.pop()
equivalenceDict[difference] = symbol
else:
symbol = equivalenceDict[difference]
tmp = re.sub(r'(_|^){0}'.format(difference),r'{0}\1'.format(symbol),tmp)
return tmp,symbolList,equivalenceDict
'''
given a transformation of the kind a+ b -> ~a_~b, where ~a and ~b are some
slightly modified version of a and b, this function will return a list of
lexical changes that a and b must undergo to become ~a and ~b.
'''
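# e.g. for a rule like ['A', 'B'] -> ['A_B_p'] the routine aligns the concatenated reactant
# names against the product name and reports the leftover lexical differences (here '_p');
# illustrative example, the actual pairing depends on difflib's matching blocks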
flag = True
if len(ruleList[1]) == 1 and ruleList[1] != '0':
differences = deepcopy(differenceParameter)
tmpRuleList = deepcopy(ruleList)
while flag:
flag = False
sym = ['#','&',';','@','!','?']
dic = {}
for idx,_ in enumerate(tmpRuleList[0]):
tmpRuleList[0][idx],sym,dic = curateString(ruleList[0][idx],differences,sym,dic)
tmpRuleList[1][0],sym,dic = curateString(ruleList[1][0],differences,sym,dic)
permutations = [x for x in itertools.permutations(ruleList[0])]
tpermutations = [x for x in itertools.permutations(tmpRuleList[0])]
score = [difflib.SequenceMatcher(None,'_'.join(x),ruleList[1][0]).ratio() \
for x in permutations]
maxindex = score.index(max(score))
ruleList[0] = list(permutations[maxindex])
tmpRuleList[0] = list(tpermutations[maxindex])
sym = [dic[x] for x in dic]
sym.extend(differences)
sym = [x for x in sym if '_' not in x]
simplifiedDifference = difflib.SequenceMatcher(lambda x: x in sym,'-'.join(tmpRuleList[0]),tmpRuleList[1][0])
matches = simplifiedDifference.get_matching_blocks()
if len(matches) != len(ruleList[0]) + 1:
return [[],[]],[[],[]]
productPartitions = []
for idx,match in enumerate(matches):
if matches[idx][2] != 0:
productPartitions.append(tmpRuleList[1][0][
matches[idx][1]:matches[idx][1]+matches[idx][2]])
reactantPartitions = tmpRuleList[0]
#Don't count trailing underscores as part of the species name
for idx,_ in enumerate(reactantPartitions):
reactantPartitions[idx] = reactantPartitions[idx].strip('_')
for idx,_ in enumerate(productPartitions):
productPartitions[idx] = productPartitions[idx].strip('_')
#greedymatching
acc=0
#FIXME: it's not properly copying the whole string
for idx in range(0,len(matches)-1):
while matches[idx][2]+ acc < len(tmpRuleList[1][0]) \
and tmpRuleList[1][0][matches[idx][2]+ acc] in sym:
productPartitions[idx] += tmpRuleList[1][0][matches[idx][2] + acc]
acc += 1
#idx = 0
#while(tmpString[matches[0][2]+ idx] in sym):
# reactantfirstHalf += tmpString[matches[0][2] + idx]
# idx += 1
for element in dic:
for idx in range(len(productPartitions)):
productPartitions[idx] = productPartitions[idx].replace(dic[element],element)
reactantPartitions[idx] = reactantPartitions[idx].replace(dic[element],element)
zippedPartitions = zip(reactantPartitions,productPartitions)
zippedPartitions = [sorted(x,key=len) for x in zippedPartitions]
bdifferences = [[z for z in y if '+ ' in z or '- ' in z] for y in \
[difflib.ndiff(*x) for x in zippedPartitions]]
processedDifferences = [''.join([y.strip('+ ') for y in x]) for x in bdifferences]
for idx,processedDifference in enumerate(processedDifferences):
if processedDifference not in differences and \
'- ' not in processedDifference and bdifferences[idx] != []:
flag = True
differences.append(processedDifference)
else:
#TODO: deal with reactions of the kind a+b -> c + d
return [[],[]],[[],[]]
return bdifferences,zippedPartitions
def getReactionClassification(self,reactionDefinition,rules,equivalenceTranslator,
indirectEquivalenceTranslator,
translationKeys=[]):
'''
*reactionDefinition* is a list of conditions that must be met for a reaction
to be classified a certain way
*rules* is the list of reactions
*equivalenceTranslator* is a dictionary containing all complexes that have been
determined to be the same through naming conventions
This method will go through the list of rules and the list of rule definitions
and tell us which rules it can classify according to the rule definitions list
provided
'''
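# the classification proceeds in three passes: per-rule stoichiometry compliance,
# per-tuple aggregation (so forward/backward rules over the same species reinforce each
# other), and a final mapping of each rule to the first matching name in
# reactionDefinition['reactionsNames']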
ruleDictionary = self.species2Rules(rules)
#determines a reaction's reactionStructure aka stoichiometry
ruleComplianceMatrix = np.zeros((len(rules),len(reactionDefinition['reactions'])))
for (idx,rule) in enumerate(rules):
reaction2 = rule #list(parseReactions(rule))
ruleComplianceMatrix[idx] = self.identifyReactions2(reaction2,reactionDefinition)
#initialize the tupleComplianceMatrix array with the same keys as ruleDictionary
#the tuple complianceMatrix is basically there to make sure we evaluate
#bidirectional reactions as one reaction
tupleComplianceMatrix = {key:np.zeros((len(reactionDefinition['reactions']))) for key in ruleDictionary}
#check which reaction conditions each tuple satisfies
for element in ruleDictionary:
for rule in ruleDictionary[element]:
tupleComplianceMatrix[element] += ruleComplianceMatrix[rule]
#now we will check for the nameConventionMatrix (same thing as before but for naming conventions)
tupleNameComplianceMatrix = {key:{key2:0 for key2 in equivalenceTranslator} \
for key in ruleDictionary}
for rule in ruleDictionary:
for namingConvention in equivalenceTranslator:
for equivalence in equivalenceTranslator[namingConvention]:
if all(element in rule for element in equivalence):
tupleNameComplianceMatrix[rule][namingConvention] +=1
break
for equivalence in indirectEquivalenceTranslator[namingConvention]:
if all(element in rule for element in equivalence[0]):
tupleNameComplianceMatrix[rule][namingConvention] +=1
break
#we can have more than one
#elif appro
#check if the reaction conditions each tuple satisfies are enough to get classified
#as an specific named reaction type
tupleDefinitionMatrix = {key:np.zeros((len(reactionDefinition['definitions']))) for key in ruleDictionary}
for key,element in tupleComplianceMatrix.items():
for idx,member in enumerate(reactionDefinition['definitions']):
for alternative in member:
if 'r' in alternative:
tupleDefinitionMatrix[key][idx] += np.all([element[reaction] for reaction in alternative[u'r']])
if 'n' in alternative and reactionDefinition['reactionsNames'][idx] in equivalenceTranslator:
tupleDefinitionMatrix[key][idx] += np.all([tupleNameComplianceMatrix[key][reactionDefinition['reactionsNames'][idx]]])
#contains which rules are equal to reactions defined in reactionDefinitions['definitions']
#use the per tuple classification to obtain a per reaction classification
ruleDefinitionMatrix = np.zeros((len(rules),len(reactionDefinition['definitions'])))
for key,element in ruleDictionary.items():
for rule in element:
ruleDefinitionMatrix[rule] = self.checkCompliance(ruleComplianceMatrix[rule],
tupleDefinitionMatrix[key],reactionDefinition['definitions'])
#use reactionDefinitions reactionNames field to actually tell us what reaction
#type each reaction is
results = []
for idx,element in enumerate(ruleDefinitionMatrix):
nonZero = np.nonzero(element)[0]
if(len(nonZero) == 0):
results.append('None')
#todo: need to do something if it matches more than one reaction
else:
classifications = [reactionDefinition['reactionsNames'][x] for x in nonZero]
#FIXME: we should be able to support more than one transformation
results.append(classifications[0])
return results
def setConfigurationFile(self,configurationFile):
self.configurationFile = configurationFile
def getReactionProperties(self):
'''
if we are using a naming convention definition in the json file
this method will return the component and state names that this
reaction uses
'''
#TODO: once we transition completely to a naming convention delete
#this ----
reactionTypeProperties = {}
reactionDefinition = self.loadConfigFiles(self.configurationFile)
if self.speciesEquivalences != None:
self.userEquivalences = self.loadConfigFiles(self.speciesEquivalences)['reactionDefinition']
for reactionType,properties in zip(reactionDefinition['reactionsNames'],reactionDefinition['definitions']):
#if its a reaction defined by its naming convention
#xxxxxxxxxxxxxxxxxxx
for alternative in properties:
if 'n' in alternative.keys():
try:
site = reactionDefinition['reactionSite'][alternative['rsi']]
state = reactionDefinition['reactionState'][alternative['rst']]
except:
site = reactionType
state = reactionType[0]
reactionTypeProperties[reactionType] = [site,state]
#TODO: end of delete
reactionDefinition = self.namingConventions
for idx,reactionType in enumerate(reactionDefinition['modificationList']):
site = reactionDefinition['reactionSite'][reactionDefinition['definitions'][idx]['rsi']]
state = reactionDefinition['reactionState'][reactionDefinition['definitions'][idx]['rst']]
reactionTypeProperties[reactionType] = [site,state]
return reactionTypeProperties
def processFuzzyReaction(self,reaction,translationKeys,conventionDict,indirectEquivalenceTranslator):
differences,pairedChemicals= self.approximateMatching(reaction,
translationKeys)
#matching,matching2 = self.approximateMatching2(reaction,strippedMolecules,
# translationKeys)
d1,d2 = differences[0],differences[1]
firstMatch,secondMatch = pairedChemicals[0],pairedChemicals[1]
matches = [firstMatch,secondMatch]
for index,element in enumerate([d1,d2]):
idx1=0
idx2 = 1
while idx2 <= len(element):
if (element[idx1],) in conventionDict.keys():
pattern = conventionDict[(element[idx1],)]
indirectEquivalenceTranslator[pattern].append([[reaction[0][index],reaction[1][0]],reaction[0],matches[index],reaction[1]])
elif (element[idx1].replace('-','+'),) in conventionDict.keys():
matches[index].reverse()
transformedPattern = conventionDict[(element[idx1].replace('-','+'),) ]
indirectEquivalenceTranslator[transformedPattern].append([[reaction[1][0],reaction[0][index]],reaction[0],matches[index],reaction[1]])
elif idx2 < len(element):
if tuple([element[idx1],element[idx2]]) in conventionDict.keys():
pattern = conventionDict[tuple([element[idx1],element[idx2]])]
indirectEquivalenceTranslator[pattern].append([[reaction[0][index],reaction[1][0]],reaction[0],matches[index],reaction[1]])
idx1 += 1
idx2 += 1
elif '-' in element[idx1] and '-' in element[idx2]:
if tuple([element[idx1].replace('-','+'),element[idx2].replace('-','+')]) in conventionDict.keys():
matches[index].reverse()
transformedPattern = conventionDict[tuple([element[idx1].replace('-','+'),element[idx2].replace('-','+')])]
indirectEquivalenceTranslator[transformedPattern].append([[reaction[1][0],reaction[0][index]],reaction[0],matches[index],reaction[1]])
idx1 += 1
idx2 += 1
idx1+=1
idx2+=1
def removeExactMatches(self, reactantList, productList):
"""
goes through the list of lists reactantList and productList and removes the intersection
"""
reactantFlat = Counter([y for x in reactantList for y in x])
productFlat = Counter([y for x in productList for y in x])
intersection = reactantFlat & productFlat
intersection2 = deepcopy(intersection)
newReactant = []
newProduct = []
for chemical in reactantList:
tmp = []
for element in chemical:
if intersection[element] > 0:
intersection[element] -= 1
else:
tmp.append(element)
newReactant.append(tmp)
for chemical in productList:
tmp = []
for element in chemical:
if intersection2[element] > 0:
intersection2[element] -= 1
else:
tmp.append(element)
newProduct.append(tmp)
return newReactant,newProduct
def findBiggestActionable(self,chemicalList, chemicalCandidatesList):
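# for every chemical, pick the combination of known candidate molecules whose '_'-joined
# concatenation best reproduces the chemical's name (difflib ratio), preferring exact
# reconstructions; returns None if any chemical has no candidates at all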
actionableList = []
for chemical,chemicalCandidates in zip(chemicalList,chemicalCandidatesList):
if len(chemicalCandidates) == 0:
return None
if len(chemicalCandidates) == 1:
actionableList.append([chemical])
continue
# find all combinations
scoreDict = []
result = 0
try:
for i in xrange(1, len(chemicalCandidates)+1):
combinations = list(itertools.permutations(chemicalCandidates,i))
for x in combinations:
score = difflib.SequenceMatcher(None,'_'.join(x), chemical).quick_ratio()
if score == 1:
result = x
raise IOError
elif score > 0:
scoreDict.append([x, score])
except IOError:
scoreDict = [[result,1.0]]
scoreDict.sort(key=lambda x:[x[1],-len(x[0])], reverse=True)
if len(scoreDict) > 0:
actionableList.append(list(scoreDict[0][0]))
else:
print actionableList
raise Exception
return actionableList
def breakByActionableUnit(self, reaction, strippedMolecules):
#find valid actionable units from the list of molecules in the system
validCandidatesReactants = [[y for y in strippedMolecules if y in x] for x in reaction[0]]
validCandidatesProducts = [[y for y in strippedMolecules if y in x] for x in reaction[1]]
# find the subset of intersection parts between reactants and products
intermediateVector = [list(itertools.ifilter(lambda x: any([len([z for z in difflib.ndiff(x,y) if '+' in z[0] or '-' in z[0]]) <= 3 for z in validCandidatesProducts for y in z]), sublist)) for sublist in validCandidatesReactants]
intermediateVector = [list(itertools.ifilter(lambda x: any([len([z for z in difflib.ndiff(x,y) if '+' in z[0] or '-' in z[0]]) <= 3 for z in intermediateVector for y in z]), sublist)) for sublist in validCandidatesProducts]
tmpReactant = [[list(itertools.ifilter(lambda y:len([x for x in intermediateVector[0] if y in x]) == 1, reactant))] for reactant in validCandidatesReactants]
tmpProduct = [[list(itertools.ifilter(lambda y:len([x for x in intermediateVector[0] if y in x]) == 1, reactant))] for reactant in validCandidatesProducts]
#print validCandidatesReactants,validCandidatesProducts,intermediateVector
#print '......',reaction
#print '\t......',validCandidatesReactants,validCandidatesProducts
#find biggest subset of actionable units
reactantList = self.findBiggestActionable(reaction[0],validCandidatesReactants)
productList = self.findBiggestActionable(reaction[1],validCandidatesProducts)
#print '\t\t+++++',reactantList,productList
return reactantList,productList
def testAgainstExistingConventions(self, fuzzyKey, modificationList, threshold=4):
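# checks whether fuzzyKey can be written as a concatenation of up to threshold-1 known
# modification labels (case-insensitive); e.g. a key like '_gdp' is accepted if it equals
# some existing label or a permutation of several of them (illustrative)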
@memoize
def testAgainstExistingConventionsHelper(fuzzyKey, modificationList, threshold):
if not fuzzyKey:
return None
for i in xrange(1, threshold):
combinations = itertools.permutations(modificationList, i)
validKeys = list(itertools.ifilter(lambda x: (''.join(x)).upper() == fuzzyKey.upper(), combinations))
if (validKeys):
return validKeys
return None
return testAgainstExistingConventionsHelper(fuzzyKey, modificationList, threshold)
def classifyReactions(self, reactions, molecules, externalDependencyGraph={}):
'''
classifies a group of reaction according to the information in the json
config file
FIXME: classifyReactions function is currently the biggest bottleneck in atomizer, taking up
to 80% of the time without counting pathwaycommons querying.
'''
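# returns, in order: per-reaction classification labels, a flat list of species
# equivalences, the equivalence translator keyed by modification, the indirect
# (reaction-induced) translator, ad hoc labels created on the fly, the lexical
# dependency graph and the user-defined translator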
def createArtificialNamingConvention(reaction, fuzzyKey, fuzzyDifference):
'''
Does the actual data-structure filling if
a 1-1 reaction shows sign of modification. Returns True if
a change was performed
'''
#fuzzyKey,fuzzyDifference = self.processAdHocNamingConventions(reaction[0][0],reaction[1][0],localSpeciesDict,compartmentChangeFlag)
if fuzzyKey and fuzzyKey.strip('_').lower() not in [x.lower() for x in strippedMolecules]:
# if our state isn't yet in the dependency graph's preliminary data structures
if '{0}'.format(fuzzyKey) not in equivalenceTranslator:
# print '---','{0}'.format(fuzzyKey),equivalenceTranslator.keys()
# check if there is a combination of existing keys that deals with this modification without the need of creating a new one
if self.testAgainstExistingConventions(fuzzyKey,self.namingConventions['modificationList']):
logMess('INFO:LAE005', 'added relationship through existing convention in reaction {0}'.format(str(reaction)))
if '{0}'.format(fuzzyKey) not in equivalenceTranslator:
equivalenceTranslator['{0}'.format(fuzzyKey)] = []
if '{0}'.format(fuzzyKey) not in indirectEquivalenceTranslator:
indirectEquivalenceTranslator['{0}'.format(fuzzyKey)] = []
if tuple(sorted([x[0] for x in reaction],key=len)) not in equivalenceTranslator['{0}'.format(fuzzyKey)]:
equivalenceTranslator['{0}'.format(fuzzyKey)].append(tuple(sorted([x[0] for x in reaction],key=len)))
return
logMess('INFO:LAE004', '{0}:{1}:added induced naming convention'.format(reaction[0][0],reaction[1][0]))
equivalenceTranslator['{0}'.format(fuzzyKey)] = []
if fuzzyKey == '0':
tmpState = 'ON'
else:
tmpState = fuzzyKey.upper()
adhocLabelDictionary['{0}'.format(fuzzyKey)] = ['{0}'.format(fuzzyKey),tmpState]
#fill main naming convention data structure
self.namingConventions['modificationList'].append('{0}'.format(fuzzyKey))
self.namingConventions['reactionState'].append(tmpState)
self.namingConventions['reactionSite'].append('{0}'.format(fuzzyKey))
self.namingConventions['patterns'][fuzzyDifference] = '{0}'.format(fuzzyKey)
self.namingConventions['definitions'].append({'rst':len(self.namingConventions['reactionState'])-1,
'rsi':len(self.namingConventions['reactionSite'])-1})
if fuzzyKey not in translationKeys:
translationKeys.append(fuzzyKey)
#if this same definition doesn't already exist. this is to avoid cycles
if tuple(sorted([x[0] for x in reaction],key=len)) not in equivalenceTranslator['{0}'.format(fuzzyKey)]:
equivalenceTranslator['{0}'.format(fuzzyKey)].append(tuple(sorted([x[0] for x in reaction],key=len)))
newTranslationKeys.append(fuzzyKey)
conventionDict[fuzzyDifference] = '{0}'.format(fuzzyKey)
if '{0}'.format(fuzzyKey) not in indirectEquivalenceTranslator:
indirectEquivalenceTranslator['{0}'.format(fuzzyKey)] = []
return True
return False
# load the json config file
reactionDefinition = self.loadConfigFiles(self.configurationFile)
rawReactions = []
for x in reactions:
tmp = parseReactions(x)
if tmp:
rawReactions.append(tmp)
#rawReactions = [parseReactions(x) for x in reactions if parseReactions(x)]
strippedMolecules = [x.strip('()') for x in molecules]
reactionnetworkelements = set([z for x in rawReactions for y in x for z in y])
#only keep those molecules that appear in the reaction network
strippedMolecules = [x for x in strippedMolecules if x in reactionnetworkelements]
# load user defined complexes
if self.speciesEquivalences != None:
self.userEquivalences = self.loadConfigFiles(self.speciesEquivalences)['reactionDefinition']
# determines if two molecules have a relationship according to the naming convention section
#equivalenceTranslator is a dictionary of actual modifications
#example {'Phosphorylation':[['A','A_p'],['B','B_p']]}
#process straightforward naming conventions
#XXX: we should take this function out of processNamingConventions2 and all process that calls it
tmpTranslator,translationKeys,conventionDict = detectOntology.analyzeNamingConventions(strippedMolecules,
self.userNamingConventions,similarityThreshold=10)
userEquivalenceTranslator, _, _ = self.processNamingConventions2(strippedMolecules,onlyUser=True)
for element in tmpTranslator:
if element in userEquivalenceTranslator:
userEquivalenceTranslator[element].extend(tmpTranslator[element])
else:
userEquivalenceTranslator[element] = tmpTranslator[element]
equivalenceTranslator = copy(userEquivalenceTranslator)
newTranslationKeys = []
adhocLabelDictionary = {}
# lists of plain reactions
# process fuzzy naming conventions based on reaction information
indirectEquivalenceTranslator = {x: [] for x in equivalenceTranslator}
localSpeciesDict = defaultdict(lambda: defaultdict(list))
trueBindingReactions = []
# the lexical dependencyGraph merely applies lexical analysis to detect which components on the left hand side
# map to different ones on the right hand side of a given reaction
lexicalDependencyGraph = defaultdict(list)
strippedMolecules = [x.strip('()') for x in molecules]
#only keep those molecules that appear in the reaction network
strippedMolecules = [x for x in strippedMolecules if x in reactionnetworkelements]
for idx,reaction in enumerate(rawReactions):
flagstar = False
if len(reaction[0]) == 1 and len(reaction[1]) == 1 \
and len(reaction[0][0]) > len(reaction[1][0]):
#unmodification/relaxation
flagstar = True
reaction = [reaction[1], reaction[0]]
#should we reuse information obtained from other methods?
#FIXME: instead of doing a simple split by '_' we should be comparing against the molecules in stripped molecules and split by smallest actionable units.
if externalDependencyGraph == {}:
#print '-----',reaction
#reactantString, productString = self.breakByActionableUnit(reaction, strippedMolecules)
#print '...',reaction, reactantString, productString
#if not reactantString or not productString:
reactantString = [x.split('_') for x in reaction[0]]
reactantString = [[y for y in x if y!=''] for x in reactantString]
productString = [x.split('_') for x in reaction[1]]
productString = [[y for y in x if y!=''] for x in productString]
else:
reactantString = []
productString = []
#check how the reactants are composed and add it to the list
for element in reaction[0]:
if element not in externalDependencyGraph or externalDependencyGraph[element] == []:
reactantString.append([element])
else:
reactantString.append(deepcopy(externalDependencyGraph[element][0]))
#same for products
for element in reaction[1]:
if element not in externalDependencyGraph or externalDependencyGraph[element] == []:
productString.append([element])
else:
productString.append(deepcopy(externalDependencyGraph[element][0]))
# remove those chemicals that match exactly on both sides since those are not interesting.
# and unlike lexical pattern matching we are not going to go around trying to increase string size
reactantString, productString = self.removeExactMatches(reactantString, productString)
if [0] in reactantString or [0] in productString:
continue
matching, matching2 = self.approximateMatching2(reactantString, productString, strippedMolecules, translationKeys)
#print reaction, matching
#if matching and flagstar:
# logMess('DEBUG:Atomization', 'inverting order of {0} for lexical analysis'.format([reaction[1], reaction[0]]))
flag = True
if matching:
for reactant,matches in zip(reaction[1],matching):
for match in matches:
pair = list(match)
pair.sort(key=len)
fuzzyList = self.processAdHocNamingConventions(pair[0],
pair[1],localSpeciesDict,False,strippedMolecules)
for fuzzyReaction,fuzzyKey,fuzzyDifference in fuzzyList:
if fuzzyKey == None and fuzzyReaction[0] != fuzzyReaction[1]:
flag= False
#logMess('Warning:ATOMIZATION','We could not a meaningful \
#mapping in {0} when lexically analyzing {1}.'.format(pair,reactant))
createArtificialNamingConvention(fuzzyReaction,
fuzzyKey, fuzzyDifference)
if flag and sorted([x[1] for x in matches]) not in lexicalDependencyGraph[reactant]:
# dont introduce cyclical dependencies
if all([x[1] != reactant for x in matches]):
lexicalDependencyGraph[reactant].append(sorted([x[1] for x in matches]))
for x in matches:
# TODO(Oct14): it would be better to try to map this to an
# existing molecule instead of trying to create a new one
if x[1] not in strippedMolecules:
if len(x[1]) > len(x[0]):
lexicalDependencyGraph[x[1]] = [[x[0]]]
else:
lexicalDependencyGraph[x[0]] = [[x[1]]]
lexicalDependencyGraph[x[1]] = []
translationKeys.extend(newTranslationKeys)
for species in localSpeciesDict:
speciesName = localSpeciesDict[species][localSpeciesDict[species].keys()[0]][0][0]
definition = [species]
sdefinition = [speciesName]
for component in localSpeciesDict[species]:
cdefinition = []
states = [["s",state[1]] for state in
localSpeciesDict[species][component]]
for state in states:
cdefinition.extend(state)
cdefinition = [component,cdefinition]
sdefinition.extend(cdefinition)
definition.append([sdefinition])
self.lexicalSpecies.append(definition)
#definition = [commonRoot,[[commonRoot,componentName,["s",tag]]]]
reactionClassification = self.getReactionClassification(reactionDefinition,
rawReactions,equivalenceTranslator,
indirectEquivalenceTranslator,
translationKeys)
for element in trueBindingReactions:
reactionClassification[element] = 'Binding'
listOfEquivalences = []
for element in equivalenceTranslator:
listOfEquivalences.extend(equivalenceTranslator[element])
return reactionClassification,listOfEquivalences,equivalenceTranslator, \
indirectEquivalenceTranslator,adhocLabelDictionary,lexicalDependencyGraph, \
userEquivalenceTranslator
def processAnnotations(self,molecules,annotations):
processedAnnotations = []
for element in annotations:
if len(annotations[element]) > 1:
pro = [list(x) for x in itertools.combinations([y for y in annotations[element]],2)]
processedAnnotations.extend(pro)
return {-1:processedAnnotations}
def classifyReactionsWithAnnotations(self,reactions,molecules,annotations,labelDictionary):
'''
this method will go through the list of reactions and assign a 'modification' tag to those reactions where
some kind of modification goes on, aided by annotation information
'''
rawReactions = [parseReactions(x) for x in reactions]
equivalenceTranslator = self.processAnnotations(molecules,annotations)
for reactionIndex in range(0,len(rawReactions)):
for reactantIndex in range(0,len(rawReactions[reactionIndex])):
tmp = []
for chemicalIndex in range(0,len(rawReactions[reactionIndex][reactantIndex])):
tmp.extend(list(labelDictionary[rawReactions[reactionIndex][reactantIndex][chemicalIndex]]))
rawReactions[reactionIndex][reactantIndex] = tmp
#self.annotationClassificationHelper(rawReactions,equivalenceTranslator[-1])
def userJsonToDataStructure(self, patternName, userEquivalence, dictionary,
labelDictionary, equivalencesList):
'''
converts a user defined species to an internal representation
'''
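# userEquivalence is expected to look roughly like
# ['EGFR_P', [['EGFR', 'P', ['s', '1']]]] i.e. a pattern name followed by molecule
# definitions with component/state entries ('b' for bonds, 's' for states); this shape is
# illustrative, the exact format comes from the user-supplied JSON file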
tmp = st.Species()
label = []
for molecule in userEquivalence[1]:
if molecule[0] == 0:
labelDictionary[patternName] = 0
return
tmp2 = st.Molecule(molecule[0])
for componentIdx in range(1, len(molecule), 2):
tmp3 = st.Component(molecule[componentIdx])
for bindStateIdx in range(0, len(molecule[componentIdx + 1]), 2):
if molecule[componentIdx + 1][bindStateIdx] == "b":
tmp3.addBond(molecule[componentIdx + 1][bindStateIdx + 1])
elif molecule[componentIdx + 1][bindStateIdx] == "s":
tmp3.addState('0')
tmp3.addState(molecule[componentIdx + 1][bindStateIdx + 1])
equivalencesList.append([patternName, molecule[0]])
#tmp3.addState(molecule[2][2])
tmp2.addComponent(tmp3)
stmp = st.Species()
stmp.addMolecule(deepcopy(tmp2))
stmp.reset()
# in case one definition overlaps another
if molecule[0] in dictionary:
dictionary[molecule[0]].extend(deepcopy(stmp))
else:
dictionary[molecule[0]] = deepcopy(stmp)
labelDictionary[molecule[0]] = [(molecule[0],)]
label.append(molecule[0])
#for component in tmp2.components:
# if component.name == molecule[1]:
# component.setActiveState(molecule[2][1])
tmp.addMolecule(tmp2)
if patternName in dictionary:
dictionary[patternName].extend(deepcopy(tmp))
else:
dictionary[patternName] = deepcopy(tmp)
labelDictionary[patternName] = [tuple(label)]
def getUserDefinedComplexes(self):
dictionary = {}
partialDictionary = {}
userLabelDictionary = {}
equivalencesList = []
lexicalLabelDictionary = {}
if self.speciesEquivalences is not None:
speciesdictionary = self.loadConfigFiles(self.speciesEquivalences)
userEquivalences = speciesdictionary['complexDefinition'] \
if 'complexDefinition' in speciesdictionary else None
for element in userEquivalences:
self.userJsonToDataStructure(element[0], element, dictionary,
userLabelDictionary, equivalencesList)
complexEquivalences = speciesdictionary['modificationDefinition']
for element in complexEquivalences:
userLabelDictionary[element] = [tuple(complexEquivalences[element])]
partialUserEquivalences = speciesdictionary['partialComplexDefinition'] \
if 'partialComplexDefinition' in speciesdictionary else []
for element in partialUserEquivalences:
self.userJsonToDataStructure(tuple(sorted(element[0])), element, partialDictionary,
{}, [])
#stuff we got from string similarity
for element in self.lexicalSpecies:
self.userJsonToDataStructure(element[0], element, dictionary, lexicalLabelDictionary,
equivalencesList)
return dictionary, userLabelDictionary, lexicalLabelDictionary, partialDictionary
|
py | 7dfd2710ac1f434394b75b62d3a1eda2e96147cd | from flask import Blueprint, jsonify, redirect, request
from flask_jwt_extended import get_jwt_identity, jwt_required
from github import Github
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from src.backend.config import DATABASE_URI
from src.backend.model.code_repository import CodeRepository
from src.backend.model.notebook import Notebook
# pylint: disable=no-member
# Remove this when pylint no longer shows a false-positive "no-member" for SQLAlchemy "Session"
# Issue: https://github.com/PyCQA/pylint/issues/3610
SESSION = sessionmaker(create_engine(DATABASE_URI))
NOTEBOOKS_BLUEPRINT = Blueprint('notebooks', __name__)
@NOTEBOOKS_BLUEPRINT.route('/notebooks')
@jwt_required()
def get_all_notebooks():
session = SESSION()
return jsonify({
"notebooks": list(
map(
lambda notebook: {
'id': notebook.id,
'title': notebook.title,
'summary': notebook.summary,
'sha': notebook.sha,
'repoId': notebook.code_repo_id,
'repoName': notebook.code_repo.name
},
session.query(Notebook).all()))
})
@NOTEBOOKS_BLUEPRINT.route('/notebooks/<notebook_id>')
@jwt_required()
def get_notebook(notebook_id):
session = SESSION()
notebook = session.query(Notebook).get(int(notebook_id))
if notebook is None:
return jsonify({}), 404
return redirect(
f'/api/coderepositories/{notebook.code_repo_id}/file?path={notebook.path}&sha={notebook.sha}'
)
@NOTEBOOKS_BLUEPRINT.route('/notebooks/<notebook_id>', methods=['DELETE'])
@jwt_required()
def delete_notebook(notebook_id):
session = SESSION()
session.query(Notebook).filter(Notebook.id == int(notebook_id)).delete()
session.commit()
return jsonify({})
@NOTEBOOKS_BLUEPRINT.route('/notebooks', methods=['POST'])
@jwt_required()
def import_notebooks():
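# if the request body lists code repositories, import notebooks from those; otherwise
# re-scan the repositories already stored in the database, updating the sha of known
# notebooks and creating records (and repository rows) for new ones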
session = SESSION()
body = request.json if request.data else None
github = Github(get_jwt_identity())
if body and 'codeRepositories' in body:
code_repos = body['codeRepositories']
code_repos_ids = set(map(lambda repo: repo['id'], code_repos))
code_repos_owners = set(map(lambda repo: repo['owner'], code_repos))
else:
# no repos passed!
# update notebooks for current repos
code_repos = session.query(CodeRepository).all()
code_repos_ids = set(map(lambda repo: repo.id, code_repos))
code_repos_owners = set(map(lambda repo: repo.owner, code_repos))
notebooks_added = 0
notebooks_updated = 0
for owner in code_repos_owners:
notebooks = filter(
lambda notebook: notebook.repository.id in code_repos_ids,
github.search_code(f'user:{owner} extension:ipynb'))
for notebook in notebooks:
notebook_db = session.query(Notebook).filter(
Notebook.path == notebook.path,
Notebook.code_repo_id == notebook.repository.id).first()
if notebook_db:
notebook_db.sha = notebook.sha
notebooks_updated += 1
else:
if session.query(CodeRepository).get(
notebook.repository.id) is None:
# create repo
session.add(
CodeRepository(id=notebook.repository.id,
name=notebook.repository.name,
owner=owner))
session.add(
Notebook(code_repo_id=notebook.repository.id,
sha=notebook.sha,
path=notebook.path,
title=notebook.name,
summary=''))
notebooks_added += 1
session.commit()
return jsonify({
'notebooksAdded': notebooks_added,
'notebooksUpdated': notebooks_updated
})
|
py | 7dfd28b1821477d791f845893efeb66b83bde040 | # Generated by Django 2.0.13 on 2020-09-25 15:19
from django.db import migrations, models
import jsonfield.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='FeatureFlags',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('feature_flags', jsonfield.fields.JSONField(default=dict)),
('date_updated', models.DateTimeField(auto_now_add=True)),
],
options={
'ordering': ['date_updated'],
'managed': True,
},
),
]
|
py | 7dfd28cbb19cdd0180e52a8b0fbc695cff73f667 | """
Import statements for testing nero modules
David Oniani
Licensed under MIT License.
"""
from nero.engine import Core
from nero.interactions import Information, InteractiveHelp, Function
__all__ = ["Core", "Information", "InteractiveHelp", "Function"]
|
py | 7dfd2b1490da4ec8c2f13cf193c0f8aa21590bab | """drf_tutorial_part1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('snippets.urls')),
]
|
py | 7dfd2bbaa7c9da5417ae12298d438e3d25a02bfc | from epubContainers import opf,container
import os
import zipfile
from datetime import datetime
from html import *
class epub:
def __init__(self,title='title',identifier='id',language='en',mimetype="MediaType\\mime.txt"):
#create opf
self.opf = opf()
self.opf.addMetaDC("title",'title',title)
self.opf.addMetaDC("identifier",'bookid',identifier)
self.opf.addMetaDC("language",'',language)
self.opf.addMetaProperty("dcterms:modified",datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ"))
#create container xml
self.container = container()
self.container.addRootFile({"full-path":"OEBPS/content.opf","media-type":"application/oebps-package+xml"})
#string for mimetype file
self.mimetype = "application/epub+zip"
#holds references to each object to put in the epub
self.objects = []
#get set of mimetypes
self.mimetypes = self.getMimeTypes(mimetype)
#get map of extensions and mimetypes
def getMimeTypes(self,fileName):
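# expects a plain-text file with one tab-separated "extension<TAB>media-type" pair per line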
mt = {}
with open(fileName,'r') as file:
for l in list(file):
mimetype = l.strip().split('\t')
mt[mimetype[0]] = mimetype[1]
return mt
#return associated mimetype from extension
def getMediaType(self,extension):
if extension in self.mimetypes:
return self.mimetypes[extension]
return ''
#use if you need to create custom metadata
def addMeta(self,name,params,text):
self.opf.setMeta(name,params,text)
#update dc:identifier in opf
def setID(self,id):
self.opf.setMeta("dc:identifier",{"id":"bookid"},id)
#update dc:title in opf
def setTitle(self,title):
self.opf.setMeta("dc:title",{"id":"title"},title)
#update dc:language in opf
def setLan(self,lan):
self.opf.setMeta("dc:language",{},lan)
def addFile(self,filePath,isNav = False):
fileName = filePath.split('\\')[-1]
extension = fileName.split('.')[-1]
properties = {}
if isNav:
properties["properties"] = "nav"
properties["id"] = fileName.split('.')[0]
properties["href"] = fileName
properties["media-type"] = self.getMediaType(extension.strip())
#add necessary references
self.objects.append(filePath)
self.opf.addManifestItem("item",properties)
if properties["media-type"] == "application/xhtml+xml":
if isNav:
#if toc make first item
self.opf.addSpineFront("itemref",{"idref":properties["id"]})
else:
self.opf.addSpineItem("itemref",{"idref":properties["id"]})
#add folder and if toc exists in here define toc Name
def addFolder(self,folderPath,tocName=''):
try:
if os.path.isfile(folderPath+"\\"+tocName):
self.addFile(folderPath+"\\"+tocName,True)
for f in os.listdir(folderPath):
path = '\\'.join([folderPath,f])
if os.path.isfile(path):
if f != tocName:
self.addFile(path)
except FileNotFoundError:
print(folderPath + "can not be found")
#save to location
def save(self,loc):
if os.path.exists(loc+".epub"):
print(loc+" already exists. please choose another place to save")
else:
with zipfile.ZipFile(loc + '.epub','w') as zip:
#write mimetype,container and opf files
zip.writestr('mimetype',self.mimetype)
zip.writestr('META-INF\\container.xml',self.container.toString())
zip.writestr('OEBPS\\content.opf',self.opf.toString())
#write objects
for o in self.objects:
zip.write(o,"OEBPS\\"+o.split('\\')[-1]) |
py | 7dfd2c1a41e95222fd8c83b7693b52f85f750028 | """
Django settings for DAIPROJECT project.
Generated by 'django-admin startproject' using Django 2.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ''
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
WSGI_APPLICATION = 'DAIPROJECT.wsgi.application'
ROOT_URLCONF = 'DAIPROJECT.urls'
LANGUAGE_CODE = 'es-ES'
TIME_ZONE = 'Europe/Madrid'
USE_I18N = True
USE_L10N = True
USE_TZ = True
SITE_ID = 1
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = '/login'
ACCOUNT_LOGOUT_ON_GET = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.media',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# Database
# https://nesdis.github.io/djongo/integrating-django-with-mongodb/
DATABASES = {
'default': {
'ENGINE': 'djongo',
'ENFORCE_SCHEMA': True,
'NAME': '',
'HOST': 'localhost',
'PORT': 27017,
# 'USER': 'user',
# 'PASSWORD': 'pass',
# 'AUTH_SOURCE': 'db-name',
# 'AUTH_MECHANISM': 'SCRAM-SHA-1',
# 'REPLICASET': 'replicaset',
# 'SSL': 'ssl',
# 'SSL_CERTFILE': 'ssl_certfile',
# 'SSL_CA_CERTS': 'ssl_ca_certs',
# 'READ_PREFERENCE': 'read_preference'
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# For templates allauth
# https://wsvincent.com/django-allauth-tutorial/
# https://wsvincent.com/django-allauth-tutorial-custom-user-model/
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Possible default options for the following variable;
# change them to the ones we actually want to use.
#
ACCOUNT_FORMS = {
'login': 'restaurantes.forms.MyLoginForm',
'signup': 'restaurantes.forms.MySignupForm',
# 'add_email': 'allauth.account.forms.AddEmailForm',
# 'change_password': 'allauth.account.forms.ChangePasswordForm',
# 'set_password': 'allauth.account.forms.SetPasswordForm',
# 'reset_password': 'allauth.account.forms.ResetPasswordForm',
# 'reset_password_from_key': 'allauth.account.forms.ResetPasswordKeyForm',
# 'disconnect': 'allauth.socialaccount.forms.DisconnectForm',
}
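# A hedged sketch, not part of this settings module, of what the custom forms
# referenced by ACCOUNT_FORMS above could look like in restaurantes/forms.py;
# the widget tweaks are assumptions made purely for illustration.
#
#   from allauth.account.forms import LoginForm, SignupForm
#
#   class MyLoginForm(LoginForm):
#       def __init__(self, *args, **kwargs):
#           super().__init__(*args, **kwargs)
#           self.fields['login'].widget.attrs.update({'class': 'form-control'})
#
#   class MySignupForm(SignupForm):
#       def __init__(self, *args, **kwargs):
#           super().__init__(*args, **kwargs)
#           self.fields['email'].widget.attrs.update({'class': 'form-control'})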
|
py | 7dfd2caaae61d0679ef77ab28eef2a8c9b1cf683 |
class HTTP:
def __init__(self, raw_data):
try:
self.data = raw_data.decode('utf-8')
except:
self.data = raw_data
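
# A hedged usage sketch, not from the original sniffer: HTTP keeps the payload
# as text when it decodes as UTF-8 and as the raw bytes otherwise, so callers
# can inspect header lines without worrying about binary bodies.
if __name__ == '__main__':
    sample = b'GET / HTTP/1.1\r\nHost: example.com\r\n\r\n'
    request = HTTP(sample)
    print(request.data.split('\r\n')[0])  # -> GET / HTTP/1.1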
|
py | 7dfd2cc4134f4f904443de7879f4148a6d3096ad | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from cms.toolbar_base import CMSToolbar
from cms.toolbar_pool import toolbar_pool
from cms.utils.urlutils import admin_reverse
from django.core.urlresolvers import reverse
from django.utils.translation import override, ugettext_lazy as _
from .settings import get_setting
@toolbar_pool.register
class BlogToolbar(CMSToolbar):
def populate(self):
if (not self.is_current_app and not get_setting('ENABLE_THROUGH_TOOLBAR_MENU')) or \
not self.request.user.has_perm('djangocms_blog.add_post'):
return # pragma: no cover
admin_menu = self.toolbar.get_or_create_menu('djangocms_blog', _('Blog'))
with override(self.current_lang):
url = reverse('admin:djangocms_blog_post_changelist')
admin_menu.add_modal_item(_('Post list'), url=url)
url = reverse('admin:djangocms_blog_post_add')
admin_menu.add_modal_item(_('Add post'), url=url)
current_config = getattr(self.request, get_setting('CURRENT_NAMESPACE'), None)
if current_config:
url = reverse('admin:djangocms_blog_blogconfig_change', args=(current_config.pk,))
admin_menu.add_modal_item(_('Edit configuration'), url=url)
current_post = getattr(self.request, get_setting('CURRENT_POST_IDENTIFIER'), None)
if current_post and self.request.user.has_perm('djangocms_blog.change_post'): # pragma: no cover # NOQA
admin_menu.add_modal_item(_('Edit Post'), reverse(
'admin:djangocms_blog_post_change', args=(current_post.pk,)),
active=True)
def add_publish_button(self):
"""
Adds the publish button to the toolbar if the current post is unpublished
"""
current_post = getattr(self.request, get_setting('CURRENT_POST_IDENTIFIER'), None)
if (self.toolbar.edit_mode and current_post and
not current_post.publish and
self.request.user.has_perm('djangocms_blog.change_post')
): # pragma: no cover # NOQA
classes = ['cms-btn-action', 'blog-publish']
title = _('Publish {0} now').format(current_post.app_config.object_name)
url = admin_reverse('djangocms_blog_publish_article', args=(current_post.pk,))
self.toolbar.add_button(title, url=url, extra_classes=classes, side=self.toolbar.RIGHT)
def post_template_populate(self):
current_post = getattr(self.request, get_setting('CURRENT_POST_IDENTIFIER'), None)
if current_post and self.request.user.has_perm('djangocms_blog.change_post'): # pragma: no cover # NOQA
# removing page meta menu, if present, to avoid confusion
try: # pragma: no cover
import djangocms_page_meta # NOQA
menu = self.request.toolbar.get_or_create_menu('page')
pagemeta = menu.get_or_create_menu('pagemeta', 'meta')
menu.remove_item(pagemeta)
except ImportError:
pass
# removing page tags menu, if present, to avoid confusion
try: # pragma: no cover
import djangocms_page_tags # NOQA
menu = self.request.toolbar.get_or_create_menu('page')
pagetags = menu.get_or_create_menu('pagetags', 'tags')
menu.remove_item(pagetags)
except ImportError:
pass
self.add_publish_button()
|
py | 7dfd2d1f9d466dc036557f49e5cfc474adeae75f | import asyncio
from bleak import BleakScanner
async def main():
devices = await BleakScanner.discover()
for d in devices:
print(d)
# asyncio.run(main())
import bluetooth
nearby_devices = bluetooth.discover_devices(lookup_names=True)
print("Found {} devices.".format(len(nearby_devices)))
for addr, name in nearby_devices:
print(" {} - {}".format(addr, name))
|
py | 7dfd2d2c218750c6721697089ce99e6ad8311127 | #!/usr/bin/python
# Copyright 2017 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mendertesting import MenderTesting
from common_setup import *
from MenderAPI import adm, deviceauth, inv
import pytest
import json
import logging
from Crypto.PublicKey import RSA
import time
class TestPreauthBase(MenderTesting):
def do_test_ok_preauth_and_bootstrap(self):
"""
Test the happy path from preauthorizing a device to a successful bootstrap.
Verify that the device/auth set appear correctly in admission API results.
"""
client = get_mender_clients()[0]
# we'll use the same pub key for the preauth'd device, so get it
res = execute(Client.get_pub_key, hosts=client)
preauth_key = res[client].exportKey()
# stick an extra newline on the key - this is how a device would send it
preauth_key += '\n'
# preauthorize a new device
preauth_iddata = {"mac": "mac-preauth"}
# serialize manually to avoid an extra space (id data helper doesn't insert one)
preauth_iddata_str = "{\"mac\":\"mac-preauth\"}"
r = adm.preauth(preauth_iddata_str, preauth_key)
assert r.status_code == 201
# verify the device appears correctly in api results
devs = adm.get_devices(2)
dev_preauth = [d for d in devs if d['status'] == 'preauthorized']
assert len(dev_preauth) == 1
dev_preauth = dev_preauth[0]
assert dev_preauth['device_identity'] == preauth_iddata_str
assert dev_preauth['key'] == preauth_key
# make one of the existing devices the preauthorized device
# by substituting id data and restarting
res = execute(Client.substitute_id_data, preauth_iddata, hosts=client)
res = execute(Client.restart, hosts=client)
# verify api results - after some time the device should be 'accepted'
for _ in range(120):
time.sleep(15)
dev_accepted = adm.get_devices_status(status="accepted", expected_devices=2)
if len([d for d in dev_accepted if d['status'] == 'accepted']) == 1:
break
logging.info("devices: " + str(dev_accepted))
dev_accepted = [d for d in dev_accepted if d['status'] == 'accepted']
logging.info("accepted devices: " + str(dev_accepted))
execute(Client.get_logs, hosts=client)
assert len(dev_accepted) == 1, "looks like the device was never accepted"
dev_accepted = dev_accepted[0]
logging.info("accepted device: " + str(dev_accepted))
assert dev_accepted['device_identity'] == preauth_iddata_str
assert dev_accepted['key'] == preauth_key
# verify device was issued a token
res = execute(Client.have_authtoken, hosts=client)
assert res[client]
def do_test_ok_preauth_and_remove(self):
"""
Test the removal of a preauthorized auth set, verify it's gone from all API results.
"""
# preauthorize
preauth_iddata = "{\"mac\":\"preauth-mac\"}"
preauth_key = "preauth-key"
r = adm.preauth(preauth_iddata, preauth_key)
assert r.status_code == 201
devs = adm.get_devices(2)
dev_preauth = [d for d in devs if d['device_identity'] == preauth_iddata]
assert len(dev_preauth) == 1
dev_preauth = dev_preauth[0]
# remove from admission
r = adm.delete_auth_set(dev_preauth['id'])
assert r.status_code == 204
# verify removed from admission
devs = adm.get_devices(1)
dev_removed = [d for d in devs if d['device_identity'] == preauth_iddata]
assert len(dev_removed) == 0
# verify removed from deviceauth
r = deviceauth.get_device(dev_preauth['id'])
assert r.status_code == 404
# verify removed from inventory
r = inv.get_device(dev_preauth['id'])
assert r.status_code == 404
def do_test_fail_preauth_existing(self):
"""
Test 'conflict' response when an identity data set already exists.
"""
# wait for the device to appear
devs = adm.get_devices(1)
dev = devs[0]
# try to preauthorize the same id data, new key
r = adm.preauth(dev['device_identity'], 'preauth-key')
assert r.status_code == 409
class TestPreauth(TestPreauthBase):
@pytest.mark.skip(reason="there is a problem with this test: MEN-1797")
@pytest.mark.usefixtures("standard_setup_one_client")
def test_ok_preauth_and_bootstrap(self):
self.do_test_ok_preauth_and_bootstrap()
@pytest.mark.usefixtures("standard_setup_one_client")
def test_ok_preauth_and_remove(self):
self.do_test_ok_preauth_and_remove()
@pytest.mark.usefixtures("standard_setup_one_client")
def test_fail_preauth_existing(self):
self.do_test_fail_preauth_existing()
class TestPreauthMultiTenant(TestPreauthBase):
@pytest.mark.skip(reason="there is a problem with this test: MEN-1797")
@pytest.mark.usefixtures("multitenancy_setup_without_client")
def test_ok_preauth_and_bootstrap(self):
self.__create_tenant_and_container()
self.do_test_ok_preauth_and_bootstrap()
@pytest.mark.usefixtures("multitenancy_setup_without_client")
def test_ok_preauth_and_remove(self):
self.__create_tenant_and_container()
self.do_test_ok_preauth_and_remove()
@pytest.mark.usefixtures("multitenancy_setup_without_client")
def test_fail_preauth_existing(self):
self.__create_tenant_and_container()
self.do_test_fail_preauth_existing()
def __create_tenant_and_container(self):
auth.new_tenant("admin", "[email protected]", "hunter2hunter2")
token = auth.current_tenant["tenant_token"]
new_tenant_client("tenant-container", token)
client = get_mender_clients()[0]
ssh_is_opened(client)
class Client:
"""Wraps various actions on the client, performed via SSH (inside fabric.execute())."""
ID_HELPER = '/usr/share/mender/identity/mender-device-identity'
PRIV_KEY = '/data/mender/mender-agent.pem'
MENDER_STORE = '/data/mender/mender-store'
KEYGEN_TIMEOUT = 300
DEVICE_ACCEPTED_TIMEOUT = 600
MENDER_STORE_TIMEOUT = 600
@staticmethod
def get_logs():
output_from_journalctl = run("journalctl -u mender -l")
logging.info(output_from_journalctl)
@staticmethod
def get_pub_key():
"""Extract the device's public key from its private key."""
Client.__wait_for_keygen()
keystr = run('cat {}'.format(Client.PRIV_KEY))
key = RSA.importKey(keystr)
return key.publickey()
@staticmethod
def substitute_id_data(id_data_dict):
"""Change the device's identity by substituting it's id data helper script."""
id_data = '#!/bin/sh\n'
for k,v in id_data_dict.items():
id_data += 'echo {}={}\n'.format(k,v)
cmd = 'echo "{}" > {}'.format(id_data, Client.ID_HELPER)
run(cmd)
@staticmethod
def restart():
"""Restart the mender service."""
run('systemctl restart mender.service')
@staticmethod
def have_authtoken():
"""Verify that the device was authenticated by checking its data store for the authtoken."""
sleepsec = 0
while sleepsec < Client.MENDER_STORE_TIMEOUT:
try:
out = run('strings {} | grep authtoken'.format(Client.MENDER_STORE))
return out != ''
except:
output_from_journalctl = run("journalctl -u mender -l")
logging.info("Logs from client: " + output_from_journalctl)
time.sleep(10)
sleepsec += 10
logging.info("waiting for mender-store file, sleepsec: {}".format(sleepsec))
assert sleepsec <= Client.MENDER_STORE_TIMEOUT, "timeout for mender-store file exceeded"
@staticmethod
def __wait_for_keygen():
sleepsec = 0
while sleepsec < Client.KEYGEN_TIMEOUT:
try:
run('stat {}'.format(Client.PRIV_KEY))
except:
time.sleep(10)
sleepsec += 10
logging.info("waiting for key gen, sleepsec: {}".format(sleepsec))
else:
time.sleep(5)
break
assert sleepsec <= Client.KEYGEN_TIMEOUT, "timeout for key generation exceeded"
|
py | 7dfd2ece04fb2a818f911e7fae1b07cdbf25dd90 | # -*- coding: utf-8 -*-
import numpy as np
from ..misc import replace
def microstates_classify(segmentation, microstates):
"""Reorder (sort) the microstates (experimental).
Based on the pattern of values in the vector of channels (thus, depends on how channels
are ordered).
Parameters
----------
segmentation : Union[np.array, dict]
Vector containing the segmentation.
microstates : Union[np.array, dict]
        Array of microstate maps. Defaults to None.
Returns
-------
segmentation, microstates
Tuple containing re-ordered input.
Examples
------------
>>> import neurokit2 as nk
>>>
>>> eeg = nk.mne_data("filt-0-40_raw").filter(1, 35) #doctest: +ELLIPSIS
Filtering raw data ...
>>> eeg = nk.eeg_rereference(eeg, 'average')
>>>
>>> # Original order
>>> out = nk.microstates_segment(eeg)
>>> nk.microstates_plot(out, gfp=out["GFP"][0:100]) #doctest: +ELLIPSIS
<Figure ...>
>>>
>>> # Reorder
>>> out = nk.microstates_classify(out["Sequence"], out["Microstates"])
"""
# Reorder
new_order = _microstates_sort(microstates)
microstates = microstates[new_order]
replacement = {i: j for i, j in enumerate(new_order)}
segmentation = replace(segmentation, replacement)
return segmentation, microstates
# =============================================================================
# Methods
# =============================================================================
def _microstates_sort(microstates):
n_states = len(microstates)
order_original = np.arange(n_states)
# For each state, get linear and quadratic coefficient
coefs_quadratic = np.zeros(n_states)
coefs_linear = np.zeros(n_states)
for i in order_original:
state = microstates[i, :]
_, coefs_linear[i], coefs_quadratic[i] = np.polyfit(
state, np.arange(len(state)), 2
)
# For each state, which is the biggest trend, linear or quadratic
order_quad = order_original[np.abs(coefs_linear) <= np.abs(coefs_quadratic)]
order_lin = order_original[np.abs(coefs_linear) > np.abs(coefs_quadratic)]
# Reorder each
order_quad = order_quad[np.argsort(coefs_quadratic[order_quad])]
order_lin = order_lin[np.argsort(coefs_linear[order_lin])]
new_order = np.concatenate([order_quad, order_lin])
return new_order
|
py | 7dfd2f9b38e91d06d4ff7e5503ef987a26aa0007 | #!.
import threading
import time
import sys
import ipdb # To be able to see error stack messages occuring in the Qt MainLoop
import os
from Settings import Settings
from MarketData import MarketData
from InputDataHandler import InputDataHandler
from GDAXController import GDAXController
from TransactionManager import TransactionManager
from Trader import Trader
from UIGraph import UIGraph
from AppState import AppState
import TradingBotConfig as theConfig
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui # Only useful for splash screen
class TradingBot(object):
def __init__(self):
cwd = os.getcwd()
print("Running Astibot in: %s" % cwd)
self.isInitializing = True
self.iterationCounter = 0
self.historicPriceIterationCounter = 0
self.app = pg.QtGui.QApplication(['Astibot'])
# Show Splash Screen
splash_pix = QtGui.QPixmap('AstibotSplash.png')
splash = QtGui.QSplashScreen(splash_pix, QtCore.Qt.WindowStaysOnTopHint)
splash.show()
# Instanciate objects
self.theSettings = Settings()
self.theUIGraph = UIGraph(self.app, self.theSettings)
self.theGDAXControler = GDAXController(self.theUIGraph, self.theSettings)
self.theMarketData = MarketData(self.theGDAXControler, self.theUIGraph)
self.theTransactionManager = TransactionManager(self.theGDAXControler, self.theUIGraph, self.theMarketData, self.theSettings)
self.theUIGraph.UIGR_SetTransactionManager(self.theTransactionManager)
self.theTrader = Trader(self.theTransactionManager, self.theMarketData, self.theUIGraph, self.theSettings)
self.theInputDataHandler = InputDataHandler(self.theGDAXControler, self.theUIGraph, self.theMarketData, self.theTrader, self.theSettings)
self.theApp = AppState(self.theUIGraph, self.theTrader, self.theGDAXControler, self.theInputDataHandler, self.theMarketData, self.theSettings)
# Setup Main Tick Timer
self.mainTimer = pg.QtCore.QTimer()
self.mainTimer.timeout.connect(self.MainTimerHandler)
self.mainTimer.start(100)
# Hide splash screen
splash.close()
# Endless call
self.app.exec_()
# App closing
self.theGDAXControler.GDAX_closeBackgroundOperations()
self.theInputDataHandler.INDH_closeBackgroundOperations()
self.theUIGraph.UIGR_closeBackgroundOperations()
def MainTimerHandler(self):
self.theApp.APP_Execute()
if __name__ == '__main__':
theTradingBot = TradingBot()
|
py | 7dfd2fef962a18d22f94619199998cf00a4ec73d | import numpy as np
OLDSTYLE = False
DT = np.dtype([('ch', 'u1'), ('timetag', 'u8'), ('xfer', 'u2')])
DTmin = np.dtype([('ch', 'u1'), ('timetag', 'u8')])
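
# A hedged illustration, not part of the original file: how records with the
# DT layout could be written and read back with NumPy; the file name is invented.
if __name__ == '__main__':
    records = np.zeros(3, dtype=DT)
    records['ch'] = [0, 1, 0]
    records['timetag'] = [100, 250, 400]
    records['xfer'] = [1, 1, 2]
    records.tofile('timetags.bin')
    print(np.fromfile('timetags.bin', dtype=DT))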
|
py | 7dfd30330a6b5d6abe5ccbb2249f334fbab32d2b | """
# -*- coding: utf-8 -*-
-------------------------------------------------
# @Project :meiduo_mall
# @File :__init__.py.py
# @Date :2021/11/3 20:29
# @Author :turbo
# @Email :2647387166
# @Software :PyCharm
-------------------------------------------------
"""
|
py | 7dfd30ce32bc7085af64effd9fea7b390b5f9a17 | #/usr/bin/env python
from setuptools import setup
setup(
name='sample',
version='1.0',
description='A sample project with unittest-style tests.',
test_suite='tests'
)
|
py | 7dfd310ec9ae4f227fb2104fce0a486560dc37fb | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1CrossVersionObjectReference(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'api_version': 'str',
'kind': 'str',
'name': 'str'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'name': 'name'
}
def __init__(self, api_version=None, kind=None, name=None):
"""
V1CrossVersionObjectReference - a model defined in Swagger
"""
self._api_version = None
self._kind = None
self._name = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.kind = kind
self.name = name
@property
def api_version(self):
"""
Gets the api_version of this V1CrossVersionObjectReference.
API version of the referent
:return: The api_version of this V1CrossVersionObjectReference.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1CrossVersionObjectReference.
API version of the referent
:param api_version: The api_version of this V1CrossVersionObjectReference.
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""
Gets the kind of this V1CrossVersionObjectReference.
Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\"
:return: The kind of this V1CrossVersionObjectReference.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1CrossVersionObjectReference.
Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\"
:param kind: The kind of this V1CrossVersionObjectReference.
:type: str
"""
if kind is None:
raise ValueError("Invalid value for `kind`, must not be `None`")
self._kind = kind
@property
def name(self):
"""
Gets the name of this V1CrossVersionObjectReference.
Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names
:return: The name of this V1CrossVersionObjectReference.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this V1CrossVersionObjectReference.
Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names
:param name: The name of this V1CrossVersionObjectReference.
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`")
self._name = name
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1CrossVersionObjectReference):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
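
# A hedged usage sketch, not part of the generated client: building the
# reference by keyword and serializing it with the generated helpers.
if __name__ == '__main__':
    ref = V1CrossVersionObjectReference(api_version='apps/v1', kind='Deployment', name='web')
    print(ref.to_dict())  # {'api_version': 'apps/v1', 'kind': 'Deployment', 'name': 'web'}
    print(ref == V1CrossVersionObjectReference(api_version='apps/v1', kind='Deployment', name='web'))  # True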
|
py | 7dfd31da99ae902f7deaff23efce0b26ebd0acc9 | # Form implementation generated from reading ui file 'gronsfeld.ui'
#
# Created by: PyQt6 UI code generator 6.2.3
#
# WARNING: Any manual changes made to this file will be lost when pyuic6 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt6 import QtCore, QtGui, QtWidgets
class Ui_gronsfeld(object):
def setupUi(self, gronsfeld):
gronsfeld.setObjectName("gronsfeld")
gronsfeld.resize(500, 400)
self.verticalLayout = QtWidgets.QVBoxLayout(gronsfeld)
self.verticalLayout.setContentsMargins(4, 4, 4, 4)
self.verticalLayout.setSpacing(12)
self.verticalLayout.setObjectName("verticalLayout")
self.group_box_input = QtWidgets.QGroupBox(gronsfeld)
self.group_box_input.setObjectName("group_box_input")
self.verticalLayout_20 = QtWidgets.QVBoxLayout(self.group_box_input)
self.verticalLayout_20.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_20.setSpacing(0)
self.verticalLayout_20.setObjectName("verticalLayout_20")
self.text_edit_input = QtWidgets.QTextEdit(self.group_box_input)
self.text_edit_input.setObjectName("text_edit_input")
self.verticalLayout_20.addWidget(self.text_edit_input)
self.verticalLayout.addWidget(self.group_box_input)
self.horizontal_layout_1 = QtWidgets.QHBoxLayout()
self.horizontal_layout_1.setContentsMargins(8, -1, -1, -1)
self.horizontal_layout_1.setSpacing(12)
self.horizontal_layout_1.setObjectName("horizontal_layout_1")
self.label_key = QtWidgets.QLabel(gronsfeld)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Fixed, QtWidgets.QSizePolicy.Policy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_key.sizePolicy().hasHeightForWidth())
self.label_key.setSizePolicy(sizePolicy)
self.label_key.setObjectName("label_key")
self.horizontal_layout_1.addWidget(self.label_key)
self.line_edit_key = QtWidgets.QLineEdit(gronsfeld)
self.line_edit_key.setObjectName("line_edit_key")
self.horizontal_layout_1.addWidget(self.line_edit_key)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Minimum)
self.horizontal_layout_1.addItem(spacerItem)
self.combo_box_mode = QtWidgets.QComboBox(gronsfeld)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Fixed, QtWidgets.QSizePolicy.Policy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.combo_box_mode.sizePolicy().hasHeightForWidth())
self.combo_box_mode.setSizePolicy(sizePolicy)
self.combo_box_mode.setObjectName("combo_box_mode")
self.combo_box_mode.addItem("")
self.combo_box_mode.addItem("")
self.horizontal_layout_1.addWidget(self.combo_box_mode)
self.verticalLayout.addLayout(self.horizontal_layout_1)
self.horizontal_layout_2 = QtWidgets.QHBoxLayout()
self.horizontal_layout_2.setObjectName("horizontal_layout_2")
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Minimum)
self.horizontal_layout_2.addItem(spacerItem1)
self.button_make = QtWidgets.QPushButton(gronsfeld)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Fixed, QtWidgets.QSizePolicy.Policy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.button_make.sizePolicy().hasHeightForWidth())
self.button_make.setSizePolicy(sizePolicy)
self.button_make.setMinimumSize(QtCore.QSize(100, 30))
self.button_make.setObjectName("button_make")
self.horizontal_layout_2.addWidget(self.button_make)
self.verticalLayout.addLayout(self.horizontal_layout_2)
self.group_box_output = QtWidgets.QGroupBox(gronsfeld)
self.group_box_output.setObjectName("group_box_output")
self.verticalLayout_23 = QtWidgets.QVBoxLayout(self.group_box_output)
self.verticalLayout_23.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_23.setSpacing(0)
self.verticalLayout_23.setObjectName("verticalLayout_23")
self.text_edit_output = QtWidgets.QTextEdit(self.group_box_output)
self.text_edit_output.setReadOnly(True)
self.text_edit_output.setObjectName("text_edit_output")
self.verticalLayout_23.addWidget(self.text_edit_output)
self.verticalLayout.addWidget(self.group_box_output)
self.retranslateUi(gronsfeld)
QtCore.QMetaObject.connectSlotsByName(gronsfeld)
def retranslateUi(self, gronsfeld):
_translate = QtCore.QCoreApplication.translate
gronsfeld.setWindowTitle(_translate("gronsfeld", "Form"))
self.group_box_input.setTitle(_translate("gronsfeld", "Input text"))
self.label_key.setText(_translate("gronsfeld", "Key:"))
self.combo_box_mode.setItemText(0, _translate("gronsfeld", "Encrypt"))
self.combo_box_mode.setItemText(1, _translate("gronsfeld", "Decrypt"))
self.button_make.setText(_translate("gronsfeld", "Make"))
self.group_box_output.setTitle(_translate("gronsfeld", "Output text"))
|
py | 7dfd321e68b0926cf2b70d77ab5ef8c785679ec5 | # Beispielprogramm für das Buch "Python Challenge"
#
# Copyright 2020 by Michael Inden
from ch05_datastructures.solutions.ex02_stack import Stack
def reverse(values):
result = []
for i in range(len(values) - 1, -1, -1):
result.append(values[i])
return result
def reverse_with_comprehension(values):
return [values[i] for i in range(len(values) - 1, -1, -1)]
def reverse_with_comprehension_nicer(values):
return [value for value in reversed(values)]
def reverse_with_slicing(values):
return values[::-1]
def reverse_inplace(original):
left = 0
right = len(original) - 1
    # walk inward from the left and the right, swapping the elements at each pair of positions
while left < right:
left_elem = original[left]
right_elem = original[right]
# swap
original[left] = right_elem
original[right] = left_elem
left += 1
right -= 1
return original
def list_reverse_with_stack(inputs):
    # Idea: traverse the list from front to back (fast) and push every element onto a stack
all_values = Stack()
for element in inputs:
all_values.push(element)
    # Drain the stack and fill a result list
result = []
while not all_values.is_empty():
result.append(all_values.pop())
return result
def main():
print(reverse([1, 2, 3, 4, 5, 6]))
print(reverse_with_comprehension([1, 2, 3, 4, 5, 6]))
print(reverse_with_comprehension_nicer([1, 2, 3, 4, 5, 6]))
print(reverse_with_slicing([1, 2, 3, 4, 5, 6]))
print(reverse_inplace(["M", "i", "c"]))
print(reverse_inplace(["T", "I", "M"]))
print(list_reverse_with_stack(["M", "i", "c"]))
print(list_reverse_with_stack(["T", "I", "M"]))
if __name__ == "__main__":
main()
|
py | 7dfd32203da10cc25ee2cbd0f88428ecf884c831 | # A pay computation that gives the employee 1.5times the hourly rate for hours worked above 40hours
hours = int(input("Enter hours worked: \n"))
rate = 10
if hours > 40:
    # hours above 40 are paid at 1.5 times the hourly rate
    pay = 40 * rate + (hours - 40) * 1.5 * rate
else:
    pay = hours * rate
print("Payment is: \n", pay)
|
py | 7dfd32e5b499c98928d10f0ca84ff2fd437d37e0 | from django.contrib import admin
from .models import City
# Register your models here.
admin.site.register(City)
|
py | 7dfd334066101f85c92b47d5106b206ae0e3e436 | # flake8: noqa N806
import csv
import io
import json
import os
from typing import Set
def find_common_prefix(p1, p2):
# TODO obliterate this function
minStrLen = min(len(p1), len(p2))
firstNotMatch = 0
while firstNotMatch < minStrLen:
if p1[firstNotMatch] != p2[firstNotMatch]:
break
firstNotMatch += 1
# Maybe need to backtrack to the nearest separator if we landed in the middle of a
# subdirectory name
while firstNotMatch > 0 and p1[firstNotMatch - 1] != os.path.sep:
firstNotMatch -= 1
return p1[:firstNotMatch]
def csvContentToJsonObject(csvContent: str) -> dict:
csvReader = csv.DictReader(io.StringIO(csvContent))
print('field names: {0}'.format(csvReader.fieldnames)) # TODO remove
# First just blindly put everything in
scans_raw = []
for row in csvReader:
nextScan = {}
for fieldName in csvReader.fieldnames:
nextScan[fieldName] = row[fieldName]
scans_raw.append(nextScan)
# Gather commonalities
common_path_prefix = scans_raw[0]['nifti_folder']
experiment_dict = {}
site_set: Set[str] = set()
for scan in scans_raw:
common_path_prefix = find_common_prefix(common_path_prefix, scan['nifti_folder'])
scan['scan_path'] = '_'.join([scan['scan_id'], scan['scan_type']])
experiment_dict[scan['xnat_experiment_id']] = scan['experiment_note']
print('common path prefix: {0}'.format(common_path_prefix)) # TODO remove
# Produce final scans
scans = []
for scan in scans_raw:
nifti_folder = scan['nifti_folder']
subdir = nifti_folder.split(common_path_prefix)[1]
if 'site' in scan:
site = scan['site']
elif nifti_folder.startswith('/fs/storage/XNAT/archive/'):
# Special case handling to match previous implementation
splits = nifti_folder.split('/')
site = splits[5].split('_')[0]
else:
site = subdir[: subdir.index('_')]
site_set.add(site)
scan_obj = {
'id': scan['scan_id'],
'type': scan['scan_type'],
'note': scan['scan_note'],
'experiment_id': scan['xnat_experiment_id'],
'path': os.path.join(subdir, scan['scan_path']),
'image_pattern': r"^image[\d]*\.nii\.gz$",
'site_id': site,
}
if 'decision' in scan:
scan_obj['decision'] = scan['decision']
scans.append(scan_obj)
# Build list of unique experiments
experiments = []
for exp_id, exp_note in experiment_dict.items():
experiments.append(
{
'id': exp_id,
'note': exp_note,
}
)
return {
'data_root': common_path_prefix,
'scans': scans,
'experiments': experiments,
'sites': [{'name': site} for site in site_set],
}
def csvToJson(csvFilePath, jsonFilePath):
print('Reading input csv from {0}'.format(csvFilePath))
with open(csvFilePath) as fd:
csvContent = fd.read()
jsonObject = csvContentToJsonObject(csvContent)
print('Writing output json to {0}'.format(jsonFilePath))
with open(jsonFilePath, 'w') as fd:
json.dump(jsonObject, fd)
|
py | 7dfd3436f3a1ca126f99088a25cc2008840eefdd | from __future__ import annotations
from dataclasses import dataclass
from enum import Enum, auto
from itertools import accumulate, chain
from typing import Iterable
from aoc import load_puzzle_input, format_solution
@dataclass
class Point:
x: int = 0
y: int = 0
@property
def manhattan_distance_from_origin(self) -> int:
return abs(self.x) + abs(self.y)
def __add__(self, other: Point) -> Point:
return Point(self.x + other.x, self.y + other.y)
def __mul__(self, steps: int) -> Point:
return Point(self.x * steps, self.y * steps)
def __str__(self) -> str:
return f"({self.x}, {self.y})"
def __repr__(self) -> str:
return f"Point(x={self.x}, y={self.y})"
def __hash__(self) -> int:
return (self.x, self.y).__hash__()
class Instruction(Enum):
TURN_LEFT = auto()
TURN_RIGHT = auto()
STEP_FORWARD = auto()
@classmethod
def parse(cls, s: str) -> Iterable[Instruction]:
if s[0] == "L":
yield cls.TURN_LEFT
elif s[0] == "R":
yield cls.TURN_RIGHT
else:
raise ValueError("Invalue turn direction, must be L or R.")
for _ in range(int(s[1:])):
yield cls.STEP_FORWARD
class Direction(Enum):
NORTH = Point(0, 1)
EAST = Point(1, 0)
SOUTH = Point(0, -1)
WEST = Point(-1, 0)
def turn(self, turn: Instruction) -> Direction:
if turn is Instruction.STEP_FORWARD:
raise ValueError("'turn' must be TURN_LEFT or TURN_RIGHT.")
cw = [Direction.NORTH, Direction.EAST, Direction.SOUTH, Direction.WEST]
acw = cw[::-1]
if turn is Instruction.TURN_LEFT:
return dict(zip(acw, acw[1:] + [acw[0]]))[self]
if turn is Instruction.TURN_RIGHT:
return dict(zip(cw, cw[1:] + [cw[0]]))[self]
raise Exception("Unreachable.")
@dataclass
class State:
position: Point = Point()
heading: Direction = Direction.NORTH
last_action: Instruction | None = None
def perform_instruction(self, instruction: Instruction) -> State:
if instruction is Instruction.STEP_FORWARD:
position = self.position + self.heading.value
heading = self.heading
else:
position = self.position
heading = self.heading.turn(instruction)
return State(position, heading, instruction)
def parse_instructions(strings: list[str]) -> list[Instruction]:
return list(chain.from_iterable(Instruction.parse(s) for s in strings))
def follow_all_instructions(instructions: list[Instruction]) -> Iterable[Point]:
return (
state.position
for state in accumulate(
instructions, lambda state, i: state.perform_instruction(i), initial=State()
)
if state.last_action is Instruction.STEP_FORWARD
)
def find_first_repeat_location(
all_positions: Iterable[Point], seen: frozenset[Point] = frozenset()
) -> Point:
if not all_positions:
raise ValueError("No repeated position found.")
first, *rest = all_positions
return first if first in seen else find_first_repeat_location(rest, seen | {first})
if __name__ == "__main__":
instructions = parse_instructions(load_puzzle_input(2016, 1).split(", "))
all_positions = list(follow_all_instructions(instructions))
end_position = all_positions[-1]
first_repeat = find_first_repeat_location(all_positions)
print(
format_solution(
title="Day 1: No Time for a Taxicab",
part_one=end_position.manhattan_distance_from_origin,
part_two=first_repeat.manhattan_distance_from_origin,
)
)
|
py | 7dfd35af7d9285ff9efe2fae7a93a4af10dea21e | from otp.level import EntityCreatorBase
from otp.level import LogicGate
from otp.level import EditMgrAI
from otp.level import LevelMgrAI
from otp.level import ZoneEntityAI
from toontown.toonbase.ToonPythonUtil import Functor
def createDistributedEntity(AIclass, level, entId, zoneId):
ent = AIclass(level, entId)
ent.generateWithRequired(zoneId)
return ent
def createLocalEntity(AIclass, level, entId, zoneId):
ent = AIclass(level, entId)
return ent
def nothing(*args):
return 'nothing'
class EntityCreatorAI(EntityCreatorBase.EntityCreatorBase):
def __init__(self, level):
EntityCreatorBase.EntityCreatorBase.__init__(self, level)
cLE = createLocalEntity
self.privRegisterTypes({'attribModifier': nothing,
'ambientSound': nothing,
'collisionSolid': nothing,
'cutScene': nothing,
'editMgr': Functor(cLE, EditMgrAI.EditMgrAI),
'entityGroup': nothing,
'entrancePoint': nothing,
'levelMgr': Functor(cLE, LevelMgrAI.LevelMgrAI),
'locator': nothing,
'logicGate': Functor(cLE, LogicGate.LogicGate),
'model': nothing,
'nodepath': nothing,
'path': nothing,
'propSpinner': nothing,
'visibilityExtender': nothing,
'zone': Functor(cLE, ZoneEntityAI.ZoneEntityAI)})
def doCreateEntity(self, ctor, entId):
zoneId = self.level.getEntityZoneId(entId)
self.notify.debug('creating entity %s in zone %s' % (entId, zoneId))
return ctor(self.level, entId, zoneId) |
py | 7dfd35e8aa1ff02efe30d5d7c546747c5de47feb | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class LuisResult(Model):
"""Prediction, based on the input query, containing intent(s) and entities.
:param query: The input utterance that was analyzed.
:type query: str
:param altered_query: The corrected utterance (when spell checking was
enabled).
:type altered_query: str
:param top_scoring_intent:
:type top_scoring_intent:
~azure.cognitiveservices.language.luis.runtime.models.IntentModel
:param intents: All the intents (and their score) that were detected from
utterance.
:type intents:
list[~azure.cognitiveservices.language.luis.runtime.models.IntentModel]
:param entities: The entities extracted from the utterance.
:type entities:
list[~azure.cognitiveservices.language.luis.runtime.models.EntityModel]
:param composite_entities: The composite entities extracted from the
utterance.
:type composite_entities:
list[~azure.cognitiveservices.language.luis.runtime.models.CompositeEntityModel]
:param sentiment_analysis:
:type sentiment_analysis:
~azure.cognitiveservices.language.luis.runtime.models.Sentiment
:param connected_service_result:
:type connected_service_result:
~azure.cognitiveservices.language.luis.runtime.models.LuisResult
"""
_attribute_map = {
'query': {'key': 'query', 'type': 'str'},
'altered_query': {'key': 'alteredQuery', 'type': 'str'},
'top_scoring_intent': {'key': 'topScoringIntent', 'type': 'IntentModel'},
'intents': {'key': 'intents', 'type': '[IntentModel]'},
'entities': {'key': 'entities', 'type': '[EntityModel]'},
'composite_entities': {'key': 'compositeEntities', 'type': '[CompositeEntityModel]'},
'sentiment_analysis': {'key': 'sentimentAnalysis', 'type': 'Sentiment'},
'connected_service_result': {'key': 'connectedServiceResult', 'type': 'LuisResult'},
}
def __init__(self, **kwargs):
super(LuisResult, self).__init__(**kwargs)
self.query = kwargs.get('query', None)
self.altered_query = kwargs.get('altered_query', None)
self.top_scoring_intent = kwargs.get('top_scoring_intent', None)
self.intents = kwargs.get('intents', None)
self.entities = kwargs.get('entities', None)
self.composite_entities = kwargs.get('composite_entities', None)
self.sentiment_analysis = kwargs.get('sentiment_analysis', None)
self.connected_service_result = kwargs.get('connected_service_result', None)
|
py | 7dfd36e9a4dda76f8217dbc7899a21cd781b4360 | import time
import numpy
import json
# pypot imports
import pypot.dynamixel
import math
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
import csv
import pypot.dynamixel
import sys
state_file = open("test.csv", "w")
num1 = 2
ports = pypot.dynamixel.get_available_ports()
if not ports:
raise IOError('no port found!')
print('ports found', ports)
print('connecting on the first available port:', ports[0])
dxl_io = pypot.dynamixel.DxlIO(ports[0])
def setTraj1(id, duration, coeffs):
errorCounter = 0
delay = 0.001
while True:
try:
dxl_io.set_traj1_size({id: 3})
time.sleep(delay)
dxl_io.set_duration1({id: duration})
time.sleep(delay)
dxl_io.set_a0_traj1({id: coeffs[0]})
time.sleep(delay)
dxl_io.set_a1_traj1({id: coeffs[1]})
time.sleep(delay)
dxl_io.set_a2_traj1({id: coeffs[2]})
time.sleep(delay)
break
except:
errorCounter = errorCounter + 1
# print "Nope :/"
break
# print "Nb errors : ", errorCounter
def setTraj2(id, duration, coeffs):
errorCounter = 0
delay = 0.001
while True:
try:
dxl_io.set_traj2_size({id: 3})
time.sleep(delay)
dxl_io.set_duration2({id: duration})
time.sleep(delay)
dxl_io.set_a0_traj2({id: coeffs[0]})
time.sleep(delay)
dxl_io.set_a1_traj2({id: coeffs[1]})
time.sleep(delay)
dxl_io.set_a2_traj2({id: coeffs[2]})
time.sleep(delay)
break
except:
errorCounter = errorCounter + 1
print("nb errors = ", errorCounter)
break
def setTorque1(id, duration, coeffs):
errorCounter = 0
delay = 0.001
while True:
try:
dxl_io.set_torque1_size({id: 3})
time.sleep(delay)
dxl_io.set_duration1({id: duration})
time.sleep(delay)
dxl_io.set_a0_torque1({id: coeffs[0]})
time.sleep(delay)
dxl_io.set_a1_torque1({id: coeffs[1]})
time.sleep(delay)
dxl_io.set_a2_torque1({id: coeffs[2]})
time.sleep(delay)
break
except:
errorCounter = errorCounter + 1
# print "Nope :/"
pass
# print "Nb errors : ", errorCounter
def setTorque2(id, duration, coeffs):
errorCounter = 0
delay = 0.001
while True:
try:
dxl_io.set_torque2_size({id: 3})
time.sleep(delay)
dxl_io.set_duration2({id: duration})
time.sleep(delay)
dxl_io.set_a0_torque2({id: coeffs[0]})
time.sleep(delay)
dxl_io.set_a1_torque2({id: coeffs[1]})
time.sleep(delay)
dxl_io.set_a2_torque2({id: coeffs[2]})
time.sleep(delay)
break
except:
errorCounter = errorCounter + 1
# print "Nope :/"
pass
# print "Nb errors : ", errorCounter
# ID_LIST = [1, 2, 3, 4]
# ID_SIZE = len(ID_LIST)
# DXL_DICT_1 = dict(zip(ID_LIST, [1]*ID_SIZE))
# DXL_DICT_0 = dict(zip(ID_LIST, [0]*ID_SIZE))
# DXL_DICT_PID = dict(zip(ID_LIST, [[1,0,0]]*ID_SIZE))
# dxl_io.set_mode_dynaban(DXL_DICT_1)
# time.sleep(0.1)
# dxl_io.enable_torque(DXL_DICT_1)
# time.sleep(0.1)
# dxl_io.set_pid_gain(DXL_DICT_PID)
# time.sleep(0.1)
print ("Test with PID only:")
dxl_io.set_mode_dynaban({num1:0})
time.sleep(0.1)
dxl_io.enable_torque({num1:1})
time.sleep(0.1)
dxl_io.set_goal_position({num1:0})
time.sleep(1)
dxl_io.set_pid_gain({num1:[1,0,0]})
# # print((time.time()-time_current))
# str_state = [str(dxl_io.get_present_position([num1])[0]),str(dxl_io.get_outputTorque([num1])[0])]
# state_file.write(",".join(str_state) + "\n")
# time.sleep(1)
# dxl_io.set_max_torque({num1:1024})
setTraj1(num1, 5000, [2048.0, 0.0, 0.0])
setTorque1(num1,5000, [40.0,0.0,0.0])
print ("Setting mode and tracking :")
dxl_io.set_mode_dynaban({num1:3})
setTraj2(num1, 20000, [2048.0, 512.0, 0.0])
setTorque2(num1,20000, [40.0,0.0,0.0])
dxl_io.set_copy_next_buffer({num1:1})
time_current = time.time()
while (time.time()-time_current) <= 3:
print(dxl_io.get_outputTorque([num1])[0])
str_state = [str(dxl_io.get_present_position([num1])[0]),str(dxl_io.get_outputTorque([num1])[0])]
state_file.write(",".join(str_state) + "\n")
# time_start = time.time()
# setTraj2(num1, 20000, [2048.0, 512.0, 0.0])
# dxl_io.set_copy_next_buffer({num1:1})
# time_current = time.time()
# while (time.time()-time_current) <= 2:
# # print((time.time()-time_current))
# str_state = [str(dxl_io.get_present_position([num1])[0]),str(dxl_io.get_outputTorque([num1])[0])]
# state_file.write(",".join(str_state) + "\n")
# setTraj2(num1, 20000, [3072.0, -512.0, 0.0])
# dxl_io.set_copy_next_buffer({num1:1})
# time_current = time.time()
# while (time.time()-time_current) <= 2:
# # print((time.time()-time_current))
# str_state = [str(dxl_io.get_present_position([num1])[0]),str(dxl_io.get_outputTorque([num1])[0])]
# state_file.write(",".join(str_state) + "\n")
# setTraj2(num1, 20000, [2048.0, -512.0, 0.0])
# dxl_io.set_copy_next_buffer({num1:1})
# time_current = time.time()
# while (time.time()-time_current) <= 3:
# # print((time.time()-time_current))
# str_state = [str(dxl_io.get_present_position([num1])[0]),str(dxl_io.get_outputTorque([num1])[0])]
# state_file.write(",".join(str_state) + "\n")
# time_end = time.time()
# print(time_end-time_start)
|
py | 7dfd379dd569b6f25cd8c4cb2f5f502bb2e33b87 | from typing import Dict, List, Optional, Set
import re
from collections import defaultdict
from .template_parser import (
tokenize,
FormattedException,
Token,
)
class HtmlBranchesException(Exception):
# TODO: Have callers pass in line numbers.
pass
class HtmlTreeBranch:
"""
For <p><div id='yo'>bla<span class='bar'></span></div></p>, store a
representation of the tags all the way down to the leaf, which would
conceptually be something like "p div(#yo) span(.bar)".
"""
def __init__(self, tags: List['TagInfo'], fn: Optional[str]) -> None:
self.tags = tags
self.fn = fn
self.line = tags[-1].token.line
self.words: Set[str] = set()
for tag in tags:
for word in tag.words:
self.words.add(word)
def staircase_text(self) -> str:
"""
produces representation of a node in staircase-like format:
html
body.main-section
p#intro
"""
res = '\n'
indent = ' ' * 4
for t in self.tags:
res += indent + t.text() + '\n'
indent += ' ' * 4
return res
def text(self) -> str:
"""
produces one-line representation of branch:
html body.main-section p#intro
"""
return ' '.join(t.text() for t in self.tags)
class Node:
def __init__(self, token: Token, parent: "Optional[Node]") -> None:
# FIXME parent parameter is not used!
self.token = token
self.children: List[Node] = []
self.parent: Optional[Node] = None
class TagInfo:
def __init__(self, tag: str, classes: List[str], ids: List[str], token: Token) -> None:
self.tag = tag
self.classes = classes
self.ids = ids
self.token = token
self.words = \
[self.tag] + \
['.' + s for s in classes] + \
['#' + s for s in ids]
def text(self) -> str:
s = self.tag
if self.classes:
s += '.' + '.'.join(self.classes)
if self.ids:
s += '#' + '#'.join(self.ids)
return s
def get_tag_info(token: Token) -> TagInfo:
s = token.s
tag = token.tag
classes: List[str] = []
ids: List[str] = []
searches = [
(classes, ' class="(.*?)"'),
(classes, " class='(.*?)'"),
(ids, ' id="(.*?)"'),
(ids, " id='(.*?)'"),
]
for lst, regex in searches:
m = re.search(regex, s)
if m:
for g in m.groups():
lst += split_for_id_and_class(g)
return TagInfo(tag=tag, classes=classes, ids=ids, token=token)
def split_for_id_and_class(element: str) -> List[str]:
# Here we split a given string which is expected to contain id or class
# attributes from HTML tags. This also takes care of template variables
# in string during splitting process. For eg. 'red black {{ a|b|c }}'
# is split as ['red', 'black', '{{ a|b|c }}']
outside_braces: bool = True
lst = []
s = ''
for ch in element:
if ch == '{':
outside_braces = False
if ch == '}':
outside_braces = True
if ch == ' ' and outside_braces:
if not s == '':
lst.append(s)
s = ''
else:
s += ch
if not s == '':
lst.append(s)
return lst
def html_branches(text: str, fn: Optional[str] = None) -> List[HtmlTreeBranch]:
tree = html_tag_tree(text, fn)
branches: List[HtmlTreeBranch] = []
def walk(node: Node, tag_info_list: Optional[List[TagInfo]] = None) -> None:
info = get_tag_info(node.token)
if tag_info_list is None:
tag_info_list = [info]
else:
tag_info_list = tag_info_list[:] + [info]
if node.children:
for child in node.children:
walk(node=child, tag_info_list=tag_info_list)
else:
tree_branch = HtmlTreeBranch(tags=tag_info_list, fn=fn)
branches.append(tree_branch)
for node in tree.children:
walk(node, None)
return branches
def html_tag_tree(text: str, fn: Optional[str]=None) -> Node:
tokens = tokenize(text)
top_level = Node(token=None, parent=None)
stack = [top_level]
for token in tokens:
# Add tokens to the Node tree first (conditionally).
if token.kind in ('html_start', 'html_singleton'):
parent = stack[-1]
node = Node(token=token, parent=parent)
parent.children.append(node)
# Then update the stack to have the next node that
# we will be appending to at the top.
if token.kind == 'html_start':
stack.append(node)
elif token.kind == 'html_end':
stack.pop()
return top_level
def build_id_dict(templates: List[str]) -> Dict[str, List[str]]:
    template_id_dict: Dict[str, List[str]] = defaultdict(list)
for fn in templates:
with open(fn) as f:
text = f.read()
try:
list_tags = tokenize(text)
except FormattedException as e:
raise Exception('''
fn: %s
%s''' % (fn, e))
for tag in list_tags:
info = get_tag_info(tag)
for ids in info.ids:
template_id_dict[ids].append("Line " + str(info.token.line) + ":" + fn)
return template_id_dict
|
py | 7dfd38501c76963f3d9c9f06c74a9c9cdab93843 | import logging
from pydantic import BaseModel, conint, constr
from datetime import datetime
from typing import ClassVar, Optional, Literal
from idunn import settings
from idunn.datasources.kuzzle import kuzzle_client
from idunn.datasources.weather import weather_client
from .base import BaseBlock
from idunn.utils.redis import RedisWrapperWeather
logger = logging.getLogger(__name__)
DISABLED_STATE = object() # Used to flag cache as disabled by settings
class ParticleType(BaseModel):
value: float
quality_index: Optional[conint(ge=1, le=5)]
class AirQuality(BaseBlock):
type: Literal["air_quality"] = "air_quality"
CO: Optional[ParticleType] = None
PM10: Optional[ParticleType] = None
O3: Optional[ParticleType] = None
NO2: Optional[ParticleType] = None
SO2: Optional[ParticleType] = None
PM2_5: Optional[ParticleType] = None
quality_index: conint(ge=1, le=5)
date: datetime
source: str
source_url: str
measurements_unit: Optional[str] = None
@classmethod
def from_es(cls, place, lang):
if (
not settings["BLOCK_AIR_QUALITY_ENABLED"]
or place.PLACE_TYPE != "admin"
or place.get("zone_type") not in ("city", "city_district", "suburb")
):
return None
bbox = place.get_bbox()
if not bbox:
return None
try:
air_quality = get_air_quality(bbox)
except Exception:
logger.warning("Failed to fetch air quality for %s", place.get_id(), exc_info=True)
return None
if not air_quality:
return None
for x in ["CO", "PM10", "O3", "NO2", "SO2", "PM2_5"]:
if x not in air_quality:
continue
entry = air_quality.get(x)
if entry == {} or entry.get("value") is None:
air_quality[x] = None
return cls(**air_quality)
def get_air_quality(geobbox):
if not kuzzle_client.enabled:
return None
return kuzzle_client.fetch_air_quality(geobbox)
class Weather(BaseBlock):
type: Literal["weather"] = "weather"
temperature: Optional[float] = None
icon: Optional[constr(regex="11d|09d|10d|13d|50d|01d|01n|02d|03d|04d|02n|03n|04n")]
_connection: ClassVar = None
@classmethod
def from_es(cls, place, lang):
if place.PLACE_TYPE != "admin":
return None
if place.get("zone_type") not in ("city", "city_district", "suburd"):
return None
coord = place.get_coord()
if not coord:
return None
try:
weather = get_local_weather(coord)
except Exception:
logger.warning("Failed to fetch weather for %s", place.get_id(), exc_info=True)
return None
if not weather:
return None
return cls(**weather)
def get_local_weather(coord):
def inner(coord):
return weather_client.fetch_weather_places(coord)
if not weather_client.enabled:
return None
key = "weather_{}_{}".format(coord["lat"], coord["lon"])
return RedisWrapperWeather.cache_it(key, inner)(coord)
|
py | 7dfd3863f476600406cc264f46aa7374ccb3a28a |
from hash_comfusion_mat import ConfMat
def main():
threshold = [[0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
[0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
[0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
]
mat = ConfMat(threshold)
confMat, tpr, fpr = mat.resMat, mat.tpr, mat.fpr
print(confMat)
print(tpr)
print(fpr)
if __name__ == "__main__":
main() |
py | 7dfd38a563127d7c14c0fae2ec5782e4d8812095 | #!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': '{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00bitcoinuser:\x00Documents:\x00bitcoin:\x00bitcoin:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/bitcoinuser/Documents/bitcoin/bitcoin/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['Bitstock-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
|
bzl | 7dfd393b6a205042a3a797aea37e0878c8b5504e | GOLANG_REVISION = "1.11.1"
GOLANG_SHA256 = "2871270d8ff0c8c69f161aaae42f9f28739855ff5c5204752a8d92a1c9f63993"
|
py | 7dfd39c028e79de8a0ab1cab683f2e61d60bf7fd | # Generic game class that can test for permissions and things for games.
class GenericGame:
def __init__(self, ws, room):
self.ws = ws
self.room = room
def isThisGame(self, game):
return type(self) == game
|
py | 7dfd3bbe3615887b6ca2036a71a36ddaf625f3e3 | from django.shortcuts import render
from django.core.paginator import Paginator, EmptyPage, \
PageNotAnInteger
from .models import Paper
def paper_list(request):
object_list = Paper.objects.all()
paginator = Paginator(object_list, 3) # 3 abstracts/page
page = request.GET.get('page')
try:
papers = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer deliver the first page
papers = paginator.page(1)
except EmptyPage:
# If page is out of range deliver last page of results
papers = paginator.page(paginator.num_pages)
return render(request,
'bioarxiv/paper/list.html',
{'page': page,
'papers': papers})
|
py | 7dfd3be79c63d1802ea1d0c821197371f711055e | from redbot.core import commands
from .movie_bot import MovieNightCog
def setup(bot) -> None:
bot.add_cog(MovieNightCog(bot)) |
py | 7dfd3c1eca55b0cadc9e9c4e3ac04ba7a3a9f3ef | import dateutil.parser
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
def get_incident_ids() -> list:
"""
Gets all the campaign incident ids.
Returns:
List of all the ids.
"""
incidents = demisto.get(demisto.context(), "EmailCampaign.incidents")
return [incident['id'] for incident in incidents]
def get_last_incident_occurred(incident_ids) -> str:
"""
Gets the campaign last incident occurred date.
Args:
incident_ids: All the campaign incident ids.
Returns:
The date of the last incident occurred.
"""
res = demisto.executeCommand('GetIncidentsByQuery', {
'query': "id:({})".format(' '.join(incident_ids))
})
if isError(res):
return_error(f'Error occurred while trying to get incidents by query: {get_error(res)}')
incidents_from_query = json.loads(res[0]['Contents'])
if not incidents_from_query:
return incidents_from_query
incident_created = max([dateutil.parser.parse(incident['created']) for incident in incidents_from_query])
return incident_created.strftime("%B %d, %Y")
def main():
try:
incident_ids = get_incident_ids()
last_incident_occurred = get_last_incident_occurred(incident_ids)
if last_incident_occurred:
html_readable_output = f"<div style='text-align:center; font-size:17px; padding: 15px;'>" \
f"Last Incident Occurred</br> <div style='font-size:24px;'> " \
f"{last_incident_occurred} </div></div>"
else:
html_readable_output = "<div style='text-align:center; font-size:17px; padding: 15px;'>" \
"Last Incident Occurred</br> <div style='font-size:20px;'> " \
"No last incident occurred found. </div></div>"
demisto.results({
'ContentsFormat': formats['html'],
'Type': entryTypes['note'],
'Contents': html_readable_output
})
except Exception as err:
return_error(str(err))
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
py | 7dfd3cd8a8212a57aae78912dce22afa1acdd50c | from discordbot.utils.emojis import SPLIT, STOP, ALPHABET, ARROW_LEFT, CHECKMARK, REPEAT, QUESTION
TIMEOUT = 60*5
MINIGAMES = ["Blackjack", "Chess", "Connect4", "Flood", "Mastermind", "Hangman", "Quiz", "Scramble"]
QUIZ_CATEGORIES = ["General Knowledge", "Sports", "Films", "Music", "Video Games"]
BLACKJACK_RULES = f"**Blackjack**\n" \
f"{ALPHABET['h']} to ask for extra card ('hit').\n" \
f"{ALPHABET['s']} to signal that you have enough cards ('stand').\n" \
f"{SPLIT} to split your hand when both your cards are of the same rank at the start of the game.\n" \
f"{STOP} to end the game (automatically results in loss)."
CHESS_RULES = f"**Chess**\n" \
f"Click letters and numbers to create your move.\n" \
f"{STOP} to end the game (automatically results in loss for player who pressed it)."
CONNECT4_RULES = f"**Connect4**\n" \
f"Click a number to indicate the column for your coin.\n" \
f"{STOP} to end the game (automatically results in loss for player who pressed it)."
HANGMAN_RULES = f"**Hangman**\n" \
f"Click letters to make your guess.\n" \
f"{STOP} to end the game (automatically results in loss)."
QUIZ_RULES = f"**Quiz**\n" \
f"There are 4 categories available: General Knowledge, Sports, Films, Music and Video Games.\n" \
f"First select your category, then select the right answer for your question.\n" \
f"{STOP} to end the game (automatically results in loss)."
SCRAMBLE_RULES = f"**Scramble**\n" \
f"Unscramble the given word by clicking on the letters in the correct order.\n" \
f"{ARROW_LEFT} to undo your last move.\n" \
f"{STOP} to end the game (automatically results in loss)."
FLOOD_RULES = f"**Flood**\n" \
f"Try to get the whole grid to be the same color within the given number of moves, by repeatedly flood-filling the top left corner in different colors.\n" \
f"Click one of the colors in the reactions to flood-fill the top left corner with that color.\n" \
f"{STOP} to end the game (automatically results in loss)."
MASTERMIND_RULES = f"**Mastermind**\n" \
f"Try to guess the hidden combination of colors. You will be given limited information about each guess you make, enabling you to refine the next guess.\n" \
f"{CHECKMARK} to indicate the amount of colors that are in the correct place.\n" \
f"{REPEAT} to indicate the amount of colors that are correct but in the wrong place.\n" \
f"Click one of the colors in the reactions to make your guess.\n" \
f"{ARROW_LEFT} to remove your last added color.\n" \
f"{CHECKMARK} to confirm your guess.\n" \
f"{STOP} to end the game (automatically results in loss)."
AKINATOR_RULES = f"**Akinator**\n" \
f"Think of character and by asking yes/no questions the Akinator will guess who it is. Character can be fictional or real.\n" \
f"{ALPHABET['y']} to answer the question with 'yes'.\n" \
f"{ALPHABET['n']} to answer the question with 'no'.\n" \
f"{QUESTION} if you don't know the answer.\n" \
f"{STOP} to end the game."
|
py | 7dfd3d7554290723dc935635fb56a4a9634b5e5d | import io
import zipfile
import sys
s = 0  # total uncompressed size in bytes
c = 0  # number of file entries (directories excluded)
for zz in zipfile.ZipFile(io.BytesIO(bytes.fromhex(sys.stdin.read()))).filelist:
s += zz.file_size
c += 0 if zz.is_dir() else 1
print(c,s)
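# Hedged usage sketch (shell, illustrative file names): feed a hex dump of a
# zip archive on stdin and the script prints "<file count> <total size>", e.g.
#   xxd -p archive.zip | tr -d '\n' | python this_script.py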
|
py | 7dfd42a1ccffee2714e20cf7c987f62f1e4d5f5e | """
https://plot.ly/python/
Plotly's Python API allows users to programmatically access Plotly's
server resources.
This package is organized as follows:
Subpackages:
- plotly: all functionality that requires access to Plotly's servers
- graph_objs: objects for designing figures and visualizing data
- matplotlylib: tools to convert matplotlib figures
Modules:
- tools: some helpful tools that do not require access to Plotly's servers
- utils: functions that you probably won't need, but that subpackages use
- version: holds the current API version
- exceptions: defines our custom exception classes
"""
from __future__ import absolute_import
from plotly import (plotly, dashboard_objs, graph_objs, grid_objs, tools,
utils, session, offline, colors)
from plotly.version import __version__
|
py | 7dfd4403e2d091fc2fec6b8d8e0fb20d46807565 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-11-04 21:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jobs', '0013_remove_profilejob_state'),
]
operations = [
migrations.AlterField(
model_name='job',
name='lat',
field=models.CharField(max_length=50, verbose_name=b'Latitud'),
),
migrations.AlterField(
model_name='job',
name='lng',
field=models.CharField(max_length=50, verbose_name=b'Longitud'),
),
]
|
py | 7dfd45b92cd63061d4edf8584cdf8965eacaee5a | #! /usr/bin/python
# -*- coding: UTF-8 -*-
# Copyright 2013-2018 Luiko Czub, TestLink-API-Python-client developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------
# TestCases for Testlink API calls, where the Api Clients uses equal
# positional arg configurations
# - TestlinkAPIClient, TestlinkAPIGeneric
#
# tests requires an online TestLink Server, which connection parameters
# are defined in environment variables
# TESTLINK_API_PYTHON_SERVER_URL and TESTLINK_API_PYTHON_DEVKEY
#
# FIXME LC 29.10.29: test does not really interact with TestLink
#                    only negative tests with non-existing IDs are implemented
# ok to check every implemented server call one time but not
# to cover all possible responses or argument combinations
import pytest
import re
from testlink.testlinkerrors import TLResponseError
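# --- Hedged sketch (not part of this file): the `api_client` fixture used by
# these tests normally comes from conftest.py; a minimal version could look
# like this, assuming the TestLink environment variables mentioned above are set:
#   from testlink import TestlinkAPIGeneric, TestLinkHelper
#   @pytest.fixture()
#   def api_client():
#       return TestLinkHelper().connect(TestlinkAPIGeneric)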
# test_ApiCall_UnknownKey_EqualBehaviour
def test_checkDevKey(api_client):
assert True == api_client.checkDevKey()
def test_checkDevKey_unknownKey(api_client):
with pytest.raises(TLResponseError, match='2000.*invalid') as excinfo:
api_client.checkDevKey(devKey='unknownKey')
def test_sayHello(api_client):
assert 'Hello!' == api_client.sayHello()
def test_repeat(api_client):
assert 'You said: Yellow Submarine' == api_client.repeat('Yellow Submarine')
def test_about(api_client):
assert 'Testlink API' in api_client.about()
def test_doesUserExist_unknownID(api_client):
with pytest.raises(TLResponseError, match='10000.*Big Bird'):
api_client.doesUserExist('Big Bird')
def test_createTestProject_unknownID(api_client):
with pytest.raises(TLResponseError, match='7001.*Empty name'):
api_client.createTestProject(testprojectname='',
testcaseprefix='P40000711')
def test_createTestProject_unknownITS(api_client):
with pytest.raises(TLResponseError, match='13000.*Unable to find'):
api_client.createTestProject(testprojectname='aProject',
testcaseprefix='aPrefix', itsname='unknownITS')
def test_getProjects(api_client):
assert None is not api_client.getProjects()
def test_createTestPlan_projectname_posArg_unknownID(api_client):
with pytest.raises(TLResponseError, match='7011.*40000712'):
api_client.createTestPlan('plan 40000711', 'project 40000712')
def test_createTestSuite_unknownID(api_client):
with pytest.raises(TLResponseError, match='7000.*40000711'):
api_client.createTestSuite( 40000711, 'suite 40000712', 'detail 40000713')
# see test_apicall_differentPositionalArgs
# def test_createTestCase_unknownID(api_client):
# tc_steps = []
# with pytest.raises(TLResponseError, match='7000.*40000713'):
# api_client.createTestCase('case 40000711', 40000712, 40000713,
# 'Big Bird', 'summary 40000714', tc_steps)
def test_getBuildsForTestPlan_unknownID(api_client):
with pytest.raises(TLResponseError, match='3000.*40000711'):
api_client.getBuildsForTestPlan(40000711)
def test_getFirstLevelTestSuitesForTestProject_unknownID(api_client):
with pytest.raises(TLResponseError, match='7000.*40000711'):
api_client.getFirstLevelTestSuitesForTestProject(40000711)
def test_getFullPath_unknownID(api_client):
with pytest.raises(TLResponseError, match='getFullPath.*234'):
api_client.getFullPath('40000711')
# see test_apicall_differentPositionalArgs
# def test_getLastExecutionResult_unknownID(api_client):
# with pytest.raises(TLResponseError, match='3000.*40000711'):
# api_client.getLastExecutionResult(40000711, testcaseid=40000712)
def test_getLatestBuildForTestPlan_unknownID(api_client):
with pytest.raises(TLResponseError, match='3000.*40000711'):
api_client.getLatestBuildForTestPlan(40000711)
def test_getProjectTestPlans_unknownID(api_client):
with pytest.raises(TLResponseError, match='7000.*40000711'):
api_client.getProjectTestPlans(40000711)
def test_getProjectPlatforms_unknownID(api_client):
with pytest.raises(TLResponseError, match='7000.*40000711'):
api_client.getProjectPlatforms(40000711)
def test_getTestCase_unknownID(api_client):
with pytest.raises(TLResponseError, match='5000.*40000711'):
api_client.getTestCase(testcaseid=40000711)
def test_getTestCase_unknownExternalID(api_client):
with pytest.raises(TLResponseError, match='5040.*GPROAPI-40000711'):
api_client.getTestCase(testcaseexternalid='GPROAPI-40000711')
def test_getTestCaseAttachments_unknownID(api_client):
with pytest.raises(TLResponseError, match='5000.*40000711'):
api_client.getTestCaseAttachments(testcaseid=40000711)
# see test_apicall_differentPositionalArgs
# def test_getTestCaseCustomFieldDesignValue_unknownID(api_client):
# with pytest.raises(TLResponseError, match='7000.*40000711'):
# api_client.getTestCaseCustomFieldDesignValue(
# 'TC-40000712', 1, 40000711, 'a_field', details='full')
def test_getTestCaseIDByName_unknownID(api_client):
with pytest.raises(TLResponseError, match='5030.*Cannot find'):
api_client.getTestCaseIDByName('Big Bird')
def test_getTestCasesForTestPlan_unknownID(api_client):
with pytest.raises(TLResponseError, match='3000.*40000711'):
api_client.getTestCasesForTestPlan(40000711)
# see test_apicall_differentPositionalArgs
# def test_getTestCasesForTestSuite_unknownID(api_client):
# with pytest.raises(TLResponseError, match='8000.*40000711'):
# api_client.getTestCasesForTestSuite(40000711)
def test_getTestPlanByName_unknownID(api_client):
with pytest.raises(TLResponseError, match='7011.*40000711'):
api_client.getTestPlanByName('project 40000711', 'plan 40000712')
def test_getTestPlanPlatforms_unknownID(api_client):
with pytest.raises(TLResponseError, match='3000.*40000711'):
api_client.getTestPlanPlatforms(40000711)
def test_getTestProjectByName_unknownID(api_client):
with pytest.raises(TLResponseError, match='7011.*40000711'):
api_client.getTestProjectByName('project 40000711')
def test_getTestSuiteByID_unknownID(api_client):
with pytest.raises(TLResponseError, match='8000.*40000711'):
api_client.getTestSuiteByID(40000711)
def test_getTestSuitesForTestPlan_unknownID(api_client):
with pytest.raises(TLResponseError, match='3000.*40000711'):
api_client.getTestSuitesForTestPlan(40000711)
def test_getTestSuitesForTestSuite_unknownID(api_client):
with pytest.raises(TLResponseError, match='8000.*40000711'):
api_client.getTestSuitesForTestSuite(40000711)
def test_getTotalsForTestPlan_unknownID(api_client):
with pytest.raises(TLResponseError, match='3000.*40000711'):
api_client.getTotalsForTestPlan(40000711)
# see test_apicall_differentPositionalArgs
# def test_createBuild_unknownID(api_client):
# with pytest.raises(TLResponseError, match='3000.*40000711'):
# api_client.createBuild(40000711, 'Build 40000712', buildnotes='note 40000713')
# see test_apicall_differentPositionalArgs
# def test_reportTCResult_unknownID(api_client):
# with pytest.raises(TLResponseError, match='5000.*40000711'):
# api_client.reportTCResult(40000712, 'p', testcaseid=40000711,
# buildname='build 40000713', notes='note 40000714' )
# see test_apicall_differentPositionalArgs
# def test_uploadExecutionAttachment_unknownID(api_client, attachmentFile):
# with pytest.raises(TLResponseError, match='6004.*40000712'):
# api_client.uploadExecutionAttachment(attachmentFile, 40000712,
# title='title 40000713', description='descr. 40000714')
def test_createPlatform_unknownID(api_client):
with pytest.raises(TLResponseError, match='7011.*40000711'):
api_client.createPlatform('Project 40000711', 'Platform 40000712',
notes='note 40000713')
def test_addPlatformToTestPlan_unknownID(api_client):
with pytest.raises(TLResponseError, match='3000.*40000711'):
api_client.addPlatformToTestPlan(40000711, 'Platform 40000712')
def test_removePlatformFromTestPlan_unknownID(api_client):
with pytest.raises(TLResponseError, match='3000.*40000711'):
api_client.removePlatformFromTestPlan(40000711, 'Platform 40000712')
def test_addTestCaseToTestPlan_unknownID(api_client):
with pytest.raises(TLResponseError, match='7000.*40000711'):
api_client.addTestCaseToTestPlan(40000711, 40000712, 'N-40000713', 1)
def test_updateTestCase_unknownID(api_client):
with pytest.raises(TLResponseError, match='5040.*N-40000711'):
api_client.updateTestCase('N-40000711', version=1)
def test_createTestCaseSteps_unknownID(api_client):
steps = [{'actions' : "Step action 6 -b added by updateTestCase" ,
'expected_results' : "Step result 6 - b added",
'step_number' : 6, 'execution_type' : 1}]
with pytest.raises(TLResponseError, match='5040.*N-40000711'):
api_client.createTestCaseSteps('update', steps,
testcaseexternalid='N-40000711', version=1)
def test_deleteTestCaseSteps_unknownID(api_client):
steps = [2,8]
with pytest.raises(TLResponseError, match='5040.*N-40000711'):
api_client.deleteTestCaseSteps('N-40000711', steps, version=1)
def test_uploadRequirementSpecificationAttachment_unknownID(api_client, attachmentFile):
with pytest.raises(TLResponseError, match='6004.*40000712'):
api_client.uploadRequirementSpecificationAttachment(attachmentFile, 40000712,
title='title 40000713', description='descr. 40000714')
def test_uploadRequirementAttachment_unknownID(api_client, attachmentFile):
with pytest.raises(TLResponseError, match='6004.*40000712'):
api_client.uploadRequirementAttachment(attachmentFile, 40000712,
title='title 40000713', description='descr. 40000714')
def test_uploadTestProjectAttachment_unknownID(api_client, attachmentFile):
with pytest.raises(TLResponseError, match='7000.*40000712'):
api_client.uploadTestProjectAttachment(attachmentFile, 40000712,
title='title 40000713', description='descr. 40000714')
def test_uploadTestSuiteAttachment_unknownID(api_client, attachmentFile):
with pytest.raises(TLResponseError, match='8000.*40000712'):
api_client.uploadTestSuiteAttachment(attachmentFile, 40000712,
title='title 40000713', description='descr. 40000714')
def test_uploadTestCaseAttachment_unknownID(api_client, attachmentFile):
with pytest.raises(TLResponseError, match='5000.*testcaseid'):
api_client.uploadTestCaseAttachment(attachmentFile, 40000712,
title='title 40000713', description='descr. 40000714')
def test_uploadAttachment_unknownID(api_client, attachmentFile):
with pytest.raises(TLResponseError, match='6004.*Invalid Foreign Key ID'):
api_client.uploadAttachment(attachmentFile, '0000', 'nodes_hierarchy',
title='title 40000713', description='descr. 40000714')
def test_testLinkVersion(api_client):
    assert re.match(r'\d*\.\d*\.\d*', api_client.testLinkVersion())
def test_getUserByLogin_unknownKey(api_client):
with pytest.raises(TLResponseError, match='10000.*User Login'):
api_client.getUserByLogin('unknownUser')
def test_getUserByID_unknownKey(api_client):
with pytest.raises(TLResponseError, match='NO_USER_BY_ID_LOGIN.*User with DB ID'):
api_client.getUserByID(40000711)
@pytest.mark.xfail(reason='setTestMode not implemented for python client' )
def test_setTestMode(api_client):
assert api_client.setTestMode(True)
assert not api_client.setTestMode(False)
def test_deleteExecution_unknownKey(api_client):
try:
# case: TL configuration allows deletion of executions
        # response returns Success, even if executionID is unknown
expected = [{'status': True, 'message': 'Success!', 'id': 40000711,
'operation': 'deleteExecution'}]
assert expected == api_client.deleteExecution(40000711)
except TLResponseError as tl_err:
# case: TL configuration does not allow deletion of executions
# Expects: 232: Configuration does not allow delete executions
assert 232 == tl_err.code
def test_setTestCaseExecutionType_unknownID(api_client):
with pytest.raises(TLResponseError, match='7000.*40000712'):
api_client.setTestCaseExecutionType('N-40000711', 1, 40000712, 1)
def test_assignRequirements_unknownID(api_client):
with pytest.raises(TLResponseError, match='7000.*40000712'):
api_client.assignRequirements('N-40000711', 40000712,
[{'req_spec' : 40000713, 'requirements' : [40000714, 40000717]},
{'req_spec' : 4723, 'requirements' : [4725]}])
def test_getExecCountersByBuild_unknownID(api_client):
with pytest.raises(TLResponseError, match='3000.*40000711'):
api_client.getExecCountersByBuild(40000711)
def test_getTestCaseCustomFieldExecutionValue_unknownID(api_client):
with pytest.raises(TLResponseError, match='236.*version/executionid'):
api_client.getTestCaseCustomFieldExecutionValue(
'cf_full', '40000711', 1, '715', '40000713')
def test_getTestCaseCustomFieldTestPlanDesignValue_unknownID(api_client):
with pytest.raises(TLResponseError, match='7000.*40000711'):
api_client.getTestCaseCustomFieldTestPlanDesignValue(
'cf_full', '40000711', 1, '40000713', '615')
def test_updateTestCaseCustomFieldDesignValue_unknownID(api_client):
with pytest.raises(TLResponseError, match='7000.*40000711'):
api_client.updateTestCaseCustomFieldDesignValue(
'TC-40000712', 1, 40000711, {'cf_field1' : 'value1',
'cf_field2' : 'value2'})
def test_getTestSuiteCustomFieldDesignValue_unknownID(api_client):
with pytest.raises(TLResponseError, match='7000.*40000711'):
api_client.getTestSuiteCustomFieldDesignValue(
'cf_full', 40000711, 40000713)
def test_getTestPlanCustomFieldDesignValue_unknownID(api_client):
with pytest.raises(TLResponseError, match='7000.*40000711'):
api_client.getTestPlanCustomFieldDesignValue(
'cf_full', 40000711, 40000712)
def test_getReqSpecCustomFieldDesignValue_unknownID(api_client):
with pytest.raises(TLResponseError, match='7000.*40000711'):
api_client.getReqSpecCustomFieldDesignValue(
'cf_full', 40000711, 4732)
def test_getRequirementCustomFieldDesignValue_unknownID(api_client):
with pytest.raises(TLResponseError, match='7000.*40000711'):
api_client.getRequirementCustomFieldDesignValue(
'cf_full', 40000711, 4734)
def test_assignTestCaseExecutionTask_unknownID(api_client):
with pytest.raises(TLResponseError, match='3000.*40000711'):
api_client.assignTestCaseExecutionTask('username', 40000711, 'TC-40000712',
buildname='build 40000713',
platformname='platform 40000714')
def test_getTestCaseBugs_unknownID(api_client):
with pytest.raises(TLResponseError, match='3000.*40000711'):
api_client.getTestCaseBugs(40000711, testcaseexternalid='TC-40000712',
buildname='build 40000713',
platformname='platform 40000714')
def test_getTestCaseAssignedTester_unknownID(api_client):
with pytest.raises(TLResponseError, match='3000.*40000711'):
api_client.getTestCaseAssignedTester(40000711, 'TC-40000712',
buildname='build 40000713',
platformname='platform 40000714')
def test_unassignTestCaseExecutionTask_unknownID(api_client):
with pytest.raises(TLResponseError, match='3000.*40000711'):
api_client.unassignTestCaseExecutionTask(40000711, 'TC-40000712',
buildname='build 40000713',
platformname='platform 40000714',
user='username',action='unassignOne')
def test_getProjectKeywords_unknownID(api_client):
with pytest.raises(TLResponseError, match='7000.*40000711'):
api_client.getProjectKeywords(40000711)
def test_getTestCaseKeywords_unknownID(api_client):
with pytest.raises(TLResponseError, match='5040.*40000712'):
api_client.getTestCaseKeywords(testcaseid=40000712)
def test_getTestCaseKeywords_unknownID_set(api_client):
with pytest.raises(TLResponseError, match='5040.*40000712'):
api_client.getTestCaseKeywords(testcaseid=[40000712, 40000713])
def test_getTestCaseKeywords_unknownID_external_single(api_client):
with pytest.raises(TLResponseError, match='5040.*TC-40000712'):
api_client.getTestCaseKeywords(testcaseexternalid='TC-40000712')
def test_getTestCaseKeywords_unknownID_external_set(api_client):
with pytest.raises(TLResponseError, match='5040.*TC-40000712'):
api_client.getTestCaseKeywords(testcaseexternalid=['TC-40000712', 'TC-40000713'])
def test_deleteTestPlan_unknownID(api_client):
with pytest.raises(TLResponseError, match='3000.*40000711'):
api_client.deleteTestPlan(40000711)
def test_addTestCaseKeywords_unknownID(api_client):
with pytest.raises(TLResponseError, match='5040.*TC-40000712'):
api_client.addTestCaseKeywords({'TC-40000712' :
['KeyWord01', 'KeyWord03']})
def test_removeTestCaseKeywords_unknownID(api_client):
with pytest.raises(TLResponseError, match='5040.*TC-40000712'):
api_client.removeTestCaseKeywords({'TC-40000712' : ['KeyWord01']})
def test_deleteTestProject_unknownID(api_client):
with pytest.raises(TLResponseError, match='7013.*TProjectPrefix'):
api_client.deleteTestProject('TProjectPrefix')
def test_createTestPlan_projectname_optArg_unknownID(api_client):
with pytest.raises(TLResponseError, match='7011.*40000712'):
api_client.createTestPlan('plan 40000711',
testprojectname='project 40000712')
def test_createTestPlan_prefix_unknownID(api_client):
with pytest.raises(TLResponseError, match='NO.*TProjectPrefix'):
api_client.createTestPlan('plan 40000713',
prefix='TProjectPrefix')
def test_updateTestSuiteCustomFieldDesignValue_unknownID(api_client):
with pytest.raises(TLResponseError, match='7000.*40000712'):
api_client.updateTestSuiteCustomFieldDesignValue(
'40000712 TP-ID', '40000711 TS-ID',
{'cf_tc_ex_string' : 'a custom exec value',
'cf_tc_ex_numeric' : 111} )
def test_getTestSuite_unknownID(api_client):
with pytest.raises(TLResponseError, match='NO.*TProjectPrefix'):
api_client.getTestSuite('suite 40000712', 'TProjectPrefix')
def test_updateTestSuite_unknownID(api_client):
with pytest.raises(TLResponseError, match='7000.*40000711'):
api_client.updateTestSuite(40000712, testprojectid=40000711,
testsuitename = 'suite 40000712 updated',
details = 'detail 40000713 updated',
order =1)
def test_getIssueTrackerSystem_unknownITS(api_client):
with pytest.raises(TLResponseError, match='13000.*Unable to find'):
api_client.getIssueTrackerSystem('unknownITS')
def test_updateBuildCustomFieldsValues_unknownID(api_client):
with pytest.raises(TLResponseError, match='7000.*40000712'):
api_client.updateBuildCustomFieldsValues(
'40000712 project', '40000713 plan', '40000714 build',
{'cf_b_ex_string' : 'a custom exec value',
'cf_b_ex_numeric' : 111} )
def test_getExecutionSet_unknownID(api_client):
with pytest.raises(TLResponseError, match='3000.*40000713'):
api_client.getExecutionSet(
'40000713 plan', testcaseexternalid = 'TC-40000712')
def test_getRequirements_unknownID(api_client):
with pytest.raises(TLResponseError, match='7000.*40000712'):
api_client.getRequirements(
'40000712 project',
testplanid = '40000713 plan',
platformid = '40000714 platform')
def test_getReqCoverage_unknownID(api_client):
with pytest.raises(TLResponseError, match='7000.*40000712'):
api_client.getReqCoverage(
'40000712 project', '40000721 req')
def test_setTestCaseTestSuite_unknownID(api_client):
with pytest.raises(TLResponseError, match='5040.*TC-40000712'):
api_client.setTestCaseTestSuite(
'TC-40000712', '40000713 suite')
def test_getTestSuiteAttachments_unknownID(api_client):
with pytest.raises(TLResponseError, match='8000.*40000712'):
api_client.getTestSuiteAttachments(40000712)
def test_getAllExecutionsResults_unknownID(api_client):
with pytest.raises(TLResponseError, match='3000.*40000711'):
api_client.getAllExecutionsResults(40000711)
# if __name__ == "__main__":
# #import sys;sys.argv = ['', 'Test.testName']
# unittest.main() |
py | 7dfd483dea0da0e2807d79207436cd868a129168 | import decimal
from django.db import models
from django.db.models.expressions import F
from coupons.models import Coupon
from shop.models import Product
from decimal import Decimal
from django.core.validators import MinValueValidator, \
MaxValueValidator
from django.utils.translation import gettext_lazy as _
# Create your models here.
class Order(models.Model):
first_name = models.CharField(_('first_name'),
max_length=50)
    last_name = models.CharField(_('last_name'),
max_length=50)
email = models.EmailField(_('e-mail'))
address = models.CharField(_('address'),
max_length=250)
postal_code = models.CharField(_('postal code'),
max_length=20)
city = models.CharField(_('city'),
max_length=100)
created = models.DateField(auto_now_add=True)
updated = models.DateField(auto_now=True)
paid = models.BooleanField(default=False)
braintree_id = models.CharField(max_length=150, blank=True)
coupon = models.ForeignKey(Coupon,
related_name='orders',
null=True,
blank=True,
on_delete=models.SET_NULL)
discount = models.IntegerField(default=0,
validators=[MinValueValidator(0),
MaxValueValidator(100)])
class Meta:
ordering = ('-created',)
def __str__(self):
return f'Order {self.id}'
def get_total_cost(self):
total_cost = sum(item.get_cost() for item in self.items.all())
return total_cost - total_cost * \
(self.discount / Decimal(100))
class OrderItem(models.Model):
order = models.ForeignKey(Order,
related_name='items',
on_delete=models.CASCADE)
product = models.ForeignKey(Product,
related_name='order_items',
on_delete=models.CASCADE)
price = models.DecimalField(max_digits=10, decimal_places=2)
    quantity = models.PositiveIntegerField(default=1)
def __str__(self):
return str(self.id)
def get_cost(self):
return self.price * self.quantity
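# Worked example (illustrative values, not part of the original module): an
# Order holding items of 2 x 10.00 and 1 x 5.00 with discount=10 gives
# get_total_cost() == 25.00 - 25.00 * (10 / Decimal(100)) == Decimal('22.50').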
|
py | 7dfd48d427e10351387772aa4ca1e627c616e2fd | """ Example functions copied from the Internet. Probably, we should not use them; they are here to test PyCairo. """
import numpy
import cairo
import pygame
import array
import math
import sys
def draw(surface):
x, y, radius = (250, 250, 200)
ctx = cairo.Context(surface)
ctx.set_line_width(15)
ctx.arc(x, y, radius, 0, 2.0 * math.pi)
ctx.set_source_rgb(0.8, 0.8, 0.8)
ctx.fill_preserve()
ctx.set_source_rgb(1, 1, 1)
ctx.stroke()
def input(events):
for event in events:
if event.type == pygame.QUIT:
sys.exit(0)
# Create Cairo Surface
Width, Height = 512, 512
#data = array.array('c', chr(0) * Width * Height * 4)
data = numpy.empty(Width * Height * 4, dtype = numpy.int8)
stride = Width * 4
surface = cairo.ImageSurface.create_for_data(data, cairo.FORMAT_ARGB32, Width, Height, stride)
# init PyGame
pygame.init()
window = pygame.display.set_mode((Width, Height))
screen = pygame.display.get_surface()
# Draw with Cairo
draw(surface)
# Create PyGame surface from Cairo Surface
image = pygame.image.frombuffer(data, (Width, Height), "ARGB")
# Transfer to screen
while True:
x, y, radius = (350, 150, 100)
ctx = cairo.Context(surface)
ctx.set_line_width(15)
ctx.arc(x, y, radius, 0, 2.0 * math.pi)
ctx.set_source_rgb(0.8, 0.8, 0.8)
ctx.fill_preserve()
ctx.set_source_rgb(1, 1, 1)
ctx.stroke()
image = pygame.image.frombuffer(data.tostring(), (Width, Height), "ARGB")
screen.blit(image, (0, 0))
pygame.display.flip()
input(pygame.event.get()) |
py | 7dfd4947a2a603487b2776ab1d7362d17ca1f66e | import sys
import os
import marshal
import imp
import struct
import time
import unittest
from test import support
from test.test_importhooks import ImportHooksBaseTestCase, test_src, test_co
from zipfile import ZipFile, ZipInfo, ZIP_STORED, ZIP_DEFLATED
import zipimport
import linecache
import doctest
import inspect
import io
from traceback import extract_tb, extract_stack, print_tb
raise_src = 'def do_raise(): raise TypeError\n'
def make_pyc(co, mtime, size):
data = marshal.dumps(co)
if type(mtime) is type(0.0):
# Mac mtimes need a bit of special casing
if mtime < 0x7fffffff:
mtime = int(mtime)
else:
mtime = int(-0x100000000 + int(mtime))
pyc = imp.get_magic() + struct.pack("<ii", int(mtime), size & 0xFFFFFFFF) + data
return pyc
def module_path_to_dotted_name(path):
return path.replace(os.sep, '.')
NOW = time.time()
test_pyc = make_pyc(test_co, NOW, len(test_src))
TESTMOD = "ziptestmodule"
TESTPACK = "ziptestpackage"
TESTPACK2 = "ziptestpackage2"
TEMP_ZIP = os.path.abspath("junk95142.zip")
pyc_file = imp.cache_from_source(TESTMOD + '.py')
pyc_ext = ('.pyc' if __debug__ else '.pyo')
class UncompressedZipImportTestCase(ImportHooksBaseTestCase):
compression = ZIP_STORED
def setUp(self):
# We're reusing the zip archive path, so we must clear the
# cached directory info and linecache
linecache.clearcache()
zipimport._zip_directory_cache.clear()
ImportHooksBaseTestCase.setUp(self)
def doTest(self, expected_ext, files, *modules, **kw):
z = ZipFile(TEMP_ZIP, "w")
try:
for name, (mtime, data) in files.items():
zinfo = ZipInfo(name, time.localtime(mtime))
zinfo.compress_type = self.compression
z.writestr(zinfo, data)
z.close()
stuff = kw.get("stuff", None)
if stuff is not None:
# Prepend 'stuff' to the start of the zipfile
with open(TEMP_ZIP, "rb") as f:
data = f.read()
with open(TEMP_ZIP, "wb") as f:
f.write(stuff)
f.write(data)
sys.path.insert(0, TEMP_ZIP)
mod = __import__(".".join(modules), globals(), locals(),
["__dummy__"])
call = kw.get('call')
if call is not None:
call(mod)
if expected_ext:
file = mod.get_file()
self.assertEqual(file, os.path.join(TEMP_ZIP,
*modules) + expected_ext)
finally:
z.close()
os.remove(TEMP_ZIP)
def testAFakeZlib(self):
#
# This could cause a stack overflow before: importing zlib.py
# from a compressed archive would cause zlib to be imported
# which would find zlib.py in the archive, which would... etc.
#
# This test *must* be executed first: it must be the first one
# to trigger zipimport to import zlib (zipimport caches the
# zlib.decompress function object, after which the problem being
# tested here wouldn't be a problem anymore...
# (Hence the 'A' in the test method name: to make it the first
# item in a list sorted by name, like unittest.makeSuite() does.)
#
# This test fails on platforms on which the zlib module is
# statically linked, but the problem it tests for can't
# occur in that case (builtin modules are always found first),
# so we'll simply skip it then. Bug #765456.
#
if "zlib" in sys.builtin_module_names:
return
if "zlib" in sys.modules:
del sys.modules["zlib"]
files = {"zlib.py": (NOW, test_src)}
try:
self.doTest(".py", files, "zlib")
except ImportError:
if self.compression != ZIP_DEFLATED:
self.fail("expected test to not raise ImportError")
else:
if self.compression != ZIP_STORED:
self.fail("expected test to raise ImportError")
def testPy(self):
files = {TESTMOD + ".py": (NOW, test_src)}
self.doTest(".py", files, TESTMOD)
def testPyc(self):
files = {TESTMOD + pyc_ext: (NOW, test_pyc)}
self.doTest(pyc_ext, files, TESTMOD)
def testBoth(self):
files = {TESTMOD + ".py": (NOW, test_src),
TESTMOD + pyc_ext: (NOW, test_pyc)}
self.doTest(pyc_ext, files, TESTMOD)
def testEmptyPy(self):
files = {TESTMOD + ".py": (NOW, "")}
self.doTest(None, files, TESTMOD)
def testBadMagic(self):
# make pyc magic word invalid, forcing loading from .py
badmagic_pyc = bytearray(test_pyc)
badmagic_pyc[0] ^= 0x04 # flip an arbitrary bit
files = {TESTMOD + ".py": (NOW, test_src),
TESTMOD + pyc_ext: (NOW, badmagic_pyc)}
self.doTest(".py", files, TESTMOD)
def testBadMagic2(self):
# make pyc magic word invalid, causing an ImportError
badmagic_pyc = bytearray(test_pyc)
badmagic_pyc[0] ^= 0x04 # flip an arbitrary bit
files = {TESTMOD + pyc_ext: (NOW, badmagic_pyc)}
try:
self.doTest(".py", files, TESTMOD)
except ImportError:
pass
else:
self.fail("expected ImportError; import from bad pyc")
def testBadMTime(self):
badtime_pyc = bytearray(test_pyc)
# flip the second bit -- not the first as that one isn't stored in the
# .py's mtime in the zip archive.
badtime_pyc[7] ^= 0x02
files = {TESTMOD + ".py": (NOW, test_src),
TESTMOD + pyc_ext: (NOW, badtime_pyc)}
self.doTest(".py", files, TESTMOD)
def testPackage(self):
packdir = TESTPACK + os.sep
files = {packdir + "__init__" + pyc_ext: (NOW, test_pyc),
packdir + TESTMOD + pyc_ext: (NOW, test_pyc)}
self.doTest(pyc_ext, files, TESTPACK, TESTMOD)
def testDeepPackage(self):
packdir = TESTPACK + os.sep
packdir2 = packdir + TESTPACK2 + os.sep
files = {packdir + "__init__" + pyc_ext: (NOW, test_pyc),
packdir2 + "__init__" + pyc_ext: (NOW, test_pyc),
packdir2 + TESTMOD + pyc_ext: (NOW, test_pyc)}
self.doTest(pyc_ext, files, TESTPACK, TESTPACK2, TESTMOD)
def testZipImporterMethods(self):
packdir = TESTPACK + os.sep
packdir2 = packdir + TESTPACK2 + os.sep
files = {packdir + "__init__" + pyc_ext: (NOW, test_pyc),
packdir2 + "__init__" + pyc_ext: (NOW, test_pyc),
packdir2 + TESTMOD + pyc_ext: (NOW, test_pyc)}
z = ZipFile(TEMP_ZIP, "w")
try:
for name, (mtime, data) in files.items():
zinfo = ZipInfo(name, time.localtime(mtime))
zinfo.compress_type = self.compression
z.writestr(zinfo, data)
z.close()
zi = zipimport.zipimporter(TEMP_ZIP)
self.assertEqual(zi.archive, TEMP_ZIP)
self.assertEqual(zi.is_package(TESTPACK), True)
mod = zi.load_module(TESTPACK)
self.assertEqual(zi.get_filename(TESTPACK), mod.__file__)
existing_pack_path = __import__(TESTPACK).__path__[0]
expected_path_path = os.path.join(TEMP_ZIP, TESTPACK)
self.assertEqual(existing_pack_path, expected_path_path)
self.assertEqual(zi.is_package(packdir + '__init__'), False)
self.assertEqual(zi.is_package(packdir + TESTPACK2), True)
self.assertEqual(zi.is_package(packdir2 + TESTMOD), False)
mod_path = packdir2 + TESTMOD
mod_name = module_path_to_dotted_name(mod_path)
__import__(mod_name)
mod = sys.modules[mod_name]
self.assertEqual(zi.get_source(TESTPACK), None)
self.assertEqual(zi.get_source(mod_path), None)
self.assertEqual(zi.get_filename(mod_path), mod.__file__)
# To pass in the module name instead of the path, we must use the
# right importer
loader = mod.__loader__
self.assertEqual(loader.get_source(mod_name), None)
self.assertEqual(loader.get_filename(mod_name), mod.__file__)
# test prefix and archivepath members
zi2 = zipimport.zipimporter(TEMP_ZIP + os.sep + TESTPACK)
self.assertEqual(zi2.archive, TEMP_ZIP)
self.assertEqual(zi2.prefix, TESTPACK + os.sep)
finally:
z.close()
os.remove(TEMP_ZIP)
def testZipImporterMethodsInSubDirectory(self):
packdir = TESTPACK + os.sep
packdir2 = packdir + TESTPACK2 + os.sep
files = {packdir2 + "__init__" + pyc_ext: (NOW, test_pyc),
packdir2 + TESTMOD + pyc_ext: (NOW, test_pyc)}
z = ZipFile(TEMP_ZIP, "w")
try:
for name, (mtime, data) in files.items():
zinfo = ZipInfo(name, time.localtime(mtime))
zinfo.compress_type = self.compression
z.writestr(zinfo, data)
z.close()
zi = zipimport.zipimporter(TEMP_ZIP + os.sep + packdir)
self.assertEqual(zi.archive, TEMP_ZIP)
self.assertEqual(zi.prefix, packdir)
self.assertEqual(zi.is_package(TESTPACK2), True)
mod = zi.load_module(TESTPACK2)
self.assertEqual(zi.get_filename(TESTPACK2), mod.__file__)
self.assertEqual(
zi.is_package(TESTPACK2 + os.sep + '__init__'), False)
self.assertEqual(
zi.is_package(TESTPACK2 + os.sep + TESTMOD), False)
mod_path = TESTPACK2 + os.sep + TESTMOD
mod_name = module_path_to_dotted_name(mod_path)
__import__(mod_name)
mod = sys.modules[mod_name]
self.assertEqual(zi.get_source(TESTPACK2), None)
self.assertEqual(zi.get_source(mod_path), None)
self.assertEqual(zi.get_filename(mod_path), mod.__file__)
# To pass in the module name instead of the path, we must use the
# right importer
loader = mod.__loader__
self.assertEqual(loader.get_source(mod_name), None)
self.assertEqual(loader.get_filename(mod_name), mod.__file__)
finally:
z.close()
os.remove(TEMP_ZIP)
def testGetData(self):
z = ZipFile(TEMP_ZIP, "w")
z.compression = self.compression
try:
name = "testdata.dat"
data = bytes(x for x in range(256))
z.writestr(name, data)
z.close()
zi = zipimport.zipimporter(TEMP_ZIP)
self.assertEqual(data, zi.get_data(name))
self.assertIn('zipimporter object', repr(zi))
finally:
z.close()
os.remove(TEMP_ZIP)
def testImporterAttr(self):
src = """if 1: # indent hack
def get_file():
return __file__
if __loader__.get_data("some.data") != b"some data":
raise AssertionError("bad data")\n"""
pyc = make_pyc(compile(src, "<???>", "exec"), NOW, len(src))
files = {TESTMOD + pyc_ext: (NOW, pyc),
"some.data": (NOW, "some data")}
self.doTest(pyc_ext, files, TESTMOD)
def testImport_WithStuff(self):
# try importing from a zipfile which contains additional
# stuff at the beginning of the file
files = {TESTMOD + ".py": (NOW, test_src)}
self.doTest(".py", files, TESTMOD,
stuff=b"Some Stuff"*31)
def assertModuleSource(self, module):
self.assertEqual(inspect.getsource(module), test_src)
def testGetSource(self):
files = {TESTMOD + ".py": (NOW, test_src)}
self.doTest(".py", files, TESTMOD, call=self.assertModuleSource)
def testGetCompiledSource(self):
pyc = make_pyc(compile(test_src, "<???>", "exec"), NOW, len(test_src))
files = {TESTMOD + ".py": (NOW, test_src),
TESTMOD + pyc_ext: (NOW, pyc)}
self.doTest(pyc_ext, files, TESTMOD, call=self.assertModuleSource)
def runDoctest(self, callback):
files = {TESTMOD + ".py": (NOW, test_src),
"xyz.txt": (NOW, ">>> log.append(True)\n")}
self.doTest(".py", files, TESTMOD, call=callback)
def doDoctestFile(self, module):
log = []
old_master, doctest.master = doctest.master, None
try:
doctest.testfile(
'xyz.txt', package=module, module_relative=True,
globs=locals()
)
finally:
doctest.master = old_master
self.assertEqual(log,[True])
def testDoctestFile(self):
self.runDoctest(self.doDoctestFile)
def doDoctestSuite(self, module):
log = []
doctest.DocFileTest(
'xyz.txt', package=module, module_relative=True,
globs=locals()
).run()
self.assertEqual(log,[True])
def testDoctestSuite(self):
self.runDoctest(self.doDoctestSuite)
def doTraceback(self, module):
try:
module.do_raise()
except:
tb = sys.exc_info()[2].tb_next
f,lno,n,line = extract_tb(tb, 1)[0]
self.assertEqual(line, raise_src.strip())
f,lno,n,line = extract_stack(tb.tb_frame, 1)[0]
self.assertEqual(line, raise_src.strip())
s = io.StringIO()
print_tb(tb, 1, s)
self.assertTrue(s.getvalue().endswith(raise_src))
else:
raise AssertionError("This ought to be impossible")
def testTraceback(self):
files = {TESTMOD + ".py": (NOW, raise_src)}
self.doTest(None, files, TESTMOD, call=self.doTraceback)
@unittest.skipIf(support.TESTFN_UNENCODABLE is None,
"need an unencodable filename")
def testUnencodable(self):
filename = support.TESTFN_UNENCODABLE + ".zip"
z = ZipFile(filename, "w")
zinfo = ZipInfo(TESTMOD + ".py", time.localtime(NOW))
zinfo.compress_type = self.compression
z.writestr(zinfo, test_src)
z.close()
try:
zipimport.zipimporter(filename)
finally:
os.remove(filename)
@support.requires_zlib
class CompressedZipImportTestCase(UncompressedZipImportTestCase):
compression = ZIP_DEFLATED
class BadFileZipImportTestCase(unittest.TestCase):
def assertZipFailure(self, filename):
self.assertRaises(zipimport.ZipImportError,
zipimport.zipimporter, filename)
def testNoFile(self):
self.assertZipFailure('AdfjdkFJKDFJjdklfjs')
def testEmptyFilename(self):
self.assertZipFailure('')
def testBadArgs(self):
self.assertRaises(TypeError, zipimport.zipimporter, None)
self.assertRaises(TypeError, zipimport.zipimporter, TESTMOD, kwd=None)
def testFilenameTooLong(self):
self.assertZipFailure('A' * 33000)
def testEmptyFile(self):
support.unlink(TESTMOD)
support.create_empty_file(TESTMOD)
self.assertZipFailure(TESTMOD)
def testFileUnreadable(self):
support.unlink(TESTMOD)
fd = os.open(TESTMOD, os.O_CREAT, 000)
try:
os.close(fd)
self.assertZipFailure(TESTMOD)
finally:
# If we leave "the read-only bit" set on Windows, nothing can
# delete TESTMOD, and later tests suffer bogus failures.
os.chmod(TESTMOD, 0o666)
support.unlink(TESTMOD)
def testNotZipFile(self):
support.unlink(TESTMOD)
fp = open(TESTMOD, 'w+')
fp.write('a' * 22)
fp.close()
self.assertZipFailure(TESTMOD)
# XXX: disabled until this works on Big-endian machines
def _testBogusZipFile(self):
support.unlink(TESTMOD)
fp = open(TESTMOD, 'w+')
fp.write(struct.pack('=I', 0x06054B50))
fp.write('a' * 18)
fp.close()
z = zipimport.zipimporter(TESTMOD)
try:
self.assertRaises(TypeError, z.find_module, None)
self.assertRaises(TypeError, z.load_module, None)
self.assertRaises(TypeError, z.is_package, None)
self.assertRaises(TypeError, z.get_code, None)
self.assertRaises(TypeError, z.get_data, None)
self.assertRaises(TypeError, z.get_source, None)
error = zipimport.ZipImportError
self.assertEqual(z.find_module('abc'), None)
self.assertRaises(error, z.load_module, 'abc')
self.assertRaises(error, z.get_code, 'abc')
self.assertRaises(IOError, z.get_data, 'abc')
self.assertRaises(error, z.get_source, 'abc')
self.assertRaises(error, z.is_package, 'abc')
finally:
zipimport._zip_directory_cache.clear()
def test_main():
try:
support.run_unittest(
UncompressedZipImportTestCase,
CompressedZipImportTestCase,
BadFileZipImportTestCase,
)
finally:
support.unlink(TESTMOD)
if __name__ == "__main__":
test_main()
|
py | 7dfd4a2837bf5c2041b5352c88b354ad1db40a00 | from telnetlib import Telnet
from typing import List, Iterable
from stmlearn.suls import SUL
class RemoteCoffeeMachineSUL(SUL):
def __init__(self, host, port):
self.tn = Telnet(host, port)
# Inputs is a list of inputs to send over telnet
# You should make this function return the last
# output the coffee machine sends back to you
def process_input(self, inputs: Iterable[str]) -> str:
output = ''
for i in inputs:
self.tn.write((i + '\n').encode('ascii'))
bytes = self.tn.read_until('\n'.encode('ascii'))
output = bytes.decode('ascii').replace('\n', '')
print(output)
return output
def reset(self) -> None:
self.tn.write('reset\n'.encode('ascii'))
def get_alphabet(self) -> Iterable[str]:
return ["choose_capuccino", "choose_black", "pay", "make_coffee"]
|
py | 7dfd4bb1b4d37d482d4983411e0f2e76ce5fbcd9 | # Copyright (c) 2019, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
log = logging.getLogger(__name__)
class KafkaWriter:
"""
Publish to Kafka topic based on config object.
:param kafka_topic: Kafka topic
:param batch_size: batch size
:param delimiter: delimiter
:param producer: producer
"""
# Column name of formatted output messages sent to kafka
output_colname = "delimited_output"
def __init__(self, kafka_topic, batch_size, delimiter, producer):
self._kafka_topic = kafka_topic
self._batch_size = batch_size
self._delimiter = delimiter
self._producer = producer
@property
def producer(self):
return self._producer
@property
def delimiter(self):
return self._delimiter
def write_data(self, df):
"""
publish messages to kafka topic
:param df: dataframe to publish
"""
out_df = self._generate_delimited_ouput_col(df)
for rec in out_df.to_records():
self.producer.produce(self._kafka_topic, rec[self.output_colname])
if len(self.producer) > self._batch_size:
log.debug(
"batch reached, calling poll... producer unsent: %s",
len(self.producer),
)
self.producer.poll(0)
def _generate_delimited_ouput_col(self, gdf):
first_col = gdf.columns[0]
gdf[first_col] = gdf[first_col].astype("str").str.fillna("")
gdf[self.output_colname] = gdf[first_col].astype("str").str.rstrip()
for col in gdf.columns[1:-1]:
gdf[col] = gdf[col].astype("str").fillna("")
gdf[col] = gdf[col].astype("str").str.rstrip()
gdf[self.output_colname] = gdf[self.output_colname].str.cat(
gdf[col], sep=self.delimiter
)
return gdf
def close(self):
"""
Close Kafka writer
"""
log.info("Closing kafka writer...")
if self._producer is not None:
self._producer.flush()
log.info("Closed kafka writer.")
|
py | 7dfd4bcff0d0fb7b4cce7e2b0d4e5ac167c915c6 | import unittest
import numpy as np
import numpy.testing as npt
import flavio
import scipy.stats
class TestMath(unittest.TestCase):
def test_normal_pdf(self):
x = 2.5
x_arr = np.array([-0.3, 1, 1.5, 2.])
mu = 0.3
sigma = 0.92
# with numbers
self.assertAlmostEqual(
flavio.math.functions.normal_logpdf(x, mu, sigma),
scipy.stats.norm.logpdf(x, mu, sigma), places=10)
self.assertAlmostEqual(
flavio.math.functions.normal_pdf(x, mu, sigma),
scipy.stats.norm.pdf(x, mu, sigma), places=10)
# with arrays
npt.assert_array_almost_equal(
flavio.math.functions.normal_logpdf(x_arr, mu, sigma),
scipy.stats.norm.logpdf(x_arr, mu, sigma),
decimal=10)
npt.assert_array_almost_equal(
flavio.math.functions.normal_pdf(x_arr, mu, sigma),
scipy.stats.norm.pdf(x_arr, mu, sigma),
decimal=10)
def test_dilog(self):
# check that mpmath result for complex dilog is reproduced
self.assertAlmostEqual(flavio.math.functions.li2(1 + 1j),
0.6168502750680851+1.4603621167531196j)
self.assertAlmostEqual(flavio.math.functions.li2(1 - 2j),
-0.05947479867380914-2.072647971774756j)
self.assertAlmostEqual(flavio.math.functions.li2(1),
1.6449340668482264)
self.assertAlmostEqual(flavio.math.functions.li2(-1),
-0.8224670334241132)
|
py | 7dfd4c1a8da146716c6184729b2c7e17c1bd37cf | # -*- coding: utf-8 -*-
"""
Deprecated OpenGL visualization
"""
import numpy as np
# package PyOpenGL:
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
NUM_POLYGON = 6
DISTANCE_FACTOR = 2.5
class Visualizer:
"""
    Non-interactive visualization of FiberBundles.
    Note:
        The method fastpli.model.Solver.DrawScene() is much faster.
        Later this class will become an interactive visualizer.
"""
fbs = []
rot_x = rot_y = 30
distance = 0
distance_new = distance
center = np.empty((3), float)
center_new = center
def __init__(self, width=800, height=600, title='fastpli.model.Visualizer'):
self._glut_init(width, height, title)
def _glut_init(self, width, height, title):
glutInit()
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
glutInitWindowSize(width, height)
glutInitWindowPosition(0, 0)
glutCreateWindow(title)
# setting scene
glLightModeli(GL_LIGHT_MODEL_LOCAL_VIEWER, GL_TRUE)
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
glEnable(GL_COLOR_MATERIAL)
glLightfv(GL_LIGHT0, GL_AMBIENT, (0.2, 0.2, 0.2, 1.0))
glLightfv(GL_LIGHT0, GL_DIFFUSE, [0.8, 0.8, 0.8, 1.0])
glLightfv(GL_LIGHT0, GL_SPECULAR, [0.8, 0.8, 0.8, 1.0])
glEnable(GL_DEPTH_TEST)
def _draw_cylinders(self):
quadObj = gluNewQuadric()
glColor3f(0.8, 0.8, 0.8)
for fb in self.fbs:
for f in fb:
(points, radii) = f.data
for i in range(len(radii) - 1):
dp = points[i + 1, :] - points[i, :]
height = np.linalg.norm(dp)
unit = dp / height
theta = np.arccos(unit[2]) / np.pi * 180
phi = np.arctan2(unit[1], unit[0]) / np.pi * 180
glPushMatrix()
glTranslatef(points[i, 0], points[i, 1], points[i, 2])
glRotatef(phi, 0.0, 0.0, 1.0)
glRotatef(theta, 0.0, 1.0, 0.0)
gluCylinder(quadObj, radii[i], radii[i + 1], height,
NUM_POLYGON, 1)
glPopMatrix()
def _auto_volume(self):
max_vol = np.array((-float('inf'), -float('inf'), -float('inf')))
min_vol = np.array((float('inf'), float('inf'), float('inf')))
for fb in self.fbs:
for f in fb:
fiber_min = f.points.min(axis=0)
fiber_max = f.points.max(axis=0)
min_vol = np.array([min_vol, fiber_min]).min(axis=0)
max_vol = np.array([max_vol, fiber_max]).max(axis=0)
self.center_new = (min_vol + max_vol) / 2
self.distance_new = max(max_vol - min_vol)
def _resize(self, width, height):
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glViewport(0, 0, width, height)
gluPerspective(45, width / height, 0.1, 1000000)
glMatrixMode(GL_MODELVIEW)
def set_fbs(self, fiber_bundles):
self.fbs = fiber_bundles
self._auto_volume()
        if self.distance == 0:
            # first call: avoid dividing by zero / uninitialized values
            self.center, self.distance = self.center_new, self.distance_new
            return
        if sum(abs(self.center - self.center_new) / self.center) > 0.1:
            self.center = self.center_new
        if abs((self.distance - self.distance_new) / self.distance) > 0.1:
            self.distance = self.distance_new
def draw(self):
self._resize(glutGet(GLUT_WINDOW_WIDTH), glutGet(GLUT_WINDOW_HEIGHT))
glClearColor(0, 0, 0, 1)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLightfv(GL_LIGHT0, GL_POSITION, [1, 1, 1, 0])
glLoadIdentity()
glTranslatef(-self.center[0], -self.center[1], -self.center[2])
glTranslatef(0, 0, -DISTANCE_FACTOR * self.distance)
glRotatef(self.rot_x, 1, 0, 0)
glRotatef(self.rot_y, 0, 1, 0)
self._draw_cylinders()
glutSwapBuffers()
def set_rot(self, rotx, roty):
self.rot_x += rotx
self.rot_y += roty
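# --- Hedged usage sketch (not part of the original module) -------------------
# `fiber_bundles` is assumed to follow the fastpli convention used above: a
# list of bundles, each a list of fiber objects exposing `.data` -> (points,
# radii) and `.points`.  The GLUT calls come from the wildcard imports.
def _example_show(fiber_bundles):
    vis = Visualizer(width=800, height=600)
    vis.set_fbs(fiber_bundles)
    glutDisplayFunc(vis.draw)  # redraw callback
    glutIdleFunc(vis.draw)     # keep rendering between events
    glutMainLoop()             # blocks until the window is closed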
|
py | 7dfd4c88b3c2ffbf4dab8aa41d790b4232f3822d |
from runner.runner import RunnerListener
class NetworkSaver(RunnerListener):
def __init__(self, SAVE_FREQ, output_dir, model):
self.total_step_count = 0
self.SAVE_FREQ = SAVE_FREQ
self.output_dir = output_dir
self.model = model
def on_step(self, ob = None, action = None, next_ob = None, reward = None, done = None):
self.total_step_count += 1
if self.total_step_count % self.SAVE_FREQ == 0:
            if self.output_dir is not None:
self.model.save_weights(self.output_dir + '/weights_{0}.h5'.format(self.total_step_count))
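# Hedged usage sketch (illustrative names; the surrounding Runner API is
# assumed, not defined in this file):
#   saver = NetworkSaver(SAVE_FREQ=10000, output_dir='checkpoints', model=model)
#   runner = Runner(..., listeners=[saver])  # on_step() is then called each step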
|
py | 7dfd4ca8ecada8096b0585b10792ca1c2658e1d8 | #!/usr/bin/env python
__author__ = '[email protected]'
"""
Identical to the collapse script provided in cDNA_primer (ToFU) GitHub.
Takes a SAM file and the input fasta/fastq used to produce the SAM file,
filter out alignments based on low coverage/identity and collapse/merge
any identical isoforms based on the aligned exonic structure.
Example:
collapse_isoforms_by_sam.py --input test.fq --fq -s test.fq.sorted.sam --dun-merge-5-shorter -o test
Suggested scripts to follow up with:
get_abundance_post_collapse.py: create count (absolute and normalized) information post collapse
filter_by_count.py: filter away based on FL count support
filter_away_subset.py (if collapse is run with --dun-merge-5-shorter)
"""
import os, sys
from collections import defaultdict
from bx.intervals import IntervalTree
from Bio import SeqIO
from cupcake.tofu.utils import check_ids_unique
from cupcake.tofu.branch import branch_simple2
from cupcake.tofu import compare_junctions
from cupcake.io import GFF
def pick_rep(fa_fq_filename, gff_filename, group_filename, output_filename, is_fq=False, pick_least_err_instead=False, bad_gff_filename=None):
"""
For each group, select the representative record
    If it is a FASTA file (is_fq False) -- then always pick the longest one
    If it is a FASTQ file (is_fq True) -- then
If pick_least_err_instead is True, pick the one w/ least number of expected base errors
Else, pick the longest one
"""
fd = SeqIO.to_dict(SeqIO.parse(open(fa_fq_filename), 'fastq' if is_fq else 'fasta'))
fout = open(output_filename, 'w')
coords = {}
for line in open(gff_filename):
# ex: chr1 PacBio transcript 27567 29336 . - . gene_id "PB.1"; transcript_id "PB.1.1";
raw = line.strip().split('\t')
if raw[2] == 'transcript':
tid = raw[-1].split('; ')[1].split()[1][1:-2]
coords[tid] = "{0}:{1}-{2}({3})".format(raw[0], raw[3], raw[4], raw[6])
if bad_gff_filename is not None:
for line in open(bad_gff_filename):
raw = line.strip().split('\t')
if raw[2] == 'transcript':
tid = raw[-1].split('; ')[1].split()[1][1:-2]
coords[tid] = "{0}:{1}-{2}({3})".format(raw[0], raw[3], raw[4], raw[6])
for line in open(group_filename):
pb_id, members = line.strip().split('\t')
print("Picking representative sequence for {0}".format(pb_id), file=sys.stdout)
best_rec = None
#best_id = None
#best_seq = None
#best_qual = None
best_err = 9999999
err = 9999999
max_len = 0
for x in members.split(','):
if is_fq and pick_least_err_instead:
                err = sum(10 ** -(i / 10.) for i in fd[x].letter_annotations['phred_quality'])  # expected errors: sum of 10^(-Q/10)
if (is_fq and pick_least_err_instead and err < best_err) or ((not is_fq or not pick_least_err_instead) and len(fd[x].seq) >= max_len):
best_rec = fd[x]
#best_id = x
#best_seq = fd[x].seq
#if is_fq:
# best_qual = fd[x].quality
# best_err = err
max_len = len(fd[x].seq)
_id_ = "{0}|{1}|{2}".format(pb_id, coords[pb_id], best_rec.id)
best_rec.id = _id_
SeqIO.write(best_rec, fout, 'fastq' if is_fq else 'fasta')
fout.close()
def collapse_fuzzy_junctions(gff_filename, group_filename, allow_extra_5exon, internal_fuzzy_max_dist):
def can_merge(m, r1, r2):
if m == 'exact':
return True
else:
if not allow_extra_5exon:
return False
# below is continued only if (a) is 'subset' or 'super' AND (b) allow_extra_5exon is True
if m == 'subset':
r1, r2 = r2, r1 # rotate so r1 is always the longer one
if m == 'super' or m == 'subset':
n2 = len(r2.ref_exons)
# check that (a) r1 and r2 end on same 3' exon, that is the last acceptor site agrees
# AND (b) the 5' start of r2 is sandwiched between the matching r1 exon coordinates
if r1.strand == '+':
return abs(r1.ref_exons[-1].start - r2.ref_exons[-1].start) <= internal_fuzzy_max_dist and \
r1.ref_exons[-n2].start <= r2.ref_exons[0].start < r1.ref_exons[-n2].end
else:
return abs(r1.ref_exons[0].end - r2.ref_exons[0].end) <= internal_fuzzy_max_dist and \
r1.ref_exons[n2-1].start <= r2.ref_exons[-1].end < r1.ref_exons[n2].end
return False
d = {}
recs = defaultdict(lambda: {'+':IntervalTree(), '-':IntervalTree()}) # chr --> strand --> tree
fuzzy_match = defaultdict(lambda: [])
for r in GFF.collapseGFFReader(gff_filename):
d[r.seqid] = r
has_match = False
r.segments = r.ref_exons
for r2 in recs[r.chr][r.strand].find(r.start, r.end):
r2.segments = r2.ref_exons
m = compare_junctions.compare_junctions(r, r2, internal_fuzzy_max_dist=internal_fuzzy_max_dist, max_5_diff=args.max_5_diff, max_3_diff=args.max_3_diff)
if can_merge(m, r, r2):
fuzzy_match[r2.seqid].append(r.seqid)
has_match = True
break
if not has_match:
recs[r.chr][r.strand].insert(r.start, r.end, r)
fuzzy_match[r.seqid] = [r.seqid]
group_info = {}
with open(group_filename) as f:
for line in f:
pbid, members = line.strip().split('\t')
group_info[pbid] = [x for x in members.split(',')]
# pick for each fuzzy group the one that has the most exons
keys = list(fuzzy_match.keys())
keys.sort(key=lambda x: int(x.split('.')[1]))
f_gff = open(gff_filename+'.fuzzy', 'w')
f_group = open(group_filename+'.fuzzy', 'w')
for k in keys:
all_members = []
best_pbid, best_size, best_num_exons = fuzzy_match[k][0], len(group_info[fuzzy_match[k][0]]), len(d[fuzzy_match[k][0]].ref_exons)
all_members += group_info[fuzzy_match[k][0]]
for pbid in fuzzy_match[k][1:]:
_num_exons = len(d[pbid].ref_exons)
_size = len(group_info[pbid])
all_members += group_info[pbid]
if _num_exons > best_num_exons or (_num_exons==best_num_exons and _size>best_size):
best_pbid, best_size, best_num_exons = pbid, _size, _num_exons
GFF.write_collapseGFF_format(f_gff, d[best_pbid])
f_group.write("{0}\t{1}\n".format(best_pbid, ",".join(all_members)))
f_gff.close()
f_group.close()
return fuzzy_match
def main(args):
### sanity check that input file and input SAM exists
if not os.path.exists(args.input):
print("Input file {0} does not exist. Abort.".format(args.input), file=sys.stderr)
sys.exit(-1)
if not os.path.exists(args.sam):
print("SAM file {0} does not exist. Abort.".format(args.sam), file=sys.stderr)
sys.exit(-1)
# check for duplicate IDs
check_ids_unique(args.input, is_fq=args.fq)
ignored_fout = open(args.prefix + '.ignored_ids.txt', 'w')
if args.flnc_coverage > 0:
f_good = open(args.prefix + '.collapsed.good.gff', 'w')
f_bad = open(args.prefix + '.collapsed.bad.gff', 'w')
cov_threshold = args.flnc_coverage
else:
f_good = open(args.prefix + '.collapsed.gff', 'w')
f_bad = f_good
cov_threshold = 1
f_txt = open(args.prefix + '.collapsed.group.txt', 'w')
b = branch_simple2.BranchSimple(args.input, cov_threshold=cov_threshold, min_aln_coverage=args.min_aln_coverage, min_aln_identity=args.min_aln_identity, is_fq=args.fq, max_5_diff=args.max_5_diff, max_3_diff=args.max_3_diff)
iter = b.iter_gmap_sam(args.sam, ignored_fout)
for recs in iter: # recs is {'+': list of list of records, '-': list of list of records}
for v in recs.values():
for v2 in v:
if len(v2) > 0: b.process_records(v2, args.allow_extra_5exon, False, f_good, f_bad, f_txt)
ignored_fout.close()
f_good.close()
try:
f_bad.close()
except:
pass
f_txt.close()
if args.max_fuzzy_junction > 0: # need to further collapse those that have fuzzy junctions!
        collapse_fuzzy_junctions(f_good.name, f_txt.name, args.allow_extra_5exon, internal_fuzzy_max_dist=args.max_fuzzy_junction, max_5_diff=args.max_5_diff, max_3_diff=args.max_3_diff)
os.rename(f_good.name, f_good.name+'.unfuzzy')
os.rename(f_txt.name, f_txt.name+'.unfuzzy')
os.rename(f_good.name+'.fuzzy', f_good.name)
os.rename(f_txt.name+'.fuzzy', f_txt.name)
if args.fq:
outfile = args.prefix+".collapsed.rep.fq"
else:
outfile = args.prefix+".collapsed.rep.fa"
if args.allow_extra_5exon: # 5merge, pick longest
pick_rep(args.input, f_good.name, f_txt.name, outfile, is_fq=args.fq, pick_least_err_instead=False, bad_gff_filename=f_bad.name)
else:
pick_rep(args.input, f_good.name, f_txt.name, outfile, is_fq=args.fq, pick_least_err_instead=True, bad_gff_filename=f_bad.name)
print("Ignored IDs written to: {0}".format(ignored_fout.name), file=sys.stdout)
print("Output written to: {0}\n{1}\n{2}\n{3}\n".format(f_good.name, f_txt.name, outfile, args), file=sys.stdout)
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("--input", help="Input FA/FQ filename")
parser.add_argument("--fq", default=False, action="store_true", help="Input is a fastq file (default is fasta)")
parser.add_argument("-s", "--sam", required=True, help="Sorted GMAP SAM filename")
parser.add_argument("-o", "--prefix", required=True, help="Output filename prefix")
parser.add_argument("-c", "--min-coverage", dest="min_aln_coverage", type=float, default=.99, help="Minimum alignment coverage (default: 0.99)")
parser.add_argument("-i", "--min-identity", dest="min_aln_identity", type=float, default=.95, help="Minimum alignment identity (default: 0.95)")
parser.add_argument("--max_fuzzy_junction", default=5, type=int, help="Max fuzzy junction dist (default: 5 bp)")
parser.add_argument("--max_5_diff", default=1000, type=int, help="Maximum allowed 5' difference if on same exon (default: 1000 bp)")
parser.add_argument("--max_3_diff", default=100, type=int, help="Maximum allowed 3' difference if on same exon (default: 100 bp)")
parser.add_argument("--flnc_coverage", dest="flnc_coverage", type=int, default=-1, help="Minimum # of FLNC reads, only use this for aligned FLNC reads, otherwise results undefined!")
parser.add_argument("--dun-merge-5-shorter", action="store_false", dest="allow_extra_5exon", default=True, help="Don't collapse shorter 5' transcripts (default: turned off)")
args = parser.parse_args()
main(args)
|
py | 7dfd4cfd944b0d6f4e1bc3c00e1b2cf39e0cd754 | """This module contains the general information for FirmwareInstallImpact ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class FirmwareInstallImpactConsts:
SUBJECT_ADAPTOR = "adaptor"
SUBJECT_BIOS = "bios"
SUBJECT_BOARD_CONTROLLER = "board-controller"
SUBJECT_CIMC = "cimc"
SUBJECT_CMC = "cmc"
SUBJECT_GRAPHICS_CARD = "graphics-card"
SUBJECT_IOCARD = "iocard"
SUBJECT_ONBOARD_DEVICE = "onboard-device"
SUBJECT_SAS_EXP_REG_FW = "sas-exp-reg-fw"
SUBJECT_SAS_EXPANDER = "sas-expander"
SUBJECT_SERVER = "server"
SUBJECT_SERVICE_PROFILE = "service-profile"
SUBJECT_STORAGE_CONTROLLER = "storage-controller"
SUBJECT_SWITCH = "switch"
SUBJECT_SYSTEM = "system"
SUBJECT_UNKNOWN = "unknown"
TYPE_ACTIVATE = "activate"
TYPE_NOIMPACT = "noimpact"
TYPE_RESET = "reset"
TYPE_UPDATE = "update"
class FirmwareInstallImpact(ManagedObject):
"""This is FirmwareInstallImpact class."""
consts = FirmwareInstallImpactConsts()
naming_props = set(['keyDn'])
mo_meta = MoMeta("FirmwareInstallImpact", "firmwareInstallImpact", "fw-sys-InstallImpact-[key_dn]", VersionMeta.Version211a, "InputOutput", 0x3f, [], ["admin", "ls-config-policy", "ls-server-policy"], [], [], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"key_dn": MoPropertyMeta("key_dn", "keyDn", "string", VersionMeta.Version211a, MoPropertyMeta.NAMING, 0x8, 1, 510, None, [], []),
"maint_policy_dn": MoPropertyMeta("maint_policy_dn", "maintPolicyDn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"reboot_policy": MoPropertyMeta("reboot_policy", "rebootPolicy", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"subject": MoPropertyMeta("subject", "subject", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["adaptor", "bios", "board-controller", "cimc", "cmc", "graphics-card", "iocard", "onboard-device", "sas-exp-reg-fw", "sas-expander", "server", "service-profile", "storage-controller", "switch", "system", "unknown"], []),
"type": MoPropertyMeta("type", "type", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["activate", "noimpact", "reset", "update"], []),
}
prop_map = {
"childAction": "child_action",
"descr": "descr",
"dn": "dn",
"keyDn": "key_dn",
"maintPolicyDn": "maint_policy_dn",
"rebootPolicy": "reboot_policy",
"rn": "rn",
"sacl": "sacl",
"status": "status",
"subject": "subject",
"type": "type",
}
def __init__(self, parent_mo_or_dn, key_dn, **kwargs):
self._dirty_mask = 0
self.key_dn = key_dn
self.child_action = None
self.descr = None
self.maint_policy_dn = None
self.reboot_policy = None
self.sacl = None
self.status = None
self.subject = None
self.type = None
ManagedObject.__init__(self, "FirmwareInstallImpact", parent_mo_or_dn, **kwargs)
|
py | 7dfd4da836ea5bf44de6fc47613ebedb356cccab | from django.conf.urls import url
import api.views
urlpatterns = [
url(r'^(?P<kind>catalogs$)', api.views.plist_api),
url(r'^(?P<kind>catalogs)/(?P<filepath>.*$)', api.views.plist_api),
url(r'^(?P<kind>manifests$)', api.views.plist_api),
url(r'^(?P<kind>manifests)/(?P<filepath>.*$)', api.views.plist_api),
url(r'^(?P<kind>pkgsinfo$)', api.views.plist_api),
url(r'^(?P<kind>pkgsinfo)/(?P<filepath>.*$)', api.views.plist_api),
url(r'^(?P<kind>icons$)', api.views.file_api),
url(r'^(?P<kind>icons)/(?P<filepath>.*$)', api.views.file_api),
url(r'^(?P<kind>pkgs$)', api.views.file_api),
url(r'^(?P<kind>pkgs)/(?P<filepath>.*$)', api.views.file_api),
]
|
py | 7dfd4df11306699d6bea578821efc6c94d250af1 | """SegmentationNN"""
import torch
import torch.nn as nn
import pytorch_lightning as pl
# class unetUp(nn.Module):
# def __init__(self, in_size, out_size, is_deconv):
# super(unetUp, self).__init__()
# self.conv = unetConv2(in_size, out_size, False)
# if is_deconv:
# self.up = nn.ConvTranspose2d(in_size, out_size, kernel_size=2, stride=2)
# else:
# self.up = nn.UpsamplingBilinear2d(scale_factor=2)
# def forward(self, inputs1, inputs2):
# outputs2 = self.up(inputs2)
# offset = outputs2.size()[2] - inputs1.size()[2]
# padding = 2 * [offset // 2, offset // 2]
# outputs1 = F.pad(inputs1, padding)
# return self.conv(torch.cat([outputs1, outputs2], 1))
class SegmentationNN(pl.LightningModule):
def __init__(self, num_classes=23, hparams=None):
super().__init__()
self.hparams = hparams
#######################################################################
# YOUR CODE #
#######################################################################
# filters = [64, 128, 256, 512, 1024]
filters = [32, 64, 128, 256, 512]
self.model = nn.Sequential(
# 64*236*236 -> 64*118*118
nn.Conv2d(3, filters[0], 3,padding=1),
nn.BatchNorm2d(filters[0]),
nn.ReLU(),
nn.MaxPool2d(2, 2),
# nn.Dropout(p=0.25),
# 128*116*116 -> 128*58*58
nn.Conv2d(filters[0], filters[1], 3,padding=1),
nn.BatchNorm2d(filters[1]),
nn.ReLU(),
nn.MaxPool2d(2, 2),
# 256*56*56 -> 256*28*28
nn.Conv2d(filters[1], filters[2], 3,padding=1),
nn.BatchNorm2d(filters[2]),
nn.ReLU(),
nn.MaxPool2d(2, 2),
# 512*26*26 -> 512*13*13
nn.Conv2d(filters[2], filters[3], 3,padding=1),
nn.BatchNorm2d(filters[3]),
nn.ReLU(),
nn.MaxPool2d(2, 2),
# 1024*12*12 -> 1024*6*6
nn.Conv2d(filters[3], filters[4], 3,padding=1),
nn.BatchNorm2d(filters[4]),
nn.ReLU(),
# ------------ start up-sampling ------------#
# 1024*12*12 -> 512*11*11
nn.UpsamplingBilinear2d(scale_factor=2),
nn.Conv2d(filters[4], filters[3], 1),
nn.ReLU(),
# 512*22*22 -> 256*20*20
nn.UpsamplingBilinear2d(scale_factor=2),
nn.Conv2d(filters[3], filters[2], 1),
nn.ReLU(),
# 256*40*40 -> 128*38*38
nn.UpsamplingBilinear2d(scale_factor=2),
nn.Conv2d(filters[2], filters[1], 1),
nn.ReLU(),
# 128*76*76 -> 64*74*74
nn.UpsamplingBilinear2d(scale_factor=2),
nn.Conv2d(filters[1], filters[0], 1),
nn.ReLU(),
nn.Conv2d(filters[0], num_classes, 1)
)
# filters = [32, 64, 128, 256, 512]
# self.model = nn.Sequential(
# # 32*236*236
# nn.Conv2d(3, filters[0], 5),
# nn.ReLU(),
# # 32*236*236 -> 128*58*58
# nn.Conv2d(filters[0], filters[1], 3),
# nn.ReLU(),
# nn.Linear(128*11*11, self.hparams["n_hidden"]),
# # 64*236*236 -> 64*118*118
# nn.Conv2d(3, filters[0], 5,padding=2),
# nn.ReLU(),
# # 128*116*116 -> 128*58*58
# nn.Conv2d(filters[0], filters[1], 3,padding=1),
# nn.ReLU(),
# nn.Linear(128*11*11, self.hparams["n_hidden"]),
# # 64*236*236 -> 64*118*118
# nn.Conv2d(3, filters[0], 5,padding=2),
# nn.ReLU(),
# # 128*116*116 -> 128*58*58
# nn.Conv2d(filters[0], filters[1], 3,padding=1),
# nn.ReLU(),
# nn.MaxPool2d(2, 2),
# # ------------ start up-sampling ------------#
# # 1024*12*12 -> 512*11*11
# nn.UpsamplingBilinear2d(scale_factor=2),
# nn.Linear(128*11*11, self.hparams["n_hidden"]),
# nn.Conv2d(filters[4], filters[3], 3,padding=1),
# nn.ReLU(),
# # 512*22*22 -> 256*20*20
# nn.Conv2d(filters[3], filters[2], 3,padding=1),
# nn.ReLU(),
# nn.Linear(128*11*11, self.hparams["n_hidden"]),
# # 256*40*40 -> 128*38*38
# nn.Conv2d(filters[2], filters[1], 3,padding=1),
# nn.ReLU(),
# # 128*76*76 -> 64*74*74
# nn.Conv2d(filters[1], filters[0], 3,padding=1),
# nn.ReLU(),
# nn.Linear(128*11*11, self.hparams["n_hidden"]),
# nn.Conv2d(filters[2], filters[1], 3,padding=1),
# nn.ReLU(),
# nn.Conv2d(filters[2], filters[1], 3,padding=1),
# nn.ReLU(),
# nn.Conv2d(filters[0], num_classes, 1)
# )
#######################################################################
# END OF YOUR CODE #
#######################################################################
def forward(self, x):
"""
Forward pass of the convolutional neural network. Should not be called
manually but by calling a model instance directly.
Inputs:
- x: PyTorch input Variable
"""
#######################################################################
# YOUR CODE #
#######################################################################
x = self.model(x)
#######################################################################
# END OF YOUR CODE #
#######################################################################
return x
@property
def is_cuda(self):
"""
Check if model parameters are allocated on the GPU.
"""
return next(self.parameters()).is_cuda
def save(self, path):
"""
Save model with its parameters to the given path. Conventionally the
path should end with "*.model".
Inputs:
- path: path string
"""
print('Saving model... %s' % path)
torch.save(self, path)
class DummySegmentationModel(pl.LightningModule):
def __init__(self, target_image):
super().__init__()
def _to_one_hot(y, num_classes):
scatter_dim = len(y.size())
y_tensor = y.view(*y.size(), -1)
zeros = torch.zeros(*y.size(), num_classes, dtype=y.dtype)
return zeros.scatter(scatter_dim, y_tensor, 1)
target_image[target_image == -1] = 1
self.prediction = _to_one_hot(target_image, 23).permute(2, 0, 1).unsqueeze(0)
def forward(self, x):
return self.prediction.float()
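# Minimal usage sketch (illustrative only): the encoder halves the spatial size four
# times and the decoder upsamples four times, so H and W are assumed to be multiples
# of 16; it also assumes a pytorch_lightning version that allows assigning
# self.hparams in __init__, as SegmentationNN above does.
if __name__ == "__main__":
    dummy = torch.randn(2, 3, 240, 240)            # hypothetical batch of RGB images
    model = SegmentationNN(num_classes=23, hparams={})
    scores = model(dummy)                          # expected shape: (2, 23, 240, 240)
    print(scores.shape)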
|
py | 7dfd4f4975b8c56b206723a81979fc290824fdfd | import time
import tools
import config
import random
import setting
from request import http
Today_getcoins = 0
Today_have_getcoins = 0  # this variable might be useful later, so keep it for now
Have_coins = 0
class mihoyobbs:
def __init__(self):
self.headers = {
"DS": tools.Get_ds(web=False, web_old=False),
"cookie": f"stuid={config.mihoyobbs_Stuid};stoken={config.mihoyobbs_Stoken}",
"x-rpc-client_type": setting.mihoyobbs_Client_type,
"x-rpc-app_version": setting.mihoyobbs_Version,
"x-rpc-sys_version": "6.0.1",
"x-rpc-channel": "mihoyo",
"x-rpc-device_id": tools.Get_deviceid(),
"x-rpc-device_name": tools.Random_text(random.randint(1, 10)),
"x-rpc-device_model": "Mi 10",
"Referer": "https://app.mihoyo.com",
"Host": "bbs-api.mihoyo.com",
"User-Agent": "okhttp/4.8.0"
}
self.Task_do = {
"bbs_Sign": False,
"bbs_Read_posts": False,
"bbs_Read_posts_num": 3,
"bbs_Like_posts": False,
"bbs_Like_posts_num": 5,
"bbs_Share": False
}
self.Get_taskslist()
        # if all three of these tasks are already done, there is no need to fetch the post list
if self.Task_do["bbs_Read_posts"] and self.Task_do["bbs_Like_posts"] and self.Task_do["bbs_Share"]:
pass
else:
self.postsList = self.Getlist()
    # fetch the task list, used to determine which tasks have already been done
def Get_taskslist(self):
global Today_getcoins
global Today_have_getcoins
global Have_coins
tools.log.info("正在获取任务列表")
req = http.get(url=setting.bbs_Taskslist, headers=self.headers)
data = req.json()
if "err" in data["message"]:
tools.log.info("获取任务列表失败,你的cookie可能已过期,请重新设置cookie。")
config.Clear_cookies()
exit(1)
else:
Today_getcoins = data["data"]["can_get_points"]
Today_have_getcoins = data["data"]["already_received_points"]
Have_coins = data["data"]["total_points"]
            # if the number of coins obtainable today is 0, treat all tasks as completed
if Today_getcoins == 0:
self.Task_do["bbs_Sign"] = True
self.Task_do["bbs_Read_posts"] = True
self.Task_do["bbs_Like_posts"] = True
self.Task_do["bbs_Share"] = True
else:
                # if the first mission_id is greater than or equal to 62, the daily tasks have not been done yet
if data["data"]["states"][0]["mission_id"] >= 62:
tools.log.info(f"新的一天,今天可以获得{Today_getcoins}个米游币")
pass
else:
tools.log.info(f"似乎还有任务没完成,今天还能获得{Today_getcoins}")
for i in data["data"]["states"]:
                    # 58 is the forum sign-in task
if i["mission_id"] == 58:
if i["is_get_award"]:
self.Task_do["bbs_Sign"] = True
                    # 59 is the read-posts task
elif i["mission_id"] == 59:
if i["is_get_award"]:
self.Task_do["bbs_Read_posts"] = True
else:
self.Task_do["bbs_Read_posts_num"] -= i["happened_times"]
                    # 60 is the like-posts task
elif i["mission_id"] == 60:
if i["is_get_award"]:
self.Task_do["bbs_Like_posts"] = True
else:
self.Task_do["bbs_Like_posts_num"] -= i["happened_times"]
                    # 61 is the share-post task
elif i["mission_id"] == 61:
if i["is_get_award"]:
self.Task_do["bbs_Share"] = True
                        # sharing is the last daily task; everything after this point is one-off, so break out of the loop
break
    # fetch the list of posts to work on
def Getlist(self) -> list:
temp_List = []
tools.log.info("正在获取帖子列表......")
req = http.get(url=setting.bbs_Listurl.format(setting.mihoyobbs_List_Use[0]["forumId"]), headers=self.headers)
data = req.json()
for n in range(5):
temp_List.append([data["data"]["list"][n]["post"]["post_id"], data["data"]["list"][n]["post"]["subject"]])
tools.log.info("已获取{}个帖子".format(len(temp_List)))
return temp_List
    # perform the sign-in
def Signin(self):
if self.Task_do["bbs_Sign"]:
tools.log.info("讨论区任务已经完成过了~")
else:
tools.log.info("正在签到......")
for i in setting.mihoyobbs_List_Use:
req = http.post(url=setting.bbs_Signurl.format(i["id"]), data="" ,headers=self.headers)
data = req.json()
if "err" not in data["message"]:
tools.log.info(str(i["name"]+ data["message"]))
time.sleep(random.randint(2, 8))
else:
tools.log.info("签到失败,你的cookie可能已过期,请重新设置cookie。")
config.Clear_cookies()
exit(1)
    # read posts
def Readposts(self):
if self.Task_do["bbs_Read_posts"]:
tools.log.info("看帖任务已经完成过了~")
else:
tools.log.info("正在看帖......")
for i in range(self.Task_do["bbs_Read_posts_num"]):
req = http.get(url=setting.bbs_Detailurl.format(self.postsList[i][0]), headers=self.headers)
data = req.json()
if data["message"] == "OK":
tools.log.info("看帖:{} 成功".format(self.postsList[i][1]))
time.sleep(random.randint(2, 8))
    # like posts
def Likeposts(self):
if self.Task_do["bbs_Like_posts"]:
tools.log.info("点赞任务已经完成过了~")
else:
tools.log.info("正在点赞......")
for i in range(self.Task_do["bbs_Like_posts_num"]):
req = http.post(url=setting.bbs_Likeurl, headers=self.headers,
json={"post_id": self.postsList[i][0], "is_cancel": False})
data = req.json()
if data["message"] == "OK":
tools.log.info("点赞:{} 成功".format(self.postsList[i][1]))
                    # check whether cancelling the like afterwards is enabled
if config.mihoyobbs["bbs_Unlike"]:
time.sleep(random.randint(2, 8))
req = http.post(url=setting.bbs_Likeurl, headers=self.headers,
json={"post_id": self.postsList[i][0], "is_cancel": True})
data = req.json()
if data["message"] == "OK":
tools.log.info("取消点赞:{} 成功".format(self.postsList[i][1]))
time.sleep(random.randint(2, 8))
    # share a post
def Share(self):
if self.Task_do["bbs_Share"]:
tools.log.info("分享任务已经完成过了~")
else:
tools.log.info("正在分享......")
req = http.get(url=setting.bbs_Shareurl.format(self.postsList[0][0]), headers=self.headers)
data = req.json()
if data["message"] == "OK":
tools.log.info("分享:{} 成功".format(self.postsList[0][1]))
time.sleep(random.randint(2, 8)) |
py | 7dfd4fbeae497226adb0250f8e0333ac04dc6a94 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
detect period error of Sejong corpus
__author__ = 'Jamie ([email protected])'
__copyright__ = 'Copyright (C) 2019-, Kakao Corp. All rights reserved.'
"""
###########
# imports #
###########
from argparse import ArgumentParser
import logging
import os
import re
import sys
from typing import Iterator, TextIO, Tuple
from khaiii.munjong.sejong_corpus import Morph, WORD_ID_PTN
#############
# functions #
#############
def _get_two_lines(fin: TextIO) -> Iterator[Tuple[str, str]]:
"""
get two lines tuple from file (generator)
Args:
fin: input file
Yields:
current line
next line
"""
curr_line = fin.readline().rstrip('\r\n')
for next_line in fin:
next_line = next_line.rstrip('\r\n')
yield curr_line, next_line
curr_line = next_line
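# e.g. for a file whose lines are "a", "b", "c", this yields ("a", "b") and then
# ("b", "c") -- each line paired with the line that follows it.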
def _is_correct_eos(line: str) -> bool:
"""
whether correct end of sentence or not
Args:
line: line (word)
Returns:
whether correct or not
"""
_, _, morphs_str = line.split('\t')
if re.match(r'.+/EF \+ ./SF$', morphs_str):
return True
if re.match(r'.+/SF \+ [\'"’”」\]]/SS$', morphs_str):
return True
morphs = [Morph.parse(_) for _ in morphs_str.split(' + ')]
tags_str = '+'.join([_.tag for _ in morphs])
if tags_str.endswith('+SF+SS+JKQ') or tags_str.endswith('+SF+SS+VCP+ETM'):
return True
return False
def run():
"""
run function which is the start point of program
"""
file_name = os.path.basename(sys.stdin.name)
for line_num, (curr_line, next_line) in enumerate(_get_two_lines(sys.stdin), start=1):
cols = curr_line.split('\t')
if len(cols) != 3 or not WORD_ID_PTN.match(cols[0]):
continue
if '/SF + ' not in cols[2] or not next_line.startswith('</'):
continue
if _is_correct_eos(curr_line):
continue
print('{}:{}\t{}'.format(file_name, line_num, curr_line))
########
# main #
########
def main():
"""
main function processes only argument parsing
"""
parser = ArgumentParser(description='detect period error of Sejong corpus')
parser.add_argument('--input', help='input file <default: stdin>', metavar='FILE')
parser.add_argument('--output', help='output file <default: stdout>', metavar='FILE')
parser.add_argument('--debug', help='enable debug', action='store_true')
args = parser.parse_args()
if args.input:
sys.stdin = open(args.input, 'rt')
if args.output:
sys.stdout = open(args.output, 'wt')
if args.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
run()
if __name__ == '__main__':
main()
|
py | 7dfd5083481e7abbb82124fe4ee1d6885a38c9ff | # Import the libraries we need
import requests
import time
from megapi import *
if __name__ == '__main__':
#bot = MegaPi()
#bot.start('/dev/ttyUSB0')
#bot.motorRun(1,0)
#Let us fetch data and evaluate label.
while True:
r = requests.get('http://ur-pi.local:3333/')
label = r.text
if label == "no":
#bot.motorRun(1,10)
time.sleep(2)
print("no")
#bot.motorRun(1.0)
elif label == "cone":
#bot.motorRun(2,10)
#bot.motorRun(1,5)
print("cone - turn")
time.sleep(3)
#bot.motorRun(2,-10)
print("cone - turn back")
time.sleep(3)
elif label == "LEGOMan":
#bot.motorRun(1,30)
print("kill the lego man")
time.sleep(1) |
py | 7dfd511e20f932dbba9867041641a49599a29234 | ###############################################################################
# PyDial: Multi-domain Statistical Spoken Dialogue System Software
###############################################################################
#
# Copyright 2015 - 2019
# Cambridge University Engineering Department Dialogue Systems Group
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
'''
SVMSemI - Support Vector Machine Semantic Decoder
=====================================================================
To use this in pydial, need to set "semitype = SVMSemI" for a domain in the relevant interface config file
(in the current state it is the CamRestaurants domain)
See texthub_svm.cfg, which can be used for this purpose for texthub interface
Copyright CUED Dialogue Systems Group 2015 - 2017
.. seealso:: Discriminative Spoken Language Understanding Using Word Confusion Networks
http://mi.eng.cam.ac.uk/~sjy/papers/hgtt12.pdf
.. seealso:: CUED Imports/Dependencies:
import :mod:`semi.SemI` |.|
import :mod:`utils.ContextLogger` |.|
import :mod: `decode.svmdec` |semi/CNetTrain|
************************
Important: please see semi/CNetTrain/README.md
'''
import os, sys, configparser
from utils import ContextLogger
logger = ContextLogger.getLogger('')
import imp
from semi import SemI
old_path = os.getcwd()
if "semi" not in old_path:
path = old_path+"/semi/CNetTrain/"
else:
path = old_path+"/CNetTrain/"
os.sys.path.insert(1, path)
from semi.CNetTrain import decode as svmdec
import math
import time
from semi import RegexSemI
print(sys.path)
__author__ = "cued_dialogue_systems_group"
class SVMSemI(SemI.SemI):
def __init__(self):
'''
Initialise some objects, use RegexSemI to solve classification errors, and to deal with
        goodbye and requests for alternatives
:return:
'''
self.RSemI = RegexSemI.RegexSemI() # For goodbye and request alternatives in decode
self.config = configparser.ConfigParser(allow_no_value=True)
self.config.read(path+"/config/eg.cfg")
self.classifiers=svmdec.init_classifier(self.config)
self.sys_act = []
def decode(self, ASR_obs, sys_act=None, turn=None):
'''
        Includes os.chdir to change directories from the pydial root to the locally installed CNetTrain directory inside semi.
Directories are changed back to pydial root after prediction. This ensures all the required
config and data files are accessed.
:param ASR_obs: hypothesis with the ASR N-best list
:param sys_act: previous system dialogue act
:param turn: turn id
:return: Semantic representation from the asr output
'''
#Check first general dialogue acts with Regular Expressions
regexpred = self.decode_general_hypothesis(ASR_obs[0][0])
if "bye()" in regexpred:
return [("bye()", 1.0)]
elif "reqalts()" in regexpred:
return [("reqalts()", 1.0)]
elif "affirm()" in regexpred:
return [("affirm()",1.0)]
elif "negate()"in regexpred:
return [("negate()",1.0)]
elif "hello()" in regexpred:
return [("hello()",1.0)]
else:
old_path = os.getcwd()
os.chdir(path)
sentinfo = self.input_json(ASR_obs, self.sys_act, turn)
before = int(round(time.time() * 1000))
predictions = svmdec.decode(self.classifiers,self.config, sentinfo)
after = int(round(time.time() * 1000))
pred_dur = after - before
logger.debug("prediction time: %d" % pred_dur) # Time taken by DLSemI for prediction
os.chdir(old_path)
logger.info(predictions)
self.semActs = self.format_semi_output(predictions)
logger.info(self.semActs)
return self.semActs
def input_json(self, ASR_obs, sys_act, turn):
'''
Format the incoming ASR_obs and sys_act into an input for SVM Classifiers in JSON
:param ASR_obs: ASR hypothesis
:param sys_act: Last system action
:param turn: Turn id
:return:
'''
logger.info(ASR_obs)
sentinfo = {}
asrhyps = []
for obs in ASR_obs:
asrhyps.append(dict([ ('asr-hyp', str(obs[0])), ('score', math.log(obs[1]))]))
sentinfo['turn-id'] = turn
sentinfo['asr-hyps'] = asrhyps
sentinfo['prevsysacts'] = []
return sentinfo
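    # e.g. ASR_obs=[("cheap restaurant", 0.9)] and turn=3 produce (sketch):
    #   {"turn-id": 3,
    #    "asr-hyps": [{"asr-hyp": "cheap restaurant", "score": math.log(0.9)}],
    #    "prevsysacts": []}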
def format_semi_output(self, sluhyps):
'''
Transform the output of SVM classifier to make it compatible with cued-pydial system
:param sluhyps: output coming from SVMSemI
:return: SVMSemI output in the required format for cued-pydial
'''
prediction_clean=[]
for hyp in sluhyps:
if not hyp["slu-hyp"]:
prediction_clean = [('null()',hyp['score'])]
continue
probability = hyp['score']
slu_hyp=hyp["slu-hyp"]
for sluh in slu_hyp:
dact = sluh['act']
pred_str=str(dact)
prediction_string = []
if not sluh['slots']:
prediction_string.append(pred_str+"()")
for slot in sluh['slots']:
prediction_string.append('%s(%s=%s)' % (str(dact), str(slot[0]), str(slot[1])))
prediction_string = '|'.join(prediction_string)
prediction_clean.append((prediction_string, probability))
return prediction_clean
def decode_general_hypothesis(self, obs):
'''
Regular expressions for bye() and reqalts(), affirm and type
:param obs: ASR hypothesis
:return: RegexSemI recognised dialogue act
'''
self.RSemI.semanticActs = []
self.RSemI._decode_reqalts(obs)
self.RSemI._decode_bye(obs)
self.RSemI._decode_type(obs)
self.RSemI._decode_affirm(obs)
return self.RSemI.semanticActs
if __name__ == '__main__':
svm=SVMSemI()
#preds=svm.decode([('I am looking for a chinese restaurant in the north',1.0)])
preds=svm.decode([('I am looking for restaurant',1.0)])
print(preds)
preds=[]
#preds=svm.decode([('something in the north',1.0)])
preds=svm.decode( [(' I am looking for a cheap restaurant', 1.0)])
print(preds)
preds=svm.decode( [('something in the north', 1.0)])
print(preds)
|
py | 7dfd51df41455818367256cc17da270c0c2b44a7 | #!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Bitcoin P2P network half-a-node.
This python code was modified from ArtForz' public domain half-a-node, as
found in the mini-node branch of http://github.com/jgarzik/pynode.
NodeConn: an object which manages p2p connectivity to a bitcoin node
NodeConnCB: a base class that describes the interface for receiving
callbacks with network messages from a NodeConn
CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
data structures that should map to corresponding structures in
bitcoin/primitives
msg_block, msg_tx, msg_headers, etc.:
data structures that represent network messages
ser_*, deser_*: functions that handle serialization/deserialization
"""
import asyncore
from codecs import encode
from collections import defaultdict
import copy
import hashlib
from io import BytesIO
import logging
import random
import socket
import struct
import sys
import time
from threading import RLock, Thread
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, bytes_to_hex_str
BIP0031_VERSION = 60000
MY_VERSION = 70014 # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MAX_INV_SZ = 50000
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000 # 1 btc in satoshis
NODE_NETWORK = (1 << 0)
NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
NODE_UNSUPPORTED_SERVICE_BIT_5 = (1 << 5)
NODE_UNSUPPORTED_SERVICE_BIT_7 = (1 << 7)
ACH_REGTEST_HARDFORK_HEIGHT = 3000
logger = logging.getLogger("TestFramework.mininode")
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to workaround an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# NodeConn acquires this lock whenever delivering a message to a NodeConnCB,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the NodeConnCB or NodeConn.
mininode_lock = RLock()
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def ripemd160(s):
return hashlib.new('ripemd160', s).digest()
def hash256(s):
return sha256(sha256(s))
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
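# CompactSize examples: ser_compact_size(252) == b'\xfc',
# ser_compact_size(253) == b'\xfd\xfd\x00', ser_compact_size(0x10000) == b'\xfe\x00\x00\x01\x00';
# deser_compact_size reads each of these back to the original integer.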
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
return ser_compact_size(len(s)) + s
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
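# uint256 values are serialized as 32 little-endian bytes, e.g.
# ser_uint256(1) == b'\x01' + b'\x00' * 31 and uint256_from_str(b'\x01' + b'\x00' * 31) == 1.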
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
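# e.g. the regtest compact target 0x207fffff expands to
# 0x7fffff << (8 * (0x20 - 3)), i.e. the bytes 7f ff ff followed by 29 zero bytes.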
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
r = ser_compact_size(len(l))
for i in l:
if ser_function_name:
r += getattr(i, ser_function_name)()
else:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
def deser_int_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = struct.unpack("<i", f.read(4))[0]
r.append(t)
return r
def ser_int_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += struct.pack("<i", i)
return r
def deser_byte_vector(f):
return deser_string(f)
def ser_byte_vector(l):
return ser_string(l)
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return bytes_to_hex_str(obj.serialize())
# Objects that map to bitcoind objects, which can be serialized/deserialized
class CAddress(object):
def __init__(self):
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
MSG_WITNESS_FLAG = 1<<30
class CInv(object):
typemap = {
0: "Error",
1: "TX",
2: "Block",
1|MSG_WITNESS_FLAG: "WitnessTx",
2|MSG_WITNESS_FLAG : "WitnessBlock",
4: "CompactBlock"
}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator(object):
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint(object):
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn(object):
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
self.nSequence)
class CTxOut(object):
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
bytes_to_hex_str(self.scriptPubKey))
class CScriptWitness(object):
def __init__(self):
# stack is a vector of strings
self.stack = []
def __repr__(self):
return "CScriptWitness(%s)" % \
(",".join([bytes_to_hex_str(x) for x in self.stack]))
def is_null(self):
if self.stack:
return False
return True
class CTxInWitness(object):
def __init__(self):
self.scriptWitness = CScriptWitness()
def deserialize(self, f):
self.scriptWitness.stack = deser_string_vector(f)
def serialize(self):
return ser_string_vector(self.scriptWitness.stack)
def __repr__(self):
return repr(self.scriptWitness)
def is_null(self):
return self.scriptWitness.is_null()
class CTxWitness(object):
def __init__(self):
self.vtxinwit = []
def deserialize(self, f):
for i in range(len(self.vtxinwit)):
self.vtxinwit[i].deserialize(f)
def serialize(self):
r = b""
# This is different than the usual vector serialization --
# we omit the length of the vector, which is required to be
# the same length as the transaction's vin vector.
for x in self.vtxinwit:
r += x.serialize()
return r
def __repr__(self):
return "CTxWitness(%s)" % \
(';'.join([repr(x) for x in self.vtxinwit]))
def is_null(self):
for x in self.vtxinwit:
if not x.is_null():
return False
return True
class CTransaction(object):
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.wit = CTxWitness()
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = tx.sha256
self.hash = tx.hash
self.wit = copy.deepcopy(tx.wit)
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
flags = 0
if len(self.vin) == 0:
flags = struct.unpack("<B", f.read(1))[0]
# Not sure why flags can't be zero, but this
# matches the implementation in bitcoind
if (flags != 0):
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
else:
self.vout = deser_vector(f, CTxOut)
if flags != 0:
self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))]
self.wit.deserialize(f)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize_without_witness(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
# Only serialize with witness when explicitly called for
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
if (len(self.wit.vtxinwit) != len(self.vin)):
# vtxinwit must have the same length as vin
self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)]
for i in range(len(self.wit.vtxinwit), len(self.vin)):
self.wit.vtxinwit.append(CTxInWitness())
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
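    # With witness data present the serialization above follows BIP144:
    # [nVersion][marker 0x00][flag 0x01][vin][vout][witness stacks][nLockTime];
    # the 0x00 marker is the length byte of the empty dummy vector written above.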
# Regular serialization is without witness -- must explicitly
# call serialize_with_witness to include witness data.
def serialize(self):
return self.serialize_without_witness()
# Recalculate the txid (transaction hash without witness)
def rehash(self):
self.sha256 = None
self.calc_sha256()
# We will only cache the serialization without witness in
# self.sha256 and self.hash -- those are expected to be the txid.
def calc_sha256(self, with_witness=False):
if with_witness:
# Don't cache the result, just return it
return uint256_from_str(hash256(self.serialize_with_witness()))
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
self.hash = encode(hash256(self.serialize())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime)
class CBlockHeader(object):
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nHeight = header.nHeight
self.nReserved = copy.copy(header.nReserved)
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.nSolution = header.nSolution
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 1
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nHeight = 0
self.nReserved = [0] * 7
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.sha256 = None
self.hash = None
self.nSolution = b""
def deserialize(self, f, legacy=True):
if legacy:
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.nHeight = 0
self.nReserved = [0] * 7
self.nSolution = b""
else:
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nHeight = struct.unpack("<I", f.read(4))[0]
self.nReserved = [struct.unpack("<I", f.read(4))[0] for _ in range(7)]
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = deser_uint256(f)
self.nSolution = deser_byte_vector(f)
self.sha256 = None
self.hash = None
def serialize_header(self, legacy=True):
r = b""
if legacy:
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce & 0xFFFFFFFF)
return r
else:
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nHeight)
for i in range(7):
r += struct.pack("<I", self.nReserved[i])
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += ser_uint256(self.nNonce)
r += ser_byte_vector(self.nSolution)
return r
def serialize(self, legacy=True):
return self.serialize_header(legacy=legacy)
def calc_sha256(self):
if self.sha256 is None:
if self.nHeight < ACH_REGTEST_HARDFORK_HEIGHT:
r = self.serialize_header(legacy=True)
else:
r = self.serialize_header(legacy=False)
self.sha256 = uint256_from_str(hash256(r))
self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nHeight=%d nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot, self.nHeight,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f, legacy=True):
super(CBlock, self).deserialize(f, legacy=legacy)
self.vtx = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
if with_witness:
r += ser_vector(self.vtx, "serialize_with_witness")
else:
r += ser_vector(self.vtx)
return r
# Calculate the merkle root given a vector of transaction hashes
@classmethod
def get_merkle_root(cls, hashes):
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
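    # Hashing proceeds bottom-up in pairs; an odd leaf is paired with itself, e.g. for
    # hashes [a, b, c] the first round produces [hash256(a + b), hash256(c + c)].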
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
return self.get_merkle_root(hashes)
def calc_witness_merkle_root(self):
# For witness root purposes, the hash of the
# coinbase, with witness, is defined to be 0...0
hashes = [ser_uint256(0)]
for tx in self.vtx[1:]:
# Calculate the hashes with witness data
hashes.append(ser_uint256(tx.calc_sha256(True)))
return self.get_merkle_root(hashes)
def is_valid(self):
# TODO(h4x3rotab): Not implemented for Equihash.
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
# TODO(h4x3rotab): Not implemented for Equihash.
self.rehash()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nHeight=%d nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot, self.nHeight,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class CUnsignedAlert(object):
def __init__(self):
self.nVersion = 1
self.nRelayUntil = 0
self.nExpiration = 0
self.nID = 0
self.nCancel = 0
self.setCancel = []
self.nMinVer = 0
self.nMaxVer = 0
self.setSubVer = []
self.nPriority = 0
self.strComment = b""
self.strStatusBar = b""
self.strReserved = b""
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
self.nExpiration = struct.unpack("<q", f.read(8))[0]
self.nID = struct.unpack("<i", f.read(4))[0]
self.nCancel = struct.unpack("<i", f.read(4))[0]
self.setCancel = deser_int_vector(f)
self.nMinVer = struct.unpack("<i", f.read(4))[0]
self.nMaxVer = struct.unpack("<i", f.read(4))[0]
self.setSubVer = deser_string_vector(f)
self.nPriority = struct.unpack("<i", f.read(4))[0]
self.strComment = deser_string(f)
self.strStatusBar = deser_string(f)
self.strReserved = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<q", self.nRelayUntil)
r += struct.pack("<q", self.nExpiration)
r += struct.pack("<i", self.nID)
r += struct.pack("<i", self.nCancel)
r += ser_int_vector(self.setCancel)
r += struct.pack("<i", self.nMinVer)
r += struct.pack("<i", self.nMaxVer)
r += ser_string_vector(self.setSubVer)
r += struct.pack("<i", self.nPriority)
r += ser_string(self.strComment)
r += ser_string(self.strStatusBar)
r += ser_string(self.strReserved)
return r
def __repr__(self):
return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
% (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
self.strComment, self.strStatusBar, self.strReserved)
class CAlert(object):
def __init__(self):
self.vchMsg = b""
self.vchSig = b""
def deserialize(self, f):
self.vchMsg = deser_string(f)
self.vchSig = deser_string(f)
def serialize(self):
r = b""
r += ser_string(self.vchMsg)
r += ser_string(self.vchSig)
return r
def __repr__(self):
return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
% (len(self.vchMsg), len(self.vchSig))
class PrefilledTransaction(object):
def __init__(self, index=0, tx = None):
self.index = index
self.tx = tx
def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
def serialize(self, with_witness=False):
r = b""
r += ser_compact_size(self.index)
if with_witness:
r += self.tx.serialize_with_witness()
else:
r += self.tx.serialize_without_witness()
return r
def serialize_with_witness(self):
return self.serialize(with_witness=True)
def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs(object):
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []
def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for i in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)
# When using version 2 compact blocks, we must serialize with_witness.
def serialize(self, with_witness=False):
r = b""
r += self.header.serialize()
r += struct.pack("<Q", self.nonce)
r += ser_compact_size(self.shortids_length)
for x in self.shortids:
# We only want the first 6 bytes
r += struct.pack("<Q", x)[0:6]
if with_witness:
r += ser_vector(self.prefilled_txn, "serialize_with_witness")
else:
r += ser_vector(self.prefilled_txn)
return r
def __repr__(self):
return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
def serialize(self):
return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
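# i.e. a BIP152 shortid is the SipHash-2-4 of the (w)txid, keyed with k0/k1 derived
# from the block header and nonce, truncated to the low 48 bits.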
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs(object):
def __init__(self, p2pheaders_and_shortids = None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
self.use_witness = False
if p2pheaders_and_shortids != None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index
def to_p2p(self):
if self.use_witness:
ret = P2PHeaderAndShortWitnessIDs()
else:
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret
def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [ key0, key1 ]
# Version 2 compact blocks use wtxid in shortids (rather than txid)
def initialize_from_block(self, block, nonce=0, prefill_list = [0], use_witness = False):
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
self.shortids = []
self.use_witness = use_witness
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
tx_hash = block.vtx[i].sha256
if use_witness:
tx_hash = block.vtx[i].calc_sha256(with_witness=True)
self.shortids.append(calculate_shortid(k0, k1, tx_hash))
def __repr__(self):
return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest(object):
def __init__(self, blockhash=0, indexes = None):
self.blockhash = blockhash
self.indexes = indexes if indexes != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for i in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_compact_size(len(self.indexes))
for x in self.indexes:
r += ser_compact_size(x)
return r
# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x-last_index-1)
last_index = x
def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x+last_index+1)
last_index = absolute_indexes[-1]
return absolute_indexes
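    # The indexes are stored differentially encoded, e.g. absolute indexes [1, 3, 4]
    # become [1, 1, 0] via from_absolute() and are recovered by to_absolute().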
def __repr__(self):
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions(object):
def __init__(self, blockhash=0, transactions = None):
self.blockhash = blockhash
self.transactions = transactions if transactions != None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += ser_uint256(self.blockhash)
if with_witness:
r += ser_vector(self.transactions, "serialize_with_witness")
else:
r += ser_vector(self.transactions)
return r
def __repr__(self):
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
# Objects that correspond to messages on the wire
class msg_version(object):
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = 1
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
self.nRelay = MY_RELAY
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
if self.nVersion >= 70001:
# Relay field is optional for version 70001 onwards
try:
self.nRelay = struct.unpack("<b", f.read(1))[0]
except:
self.nRelay = 0
else:
self.nRelay = 0
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
r += struct.pack("<b", self.nRelay)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack(object):
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr(object):
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_alert(object):
command = b"alert"
def __init__(self):
self.alert = CAlert()
def deserialize(self, f):
self.alert = CAlert()
self.alert.deserialize(f)
def serialize(self):
r = b""
r += self.alert.serialize()
return r
def __repr__(self):
return "msg_alert(alert=%s)" % (repr(self.alert), )
class msg_inv(object):
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata(object):
command = b"getdata"
def __init__(self, inv=None):
self.inv = inv if inv != None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks(object):
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx(object):
command = b"tx"
    def __init__(self, tx=None):
        # Avoid a mutable default argument; create a fresh transaction per message.
        self.tx = tx if tx is not None else CTransaction()
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize_without_witness()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_witness_tx(msg_tx):
def serialize(self):
return self.tx.serialize_with_witness()
class msg_block(object):
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize()
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic(object):
def __init__(self, command, data=None):
self.command = command
self.data = data
def serialize(self):
return self.data
def __repr__(self):
return "msg_generic()"
class msg_witness_block(msg_block):
def serialize(self):
r = self.block.serialize(with_witness=True)
return r
class msg_getaddr(object):
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping_prebip31(object):
command = b"ping"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_ping() (pre-bip31)"
class msg_ping(object):
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong(object):
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool(object):
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders(object):
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders(object):
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers(object):
command = b"headers"
def __init__(self):
self.headers = []
def deserialize(self, f):
# comment in bitcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject(object):
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
# Helper function
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf')):
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
attempt = 0
elapsed = 0
while attempt < attempts and elapsed < timeout:
with mininode_lock:
if predicate():
return True
attempt += 1
elapsed += 0.05
time.sleep(0.05)
return False
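# Illustrative use (not from the original file): wait_until polls the predicate
# under mininode_lock and returns False, rather than raising, on timeout, so
# callers normally wrap it in an assert, e.g.
#   assert wait_until(lambda: node_cb.connected, timeout=30)
# where node_cb is assumed to be a NodeConnCB instance (defined below).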
class msg_feefilter(object):
command = b"feefilter"
def __init__(self, feerate=0):
self.feerate = feerate
def deserialize(self, f):
self.feerate = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.feerate)
return r
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct(object):
command = b"sendcmpct"
def __init__(self):
self.announce = False
self.version = 1
def deserialize(self, f):
self.announce = struct.unpack("<?", f.read(1))[0]
self.version = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<?", self.announce)
r += struct.pack("<Q", self.version)
return r
def __repr__(self):
return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock(object):
command = b"cmpctblock"
def __init__(self, header_and_shortids = None):
self.header_and_shortids = header_and_shortids
def deserialize(self, f):
self.header_and_shortids = P2PHeaderAndShortIDs()
self.header_and_shortids.deserialize(f)
def serialize(self):
r = b""
r += self.header_and_shortids.serialize()
return r
def __repr__(self):
return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn(object):
command = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
r = b""
r += self.block_txn_request.serialize()
return r
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn(object):
command = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
r = b""
r += self.block_transactions.serialize()
return r
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_witness_blocktxn(msg_blocktxn):
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=True)
return r
class NodeConnCB(object):
"""Callback and helper functions for P2P connection to a bitcoind node.
Individual testcases should subclass this and override the on_* methods
if they want to alter message handling behaviour.
"""
def __init__(self):
# Track whether we have a P2P connection open to the node
self.connected = False
self.connection = None
# Track number of messages of each type received and the most recent
# message of each type
self.message_count = defaultdict(int)
self.last_message = {}
# A count of the number of ping messages we've sent to the node
self.ping_counter = 1
# deliver_sleep_time is helpful for debugging race conditions in p2p
# tests; it causes message delivery to sleep for the specified time
# before acquiring the global lock and delivering the next message.
self.deliver_sleep_time = None
# Remember the services our peer has advertised
self.peer_services = None
# Message receiving methods
def deliver(self, conn, message):
"""Receive message and dispatch message to appropriate callback.
We keep a count of how many of each message type has been received
and the most recent message of each type.
Optionally waits for deliver_sleep_time before dispatching message.
"""
deliver_sleep = self.get_deliver_sleep_time()
if deliver_sleep is not None:
time.sleep(deliver_sleep)
with mininode_lock:
try:
command = message.command.decode('ascii')
self.message_count[command] += 1
self.last_message[command] = message
getattr(self, 'on_' + command)(conn, message)
except:
print("ERROR delivering %s (%s)" % (repr(message),
sys.exc_info()[0]))
def set_deliver_sleep_time(self, value):
with mininode_lock:
self.deliver_sleep_time = value
def get_deliver_sleep_time(self):
with mininode_lock:
return self.deliver_sleep_time
# Callback methods. Can be overridden by subclasses in individual test
# cases to provide custom message handling behaviour.
def on_open(self, conn):
self.connected = True
def on_close(self, conn):
self.connected = False
self.connection = None
def on_addr(self, conn, message): pass
def on_alert(self, conn, message): pass
def on_block(self, conn, message): pass
def on_blocktxn(self, conn, message): pass
def on_cmpctblock(self, conn, message): pass
def on_feefilter(self, conn, message): pass
def on_getaddr(self, conn, message): pass
def on_getblocks(self, conn, message): pass
def on_getblocktxn(self, conn, message): pass
def on_getdata(self, conn, message): pass
def on_getheaders(self, conn, message): pass
def on_headers(self, conn, message): pass
    def on_mempool(self, conn, message): pass
def on_pong(self, conn, message): pass
def on_reject(self, conn, message): pass
def on_sendcmpct(self, conn, message): pass
def on_sendheaders(self, conn, message): pass
def on_tx(self, conn, message): pass
def on_inv(self, conn, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
conn.send_message(want)
def on_ping(self, conn, message):
if conn.ver_send > BIP0031_VERSION:
conn.send_message(msg_pong(message.nonce))
def on_verack(self, conn, message):
conn.ver_recv = conn.ver_send
self.verack_received = True
def on_version(self, conn, message):
if message.nVersion >= 209:
conn.send_message(msg_verack())
conn.ver_send = min(MY_VERSION, message.nVersion)
if message.nVersion < 209:
conn.ver_recv = conn.ver_send
conn.nServices = message.nServices
# Connection helper methods
def add_connection(self, conn):
self.connection = conn
def wait_for_disconnect(self, timeout=60):
test_function = lambda: not self.connected
assert wait_until(test_function, timeout=timeout)
# Message receiving helper methods
def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
assert wait_until(test_function, timeout=timeout)
def wait_for_getdata(self, timeout=60):
test_function = lambda: self.last_message.get("getdata")
assert wait_until(test_function, timeout=timeout)
def wait_for_getheaders(self, timeout=60):
test_function = lambda: self.last_message.get("getheaders")
assert wait_until(test_function, timeout=timeout)
def wait_for_inv(self, expected_inv, timeout=60):
"""Waits for an INV message and checks that the first inv object in the message was as expected."""
if len(expected_inv) > 1:
raise NotImplementedError("wait_for_inv() will only verify the first inv object")
test_function = lambda: self.last_message.get("inv") and \
self.last_message["inv"].inv[0].type == expected_inv[0].type and \
self.last_message["inv"].inv[0].hash == expected_inv[0].hash
assert wait_until(test_function, timeout=timeout)
def wait_for_verack(self, timeout=60):
test_function = lambda: self.message_count["verack"]
assert wait_until(test_function, timeout=timeout)
# Message sending helper functions
def send_message(self, message):
if self.connection:
self.connection.send_message(message)
else:
logger.error("Cannot send message. No connection to node!")
def send_and_ping(self, message):
self.send_message(message)
self.sync_with_ping()
# Sync up with the node
def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
assert wait_until(test_function, timeout=timeout)
self.ping_counter += 1
return True
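# Minimal sketch (not part of the original framework) of how a test case might
# subclass NodeConnCB and override one of the on_* handlers; the attribute name
# block_announced is an assumption made for this example only.
class ExampleNode(NodeConnCB):
    def __init__(self):
        super().__init__()
        self.block_announced = False
    def on_inv(self, conn, message):
        # Remember that the peer announced something, then fall back to the
        # default behaviour of requesting the announced objects.
        self.block_announced = True
        super().on_inv(conn, message)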
# The actual NodeConn class
# This class provides an interface for a p2p connection to a specified node
class NodeConn(asyncore.dispatcher):
messagemap = {
b"version": msg_version,
b"verack": msg_verack,
b"addr": msg_addr,
b"alert": msg_alert,
b"inv": msg_inv,
b"getdata": msg_getdata,
b"getblocks": msg_getblocks,
b"tx": msg_tx,
b"block": msg_block,
b"getaddr": msg_getaddr,
b"ping": msg_ping,
b"pong": msg_pong,
b"headers": msg_headers,
b"getheaders": msg_getheaders,
b"reject": msg_reject,
b"mempool": msg_mempool,
b"feefilter": msg_feefilter,
b"sendheaders": msg_sendheaders,
b"sendcmpct": msg_sendcmpct,
b"cmpctblock": msg_cmpctblock,
b"getblocktxn": msg_getblocktxn,
b"blocktxn": msg_blocktxn
}
MAGIC_BYTES = {
"mainnet": b"\xf9\xbe\xb4\xd9", # mainnet
"testnet3": b"\x0b\x11\x09\x07", # testnet3
"regtest": b"\xfa\xbf\xb5\xda", # regtest
}
def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=NODE_NETWORK, send_version=True):
asyncore.dispatcher.__init__(self, map=mininode_socket_map)
self.dstaddr = dstaddr
self.dstport = dstport
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.sendbuf = b""
self.recvbuf = b""
self.ver_send = 209
self.ver_recv = 209
self.last_sent = 0
self.state = "connecting"
self.network = net
self.cb = callback
self.disconnect = False
self.nServices = 0
if send_version:
# stuff version msg into sendbuf
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.send_message(vt, True)
logger.info('Connecting to Bitcoin Node: %s:%d' % (self.dstaddr, self.dstport))
try:
self.connect((dstaddr, dstport))
except:
self.handle_close()
self.rpc = rpc
def handle_connect(self):
if self.state != "connected":
logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
self.state = "connected"
self.cb.on_open(self)
def handle_close(self):
logger.debug("Closing connection to: %s:%d" % (self.dstaddr, self.dstport))
self.state = "closed"
self.recvbuf = b""
self.sendbuf = b""
try:
self.close()
except:
pass
self.cb.on_close(self)
def handle_read(self):
try:
t = self.recv(8192)
if len(t) > 0:
self.recvbuf += t
self.got_data()
except:
pass
def readable(self):
return True
def writable(self):
with mininode_lock:
pre_connection = self.state == "connecting"
length = len(self.sendbuf)
return (length > 0 or pre_connection)
def handle_write(self):
with mininode_lock:
# asyncore does not expose socket connection, only the first read/write
# event, thus we must check connection manually here to know when we
# actually connect
if self.state == "connecting":
self.handle_connect()
if not self.writable():
return
try:
sent = self.send(self.sendbuf)
except:
self.handle_close()
return
self.sendbuf = self.sendbuf[sent:]
def got_data(self):
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if self.ver_recv < 209:
if len(self.recvbuf) < 4 + 12 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = None
if len(self.recvbuf) < 4 + 12 + 4 + msglen:
return
msg = self.recvbuf[4+12+4:4+12+4+msglen]
self.recvbuf = self.recvbuf[4+12+4+msglen:]
else:
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command in self.messagemap:
f = BytesIO(msg)
t = self.messagemap[command]()
t.deserialize(f)
self.got_message(t)
else:
logger.warning("Received unknown command from %s:%d: '%s' %s" % (self.dstaddr, self.dstport, command, repr(msg)))
except Exception as e:
            logger.exception('got_data: %s', repr(e))
def send_message(self, message, pushbuf=False):
if self.state != "connected" and not pushbuf:
raise IOError('Not connected, no pushbuf')
self._log_message("send", message)
command = message.command
data = message.serialize()
tmsg = self.MAGIC_BYTES[self.network]
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
if self.ver_send >= 209:
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
with mininode_lock:
self.sendbuf += tmsg
self.last_sent = time.time()
def got_message(self, message):
if message.command == b"version":
if message.nVersion <= BIP0031_VERSION:
self.messagemap[b'ping'] = msg_ping_prebip31
if self.last_sent + 30 * 60 < time.time():
self.send_message(self.messagemap[b'ping']())
self._log_message("receive", message)
self.cb.deliver(self, message)
def _log_message(self, direction, msg):
if direction == "send":
log_message = "Send message to "
elif direction == "receive":
log_message = "Received message from "
log_message += "%s:%d: %s" % (self.dstaddr, self.dstport, repr(msg)[:500])
if len(log_message) > 500:
log_message += "... (msg truncated)"
logger.debug(log_message)
def disconnect_node(self):
self.disconnect = True
class NetworkThread(Thread):
def run(self):
while mininode_socket_map:
# We check for whether to disconnect outside of the asyncore
# loop to workaround the behavior of asyncore when using
# select
disconnected = []
for fd, obj in mininode_socket_map.items():
if obj.disconnect:
disconnected.append(obj)
[ obj.handle_close() for obj in disconnected ]
asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
# An exception we can raise if we detect a potential disconnect
# (p2p or rpc) before the test is complete
class EarlyDisconnectError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
|
py | 7dfd52eda1f2d1edc2603fa4546ae689d56207e4 | def first_and_last(array, value):
    """
    Return the first and last index of value in a sorted array, or [-1, -1].
    O_time(log(n))
    O_space(1)
    """
    def bisect(after):
        # Smallest index whose element is >= value (after=False) or > value (after=True).
        lo, hi = 0, len(array)
        while lo < hi:
            mid = (lo + hi) // 2
            if array[mid] < value or (after and array[mid] == value):
                lo = mid + 1
            else:
                hi = mid
        return lo
    first = bisect(after=False)
    last = bisect(after=True) - 1
    if first > last:
        return [-1, -1]
    return [first, last]
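if __name__ == '__main__':
    # Illustrative checks (not part of the original file):
    assert first_and_last([1, 2, 4, 4, 4, 7, 9], 4) == [2, 4]
    assert first_and_last([1, 2, 3], 5) == [-1, -1]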
|
py | 7dfd52f3c2bc2850dd2421bf7f024f67776d4fc2 | import logging
from util.regex import REGULAR_PAGE_RANGE_REGEX, NUMBERED_PAGE_RANGE_REGEX
logger = logging.getLogger("dblp-retriever_logger")
class Paper(object):
""" Paper metadata from DBLP. """
def __init__(self, venue, year, identifier, heading, paper_id, title, authors, page_range, electronic_edition):
self.venue = venue
self.year = year
self.identifier = identifier
self.heading = heading
self.paper_id = paper_id
self.title = title
self.authors = authors
self.article_number = -1
self.electronic_edition = electronic_edition
self.comment = ""
if page_range is not None:
self.page_range = page_range
self.first_page = -1
self.last_page = -1
self.length = -1
self.regular_page_range = REGULAR_PAGE_RANGE_REGEX.fullmatch(page_range)
self.numbered_page_range = NUMBERED_PAGE_RANGE_REGEX.fullmatch(page_range)
# determine paper length
if page_range == "":
# empty page range
self.first_page = -1
self.last_page = -1
self.length = 0
self.append_comment("empty_page_range")
logger.warning("Empty page range for paper " + str(self))
elif self.regular_page_range:
page_range = Paper.split_page_range(self.page_range)
if len(page_range) == 1:
# only one page, e.g. "5"
self.first_page = int(page_range[0])
self.last_page = int(page_range[0])
self.length = 1
elif len(page_range) == 2:
# regular page range, e.g. "60-71"
self.first_page = int(page_range[0])
self.last_page = int(page_range[1])
self.length = self.last_page - self.first_page + 1
elif self.numbered_page_range:
page_range = Paper.split_numbered_page_range(self.page_range)
if len(page_range) == 2:
# only one page, e.g. "27:1"
self.article_number = int(page_range[0])
self.first_page = int(page_range[1])
self.last_page = int(page_range[1])
self.length = 1
elif len(page_range) == 4:
# numbered article page range, e.g., "18:1-18:33"
self.article_number = int(page_range[0])
self.first_page = int(page_range[1])
self.last_page = int(page_range[3])
self.length = self.last_page - self.first_page + 1
def append_comment(self, comment):
if self.comment == "":
self.comment = comment
else:
self.comment = self.comment + ";" + comment
def __str__(self):
return str(self.electronic_edition)
def get_column_values(self, with_pages=False):
if with_pages:
return [self.venue, self.year, self.identifier, self.heading, self.paper_id, self.title, self.authors,
self.page_range, self.length, self.electronic_edition, self.comment]
else:
return [self.venue, self.year, self.identifier, self.heading, self.paper_id, self.title, self.authors,
self.electronic_edition, self.comment]
@classmethod
def get_column_names(cls, with_pages=False):
if with_pages:
return ["venue", "year", "identifier", "heading", "paper_id", "title", "authors", "page_range", "length",
"electronic_edition", "comment"]
else:
return ["venue", "year", "identifier", "heading", "paper_id", "title", "authors", "electronic_edition",
"comment"]
@classmethod
def split_page_range(cls, page_range):
return str(page_range).split("-")
@classmethod
def split_numbered_page_range(cls, numbered_page_range):
page_range = Paper.split_page_range(numbered_page_range)
fragments = []
for page in page_range:
fragments = fragments + str(page).split(":")
return fragments
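if __name__ == "__main__":
    # Illustrative check of the page-range helpers (not part of the original module):
    assert Paper.split_page_range("60-71") == ["60", "71"]
    assert Paper.split_numbered_page_range("18:1-18:33") == ["18", "1", "18", "33"]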
|
py | 7dfd5303872a7e697d7ce8183705d600b5200384 | # -*- coding: utf-8 -*- #
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provide a command to list tiers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
@base.ReleaseTracks(base.ReleaseTrack.GA, base.ReleaseTrack.BETA,
base.ReleaseTrack.ALPHA)
class Tiers(base.Group):
"""Provide a command to list tiers."""
|
py | 7dfd53420832c0c3629098310c957a11ea7145d1 | #!/usr/bin/env python3
# @generated AUTOGENERATED file. Do not Change!
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from functools import partial
from typing import Any, Callable, List, Mapping, Optional
from dataclasses_json import dataclass_json
from marshmallow import fields as marshmallow_fields
from .datetime_utils import fromisoformat
DATETIME_FIELD = field(
metadata={
"dataclasses_json": {
"encoder": datetime.isoformat,
"decoder": fromisoformat,
"mm_field": marshmallow_fields.DateTime(format="iso"),
}
}
)
@dataclass_json
@dataclass
class EquipmentPositionsQuery:
__QUERY__ = """
query EquipmentPositionsQuery($id: ID!) {
equipment: node(id: $id) {
... on Equipment {
equipmentType {
positionDefinitions {
id
name
}
}
positions {
definition {
id
name
}
attachedEquipment {
id
name
}
}
}
}
}
"""
@dataclass_json
@dataclass
class EquipmentPositionsQueryData:
@dataclass_json
@dataclass
class Node:
@dataclass_json
@dataclass
class EquipmentType:
@dataclass_json
@dataclass
class EquipmentPositionDefinition:
id: str
name: str
positionDefinitions: List[EquipmentPositionDefinition]
@dataclass_json
@dataclass
class EquipmentPosition:
@dataclass_json
@dataclass
class EquipmentPositionDefinition:
id: str
name: str
@dataclass_json
@dataclass
class Equipment:
id: str
name: str
definition: EquipmentPositionDefinition
attachedEquipment: Optional[Equipment] = None
equipmentType: EquipmentType
positions: List[EquipmentPosition]
equipment: Optional[Node] = None
data: Optional[EquipmentPositionsQueryData] = None
errors: Optional[Any] = None
@classmethod
# fmt: off
def execute(cls, client, id: str):
# fmt: off
variables = {"id": id}
response_text = client.call(cls.__QUERY__, variables=variables)
return cls.from_json(response_text).data
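# Illustrative usage sketch (the client object below is an assumption; any object
# exposing a call(query, variables=...) method that returns the response JSON works):
#   data = EquipmentPositionsQuery.execute(client, id="12345")
#   if data.equipment is not None:
#       for position in data.equipment.positions:
#           print(position.definition.name, position.attachedEquipment)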
|
py | 7dfd53fe22f88277b5b32ddc1102cae991eaccc2 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.testsdk import ScenarioTest, ResourceGroupPreparer
POOL_DEFAULT = "--service-level 'Premium' --size 4"
VOLUME_DEFAULT = "--service-level 'Premium' --usage-threshold 100"
LOCATION = "westcentralus"
# No tidy up of tests required. The resource group is automatically removed
class AzureNetAppFilesSnapshotServiceScenarioTest(ScenarioTest):
def setup_vnet(self, rg, vnet_name, subnet_name):
self.cmd("az network vnet create -n %s --resource-group %s -l %s --address-prefix 10.5.0.0/16" % (vnet_name, rg, LOCATION))
self.cmd("az network vnet subnet create -n %s --vnet-name %s --address-prefixes '10.5.0.0/24' --delegations 'Microsoft.Netapp/volumes' -g %s" % (subnet_name, vnet_name, rg))
def current_subscription(self):
subs = self.cmd("az account show").get_output_in_json()
return subs['id']
def create_volume(self, account_name, pool_name, volume_name1, rg, tags=None):
vnet_name = self.create_random_name(prefix='cli-vnet-', length=24)
creation_token = volume_name1
vnet_name = "cli-vnet-lefr-02"
subnet_name = "cli-subnet-lefr-02"
tag = "--tags %s" % tags if tags is not None else ""
self.setup_vnet(rg, vnet_name, subnet_name)
self.cmd("netappfiles account create -g %s -a '%s' -l %s" % (rg, account_name, LOCATION)).get_output_in_json()
self.cmd("netappfiles pool create -g %s -a %s -p %s -l %s %s %s" % (rg, account_name, pool_name, LOCATION, POOL_DEFAULT, tag)).get_output_in_json()
volume1 = self.cmd("netappfiles volume create --resource-group %s --account-name %s --pool-name %s --volume-name %s -l %s %s --creation-token %s --vnet %s --subnet %s %s" % (rg, account_name, pool_name, volume_name1, LOCATION, VOLUME_DEFAULT, creation_token, vnet_name, subnet_name, tag)).get_output_in_json()
return volume1
@ResourceGroupPreparer()
def test_create_delete_snapshots(self):
account_name = self.create_random_name(prefix='cli-acc-', length=24)
pool_name = self.create_random_name(prefix='cli-pool-', length=24)
volume_name = self.create_random_name(prefix='cli-vol-', length=24)
snapshot_name = self.create_random_name(prefix='cli-sn-', length=24)
rg = '{rg}'
volume = self.create_volume(account_name, pool_name, volume_name, rg)
snapshot = self.cmd("az netappfiles snapshot create -g %s -a %s -p %s -v %s -s %s -l %s --file-system-id %s" % (rg, account_name, pool_name, volume_name, snapshot_name, LOCATION, volume['fileSystemId'])).get_output_in_json()
assert snapshot['name'] == account_name + '/' + pool_name + '/' + volume_name + '/' + snapshot_name
snapshot_list = self.cmd("az netappfiles snapshot list --resource-group %s --account-name %s --pool-name %s --volume-name %s" % (rg, account_name, pool_name, volume_name)).get_output_in_json()
assert len(snapshot_list) == 1
self.cmd("az netappfiles snapshot delete -g %s -a %s -p %s -v %s -s %s" % (rg, account_name, pool_name, volume_name, snapshot_name))
snapshot_list = self.cmd("az netappfiles snapshot list --resource-group %s --account-name %s --pool-name %s --volume-name %s" % (rg, account_name, pool_name, volume_name)).get_output_in_json()
assert len(snapshot_list) == 0
@ResourceGroupPreparer()
def test_list_snapshots(self):
account_name = self.create_random_name(prefix='cli-acc-', length=24)
pool_name = self.create_random_name(prefix='cli-pool-', length=24)
volume_name = self.create_random_name(prefix='cli-vol-', length=24)
snapshot_name1 = self.create_random_name(prefix='cli-sn-', length=24)
snapshot_name2 = self.create_random_name(prefix='cli-sn-', length=24)
volume = self.create_volume(account_name, pool_name, volume_name, '{rg}')
self.cmd("az netappfiles snapshot create -g {rg} -a %s -p %s -v %s -s %s -l %s --file-system-id %s" % (account_name, pool_name, volume_name, snapshot_name1, LOCATION, volume['fileSystemId'])).get_output_in_json()
self.cmd("az netappfiles snapshot create -g {rg} -a %s -p %s -v %s -s %s -l %s --file-system-id %s" % (account_name, pool_name, volume_name, snapshot_name2, LOCATION, volume['fileSystemId'])).get_output_in_json()
snapshot_list = self.cmd("az netappfiles snapshot list -g {rg} -a %s -p %s -v %s" % (account_name, pool_name, volume_name)).get_output_in_json()
assert len(snapshot_list) == 2
@ResourceGroupPreparer()
def test_get_snapshot(self):
account_name = self.create_random_name(prefix='cli-acc-', length=24)
pool_name = self.create_random_name(prefix='cli-pool-', length=24)
volume_name = self.create_random_name(prefix='cli-vol-', length=24)
snapshot_name = self.create_random_name(prefix='cli-sn-', length=24)
volume = self.create_volume(account_name, pool_name, volume_name, '{rg}')
snapshot = self.cmd("az netappfiles snapshot create -g {rg} -a %s -p %s -v %s -s %s -l %s --file-system-id %s" % (account_name, pool_name, volume_name, snapshot_name, LOCATION, volume['fileSystemId'])).get_output_in_json()
snapshot = self.cmd("az netappfiles snapshot show -g {rg} -a %s -p %s -v %s -s %s" % (account_name, pool_name, volume_name, snapshot_name)).get_output_in_json()
assert snapshot['name'] == account_name + '/' + pool_name + '/' + volume_name + '/' + snapshot_name
snapshot_from_id = self.cmd("az netappfiles snapshot show --ids %s" % snapshot['id']).get_output_in_json()
assert snapshot_from_id['name'] == account_name + '/' + pool_name + '/' + volume_name + '/' + snapshot_name
|
py | 7dfd54317b5542af9f15db32cda7dd9386e4ab63 | # pylint: disable=invalid-name, unused-argument
"""CoreML frontend."""
from __future__ import absolute_import as _abs
import numpy as np
import tvm
from .. import symbol as _sym
from .._base import string_types
from .common import SymbolTable
__all__ = ['from_coreml']
def NeuralNetworkImageScaler(op, insym, symtab):
# this changes the symbol
biases = np.array([op.blueBias, op.greenBias, op.redBias]).reshape([3, 1, 1])
bias = symtab.new_const(biases)
ret = _sym.__mul_scalar__(insym, scalar=op.channelScale)
ret = _sym.broadcast_add(ret, bias)
return ret
def NeuralNetworkMeanImage(op, insym, symtab):
# this changes the symbol
ret = _sym.elemwise_sub(insym, scalar=op.meanImage)
return ret
def ConvolutionLayerParams(op, insym, symtab):
"""Convolution layer params."""
weights = symtab.new_const(np.array(list(op.weights.floatValue)).reshape(
tuple([op.outputChannels, op.kernelChannels] + list(op.kernelSize))))
if op.hasBias:
biases = symtab.new_const(list(op.bias.floatValue))
dilation = list(op.dilationFactor)
if not dilation:
dilation = [1, 1]
params = {'channels':op.outputChannels,
'kernel_size':list(op.kernelSize),
'strides':list(op.stride),
'dilation': dilation,
'use_bias': op.hasBias,
'groups':op.nGroups}
if op.WhichOneof('ConvolutionPaddingType') == 'valid':
valid = op.valid
padding = [b.startEdgeSize for b in valid.paddingAmounts.borderAmounts]
padding2 = [b.endEdgeSize for b in valid.paddingAmounts.borderAmounts]
for i, j in zip(padding, padding2):
assert i == j, "Asymmetry padding not supported"
if padding:
params['padding'] = padding
elif op.WhichOneof('ConvolutionPaddingType') == 'same':
kernel = params['kernel_size']
pad_h = kernel[0] - 1
pad_w = kernel[1] - 1
pad_t = pad_h // 2
pad_l = pad_w // 2
pad_b = pad_h - pad_t
pad_r = pad_w - pad_l
assert pad_t == pad_r and pad_l == pad_b, "Asymmetry padding not supported"
params['padding'] = [pad_t, pad_l]
else:
raise NotImplementedError("Valid/Same convolution padding implemented")
if op.hasBias:
pos = [insym, weights, biases]
else:
pos = [insym, weights]
if op.isDeconvolution:
ret = _sym.conv2d_transpose(*pos, **params)
else:
ret = _sym.conv2d(*pos, **params)
# consume padding layer
if symtab.in_padding:
params['padding'] = [sum(x) for x in zip(params.get('padding', [0, 0]), symtab.paddings)]
symtab.clear_padding()
return ret
def BatchnormLayerParams(op, insym, symtab):
"""Get layer of batchnorm parameter"""
# this changes the symbol
if op.instanceNormalization:
raise NotImplementedError("instance normalization not implemented")
else:
params = {'gamma':symtab.new_const(list(op.gamma.floatValue)),
'beta':symtab.new_const(list(op.beta.floatValue)),
'moving_mean':symtab.new_const(list(op.mean.floatValue)),
'moving_var': symtab.new_const(list(op.variance.floatValue)),
'epsilon': op.epsilon}
return _sym.batch_norm(data=insym, **params)
def ActivationParams(op, insym, symtab):
"""Get activation parameters"""
whichActivation = op.WhichOneof('NonlinearityType')
par = getattr(op, whichActivation)
if whichActivation == 'linear':
return _sym.__add_scalar__(_sym.__mul_scalar__(insym, scalar=par.alpha), scalar=par.beta)
elif whichActivation == 'ReLU':
return _sym.relu(insym)
elif whichActivation == 'leakyReLU':
return _sym.leaky_relu(insym, alpha=par.alpha)
elif whichActivation == 'thresholdedReLU':
alpha_tensor = _sym.full_like(insym, fill_value=float(par.alpha))
return _sym.elemwise_mul(insym, _sym.greater(insym, alpha_tensor))
elif whichActivation == 'PReLU':
return _sym.prelu(insym, alpha=par.alpha)
elif whichActivation == 'tanh':
return _sym.tanh(insym)
elif whichActivation == 'scaledTanh':
return _sym.__mul_scalar__(_sym.tanh(_sym.__mul_scalar__(
insym, scalar=par.beta)), scalar=par.alpha)
elif whichActivation == 'sigmoid':
return _sym.sigmoid(insym)
elif whichActivation == 'sigmoidHard':
transformX = (par.alpha * insym) + par.beta
return _sym.clip(transformX, a_min=0, a_max=1)
elif whichActivation == 'ELU':
return _sym.__mul_scalar__(_sym.__add_scalar__(
_sym.exp(insym), scalar=-1), scalar=par.alpha)
elif whichActivation == 'softsign':
return insym / (1 + (_sym.relu(insym) + _sym.relu(_sym.negative(insym))))
elif whichActivation == 'softplus':
return _sym.log(_sym.__add_scalar__(_sym.exp(insym), scalar=1))
elif whichActivation == 'parametricSoftplus':
alpha = list(par.alpha.floatValue)
        beta = list(par.beta.floatValue)
if len(alpha) == 1:
return _sym.__mul_scalar__(_sym.log(_sym.__add_scalar__(
_sym.exp(insym), scalar=beta[0])), scalar=alpha[0])
alpha = np.array(alpha).reshape((len(alpha), 1, 1))
beta = np.array(beta).reshape((len(beta), 1, 1))
alphasym = symtab.new_const(alpha)
betasym = symtab.new_const(beta)
return _sym.broadcast_mul(_sym.log(_sym.broadcast_add(
_sym.exp(insym), betasym)), alphasym)
else:
raise NotImplementedError('%s not implemented' % whichActivation)
def ScaleLayerParams(op, insym, symtab):
"""Scale layer params."""
scale = symtab.new_const(np.array(list(op.scale.floatValue)).reshape(
tuple(list(op.shapeScale) + [1, 1])))
# scale = _sym.reshape(scale, shape=tuple(list(op.shapeScale) + [1,1]))
ret = _sym.broadcast_mul(insym, scale)
if op.hasBias:
bias = symtab.new_const(np.array(list(op.bias.floatValue)).reshape(
tuple(list(op.shapeBias) + [1, 1])))
# bias = _sym.reshape(bias, shape=tuple(list(op.shapeBias) + [1,1]))
ret = _sym.broadcast_add(ret, bias)
return ret
def PoolingLayerParams(op, insym, symtab):
"""get pooling parameters"""
if op.globalPooling:
if op.type == 0:
return _sym.global_max_pool2d(insym)
elif op.type == 1:
return _sym.global_avg_pool2d(insym)
else:
raise NotImplementedError("Only max and average pooling implemented")
else:
params = {'pool_size':list(op.kernelSize),
'strides':list(op.stride)}
if op.WhichOneof('PoolingPaddingType') == 'valid':
valid = op.valid
padding = [b.startEdgeSize for b in valid.paddingAmounts.borderAmounts]
padding2 = [b.endEdgeSize for b in valid.paddingAmounts.borderAmounts]
for i, j in zip(padding, padding2):
assert i == j
params['padding'] = padding
elif op.WhichOneof('PoolingPaddingType') == 'includeLastPixel':
# I don't know if this is correct
valid = op.includeLastPixel
padding = list(valid.paddingAmounts)
params['padding'] = padding
params['ceil_mode'] = True
else:
raise NotImplementedError("Other convolution padding not implemented")
# consume padding layer
if symtab.in_padding:
params['padding'] = [sum(x) for x in zip(
params.get('padding', [0, 0]), symtab.paddings)]
symtab.clear_padding()
if op.type == 0:
return _sym.max_pool2d(insym, **params)
elif op.type == 1:
return _sym.avg_pool2d(insym, **params)
else:
raise NotImplementedError("Only max and average pooling implemented")
def SoftmaxLayerParams(op, insym, symtab):
return _sym.softmax(_sym.flatten(insym))
def InnerProductLayerParams(op, insym, symtab):
weights = symtab.new_const(np.array(op.weights.floatValue).reshape(
(op.outputChannels, op.inputChannels)))
par = {'weight':weights, 'use_bias':False, 'units':op.outputChannels}
if op.hasBias:
bias = symtab.new_const(np.array(op.bias.floatValue))
par['bias'] = bias
par['use_bias'] = True
return _sym.dense(data=insym, **par)
def AddLayerParams(op, insyms, symtab):
if not isinstance(insyms, list):
insyms = [insyms]
ret = insyms[0]
for i in range(1, len(insyms)):
ret = _sym.elemwise_add(ret, insyms[i])
if op.alpha > 0:
ret = _sym.__add_scalar__(ret, scalar=op.alpha)
return ret
def MultiplyLayerParams(op, insyms, symtab):
if not isinstance(insyms, list):
insyms = [insyms]
ret = insyms[0]
for i in range(1, len(insyms)):
ret = _sym.elemwise_mul(ret, insyms[i])
if op.alpha != 1:
ret = _sym.__mul_scalar__(ret, scalar=op.alpha)
return ret
def ConcatLayerParams(op, insyms, symtab):
if not isinstance(insyms, list):
insyms = [insyms]
if op.sequenceConcat:
raise NotImplementedError("Sequence Concat not supported")
ret = _sym.concatenate(*insyms, axis=1)
return ret
def FlattenLayerParams(op, insym, symtab):
if op.mode == 1:
insym = _sym.transpose(_sym.reshape(insym, shape=(0, 0, -1)), axes=(0, 2, 1))
return _sym.flatten(insym)
def PaddingLayerParams(op, insym, symtab):
"""Hacking for padding layer params."""
if op.WhichOneof('PaddingType') == 'constant':
constant = op.constant
if constant.value != 0:
raise NotImplementedError("Padding value {} not supported.".format(constant.value))
padding = [b.startEdgeSize for b in op.paddingAmounts.borderAmounts]
padding2 = [b.endEdgeSize for b in op.paddingAmounts.borderAmounts]
for i, j in zip(padding, padding2):
assert i == j
symtab.set_padding(padding)
else:
raise NotImplementedError("Only constant padding is supported now.")
return insym
def PermuteLayerParams(op, insym, symtab):
axes = tuple(op.axis)
return _sym.transpose(insym, axes=axes)
def UpsampleLayerParams(op, insym, symtab):
if op.scalingFactor[0] != op.scalingFactor[1]:
raise NotImplementedError("Upsampling only supported with same \
height and width scaling factor.")
interpolationMode = 'NEAREST_NEIGHBOR' if op.mode == 0 else 'BILINEAR'
return _sym.upsampling(insym, scale=op.scalingFactor[0], method=interpolationMode)
def L2NormalizeLayerParams(op, insym, symtab):
return _sym.l2_normalize(insym, eps=op.epsilon, axis=1)
def LRNLayerParams(op, insym, symtab):
par = {}
par['size'] = op.localSize
par['bias'] = op.k
par['alpha'] = op.alpha
par['beta'] = op.beta
par['axis'] = 1 #default layout is nchw
return _sym.lrn(data=insym, **par)
def AverageLayerParams(op, insyms, symtab):
if not isinstance(insyms, list) or len(insyms) < 2:
raise ValueError("Expect minimum 2 inputs")
count = len(insyms)
_sum = insyms[0]
for i in range(1, count):
_sum = _sym.broadcast_add(_sum, insyms[i])
return _sum / count
def MaxLayerParams(op, insyms, symtab):
if not isinstance(insyms, list) or len(insyms) < 2:
raise ValueError("Expect minimum 2 inputs")
_max = insyms[0]
for i in range(1, len(insyms)):
_max = _sym.broadcast_max(_max, insyms[i])
return _max
def MinLayerParams(op, insyms, symtab):
if not isinstance(insyms, list) or len(insyms) < 2:
raise ValueError("Expect minimum 2 inputs")
_min = insyms[0]
for i in range(1, len(insyms)):
_min = _sym.broadcast_min(_min, insyms[i])
return _min
_convert_map = {
'NeuralNetworkMeanImage': NeuralNetworkMeanImage,
'NeuralNetworkImageScaler': NeuralNetworkImageScaler,
'ConvolutionLayerParams':ConvolutionLayerParams,
'BatchnormLayerParams':BatchnormLayerParams,
'ActivationParams':ActivationParams,
'ScaleLayerParams':ScaleLayerParams,
'PoolingLayerParams':PoolingLayerParams,
'SoftmaxLayerParams':SoftmaxLayerParams,
'InnerProductLayerParams':InnerProductLayerParams,
'AddLayerParams':AddLayerParams,
'MultiplyLayerParams':MultiplyLayerParams,
'FlattenLayerParams':FlattenLayerParams,
'ConcatLayerParams':ConcatLayerParams,
'PaddingLayerParams':PaddingLayerParams,
'PermuteLayerParams':PermuteLayerParams,
'UpsampleLayerParams':UpsampleLayerParams,
'L2NormalizeLayerParams':L2NormalizeLayerParams,
'LRNLayerParams':LRNLayerParams,
'AverageLayerParams':AverageLayerParams,
'MaxLayerParams':MaxLayerParams,
'MinLayerParams':MinLayerParams,
}
def coreml_op_to_nnvm(op, inname, outname, symtab):
"""Convert coreml layer to nnvm layer.
Parameters
----------
coremlop: a coreml protobuf bit
prevsym: previous nnvm symbol
Returns:
-------
nnvm.sym.Symbol
Converted symbol
"""
classname = type(op).__name__
if classname not in _convert_map:
raise NotImplementedError("%s is not supported" % (classname))
if isinstance(inname, string_types):
insym = symtab.get_var(inname)
else:
insym = [symtab.get_var(i) for i in inname]
ret = _convert_map[classname](op, insym, symtab)
if outname:
symtab.set_var(outname, ret)
if classname != 'PaddingLayerParams':
assert not symtab.in_padding, "Previous padding not consumed by conv/pool"
def from_coreml(model):
"""Convert from coreml model into NNVM format.
Parameters
----------
model:
coremltools.models.MLModel of a NeuralNetworkClassifier
Returns
-------
sym : nnvm.Symbol
Compatible nnvm symbol
params : dict of str to tvm.NDArray
The parameter dict to be used by nnvm
"""
try:
import coremltools as cm
except ImportError:
raise ImportError('The coremltools package must be installed')
assert isinstance(model, cm.models.MLModel)
spec = model.get_spec()
modeltype = spec.WhichOneof('Type')
assert modeltype in ['neuralNetworkClassifier', 'neuralNetwork', 'neuralNetworkRegressor']
cc = getattr(spec, modeltype)
symtab = SymbolTable()
for i in spec.description.input:
symtab.get_var(i.name, must_contain=False)
for pp in cc.preprocessing:
whichpp = pp.WhichOneof('preprocessor')
ppmethod = getattr(pp, whichpp)
        # the NeuralNetworkImageScaler doesn't seem to have a featureName?
if whichpp == 'scaler':
for i in spec.description.input:
coreml_op_to_nnvm(ppmethod, i.name, i.name, symtab)
else:
coreml_op_to_nnvm(ppmethod, pp.featureName, pp.featureName, symtab)
for l in cc.layers:
layertype = l.WhichOneof('layer')
layerop = getattr(l, layertype)
assert len(l.output) == 1
if len(l.input) == 1:
coreml_op_to_nnvm(layerop, l.input[0], l.output[0], symtab)
else:
coreml_op_to_nnvm(layerop, list(l.input), l.output[0], symtab)
returns = [symtab.get_var(i.name, must_contain=False) for i in spec.description.output]
tvmparams = {k:tvm.nd.array(np.array(v, dtype=np.float32)) for k, v in symtab.params.items()}
# for now return first output
return returns[0], tvmparams
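# Hypothetical usage sketch (the model filename below is an assumption, not shipped here):
#   import coremltools as cm
#   mlmodel = cm.models.MLModel('mobilenet.mlmodel')
#   sym, params = from_coreml(mlmodel)
#   # sym and params can then be handed to nnvm.compiler.build(...)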
|
py | 7dfd5577635bfc03b6c4e846cf389bd8354c3687 | import csv
from urllib.request import Request, urlopen
import dateutil.parser
import re
from sys import argv
from bs4 import BeautifulSoup
import scrape_util
default_sale, base_url, prefix = scrape_util.get_market(argv)
report_path = '/market%20report.html'
strip_char = ';,. \n\t'
def get_sale_date(this_report):
"""Return the date of the sale."""
date_string = this_report['href'].split('/')[1]
date_string = date_string.replace('.html','')
sale_date = dateutil.parser.parse(date_string)
return sale_date
def is_sale(this_line):
"""Determine whether a given line describes a sale of cattle."""
    td_list = this_line.find_all('td')
    is_not_succinct = len(td_list) > 3
    has_price = False
    for td in td_list:
if re.search(r'[0-9]+\.[0-9]{2}', td.get_text()):
has_price = True
break
return bool(has_price and is_not_succinct)
def is_number(string):
"""Test whether a string is number-ish. Ignoring units like 'cwt' and 'hd'."""
if string:
string = re.sub(r'\$|[,-/]|cwt|he?a?d?', '', string, flags = re.IGNORECASE)
try:
float(string)
result = True
except ValueError:
result = False
else:
result = False
return result
def get_sale(word):
"""Convert the input into a dictionary, with keys matching
the CSV column headers in the scrape_util module.
"""
number_word = [idx for idx, val in enumerate(word) if is_number(val)]
cattle_string = word[number_word[0]+1]
# Skip lines describing sales of non-cattle
if re.search(r'lamb|ewe',cattle_string, re.IGNORECASE):
return {}
consignor_name = word[0].strip(strip_char)
if re.match(r'consignment +of', consignor_name, re.IGNORECASE):
consignor_name = ''
sale = {
'consignor_name': consignor_name.title()
}
weight_match = re.search(r'\(([0-9,]+)#\)',cattle_string)
if weight_match:
weight_string = weight_match.group(1).replace(',', '')
try:
float(weight_string)
sale['cattle_avg_weight'] = weight_string
except ValueError:
pass
cattle_string = cattle_string.replace(weight_match.group(),'')
sale['cattle_cattle'] = re.sub(r'[\r\n\t]', '', cattle_string).strip(strip_char)
price_string = word[number_word.pop()]
head_string = word[number_word.pop(0)].strip(strip_char).replace(',', '')
try:
int(head_string)
sale['cattle_head'] = head_string
except ValueError:
pass
# Price key depends on the existence of weight string
if number_word:
weight_string = word[number_word.pop()].strip(strip_char).replace(',', '')
try:
float(weight_string)
sale['cattle_avg_weight'] = weight_string
except ValueError:
pass
key = 'cattle_price_cwt'
else:
key = 'cattle_price'
match = re.search(r'([0-9,.]+)', price_string, re.IGNORECASE)
if match:
sale[key] = match.group(1).replace(',', '').strip(strip_char)
sale = {k:v for k,v in sale.items() if v}
return sale
def write_sale(line, this_default_sale, writer):
"""Extract sales from a list of report lines and write them to a CSV file."""
for this_line in line:
if is_sale(this_line):
sale = this_default_sale.copy()
word = []
for td in this_line.find_all('td'):
word.append(td.get_text().replace('\xa0',''))
if word[0].strip() == '':
word[0] = consignor_name
else:
consignor_name = word[0]
sale.update(get_sale(word))
if sale != this_default_sale:
writer.writerow(sale)
def main():
# Collect individual reports into a list
request = Request(
base_url + report_path,
headers = scrape_util.url_header,
)
with urlopen(request) as io:
soup = BeautifulSoup(io.read(), 'lxml')
a_tag = soup.find_all('a')
report = []
for this_a in a_tag:
if re.match(r'market(%20)?[0-9]{4}', this_a['href']):
report.append(this_a)
# Locate existing CSV files
archive = scrape_util.ArchiveFolder(argv, prefix)
# Write a CSV file for each report not in the archive
for this_report in report:
sale_date = get_sale_date(this_report)
io_name = archive.new_csv(sale_date)
# Stop iteration if this report is already archived
if not io_name:
continue
# Initialize the default sale dictionary
this_default_sale = default_sale.copy()
this_default_sale.update({
'sale_year': sale_date.year,
'sale_month': sale_date.month,
'sale_day': sale_date.day,
})
request = Request(
base_url + this_report['href'],
headers = scrape_util.url_header,
)
with urlopen(request) as io:
soup = BeautifulSoup(io.read(), 'lxml')
table = soup.find_all('table', id = re.compile('table[0-9]+'))
# List each line of the report
line = []
for this_table in table:
line += this_table.find_all('tr')
# Open a new CSV file and write each sale
with io_name.open('w', encoding='utf-8') as io:
writer = csv.DictWriter(io, scrape_util.header, lineterminator='\n')
writer.writeheader()
write_sale(line, this_default_sale, writer)
if __name__ == '__main__':
main()
|
py | 7dfd565f32e09b3def97660b05f6427be167e5b2 | """Implementation of Rule L048."""
from typing import Tuple, List
from sqlfluff.core.rules.doc_decorators import document_fix_compatible
from sqlfluff.rules.L006 import Rule_L006
@document_fix_compatible
class Rule_L048(Rule_L006):
"""Quoted literals should be surrounded by a single whitespace.
| **Anti-pattern**
| In this example, there is a space missing space between the string 'foo'
| and the keyword AS.
.. code-block:: sql
SELECT
'foo'AS bar
FROM foo
| **Best practice**
| Keep a single space.
.. code-block:: sql
SELECT
'foo' AS bar
FROM foo
"""
_target_elems: List[Tuple[str, str]] = [
("name", "quoted_literal"),
]
@staticmethod
def _missing_whitespace(seg, before=True):
"""Check whether we're missing whitespace given an adjoining segment.
This avoids flagging for commas after quoted strings.
https://github.com/sqlfluff/sqlfluff/issues/943
"""
simple_res = Rule_L006._missing_whitespace(seg, before=before)
if not before and seg and seg.is_type("comma"):
return False
return simple_res
|
py | 7dfd5680191734c5b6f5880455a295d949283028 | import json
import os
import numpy as np
from trescope import Trescope, Layout
from trescope.config import (Mesh3DConfig, PerspectiveCamera, FRONT3DConfig)
from trescope.toolbox import simpleDisplayOutputs, color_from_label, simpleFileOutputs
def visualize_front3d_mesh_type(output_id, front3d_scene_file):
with open(front3d_scene_file) as file:
front3d_scene = json.load(file)
type_cluster = {}
for mesh in front3d_scene['mesh']:
mesh_type = mesh['type']
if mesh_type not in type_cluster:
type_cluster[mesh_type] = {'xyz': np.array(mesh['xyz']).reshape((-1, 3)), 'faces': np.array(mesh['faces'], dtype=np.int).reshape((-1, 3))}
else:
xyz = np.array(mesh['xyz']).reshape((-1, 3))
faces = (np.array(mesh['faces'], dtype=np.int) + len(type_cluster[mesh_type]['xyz'])).reshape((-1, 3))
type_cluster[mesh_type]['xyz'] = np.vstack((type_cluster[mesh_type]['xyz'], xyz))
type_cluster[mesh_type]['faces'] = np.vstack((type_cluster[mesh_type]['faces'], faces))
for index, (mesh_type, mesh) in enumerate(type_cluster.items()):
Trescope().selectOutput(output_id).updateLayout(Layout().camera(PerspectiveCamera().up(0, 1, 0).eye(0, 2.3, 0)))
(Trescope()
.selectOutput(output_id)
.plotMesh3D(*mesh['xyz'].T)
.withConfig(Mesh3DConfig().indices(*mesh['faces'].T).color(color_from_label(index)).name(mesh_type)))
def visualize_front3d_color(output_id, front3d_scene_file):
(Trescope().selectOutput(output_id)
.plotFRONT3D(front3d_scene_file)
.withConfig(FRONT3DConfig()
.view('top')
.shapeLocalSource('../data/res/3D-FRONT-samples/3D-FUTURE-model/')
.hiddenMeshes(['Ceiling', 'CustomizedCeiling'])
.renderType('color')))
def visualize_front3d_depth(output_id, front3d_scene_file):
(Trescope().selectOutput(output_id)
.plotFRONT3D(front3d_scene_file)
.withConfig(FRONT3DConfig()
.view('top')
.shapeLocalSource('../data/res/3D-FRONT-samples/3D-FUTURE-model/')
.hiddenMeshes(['Ceiling', 'CustomizedCeiling'])
.renderType('depth')))
def visualize_front3d_normal(output_id, front3d_scene_file):
(Trescope().selectOutput(output_id)
.plotFRONT3D(front3d_scene_file)
.withConfig(FRONT3DConfig()
.view('top')
.shapeLocalSource('../data/res/3D-FRONT-samples/3D-FUTURE-model/')
.hiddenMeshes(['Ceiling', 'CustomizedCeiling'])
.renderType('normal')))
def main(output_type):
front3d_scene = '../data/res/3D-FRONT-samples/scenes/'
output_ids = [f'{file[:-5]}.{render_type}' for file in os.listdir(front3d_scene) for render_type in ['color', 'depth', 'normal', 'mesh_semantic']]
if 'file' == output_type:
Trescope().initialize(True, simpleFileOutputs('../data/gen/plot_front3d', output_ids, 720, 720))
else:
Trescope().initialize(True, simpleDisplayOutputs(3, 4, output_ids))
for index, file in enumerate(os.listdir(front3d_scene)):
name = file[:-5]
file = os.path.join(front3d_scene, file)
visualize_front3d_color(f'{name}.color', file)
visualize_front3d_depth(f'{name}.depth', file)
visualize_front3d_normal(f'{name}.normal', file)
visualize_front3d_mesh_type(f'{name}.mesh_semantic', file)
if __name__ == '__main__':
main('display')
|
py | 7dfd5837b6c6c90acffd5d1940811e8f5b81d95c | #!/usr/bin/python3
# -*- coding: UTF-8 -*-
from flask import Blueprint
api_comm = Blueprint("api_comm", __name__)
api_user = Blueprint("api_user", __name__) |
py | 7dfd586d1deb0026bc780eb63378d91981b4ac40 | __all__ = ['get_netloc_port', 'requote_uri', 'timeout_manager']
from urllib.parse import quote
from functools import wraps
from curio import timeout_after, TaskTimeout
from .errors import RequestTimeout
async def timeout_manager(timeout, coro, *args):
try:
async with timeout_after(timeout):
return await coro(*args)
except TaskTimeout as e:
raise RequestTimeout from e
def get_netloc_port(scheme, netloc):
try:
netloc, port = netloc.split(':')
except ValueError:
if scheme == 'https':
port = '443'
else:
port = '80'
except TypeError:
raise RuntimeError('Something is goofed. Contact the author!')
return netloc, port
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" +
"0123456789-._~")
def unquote_unreserved(uri):
"""Un-escape any percent-escape sequences in a URI that are unreserved
characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
:rtype: str
"""
parts = uri.split('%')
for i in range(1, len(parts)):
h = parts[i][0:2]
if len(h) == 2 and h.isalnum():
try:
c = chr(int(h, 16))
except ValueError:
raise ValueError("Invalid percent-escape sequence: '%s'" % h)
if c in UNRESERVED_SET:
parts[i] = c + parts[i][2:]
else:
parts[i] = '%' + parts[i]
else:
parts[i] = '%' + parts[i]
return ''.join(parts)
def requote_uri(uri):
"""Re-quote the given URI.
This function passes the given URI through an unquote/quote cycle to
ensure that it is fully and consistently quoted.
:rtype: str
"""
safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
safe_without_percent = "!#$&'()*+,/:;=?@[]~"
try:
# Unquote only the unreserved characters
# Then quote only illegal characters (do not quote reserved,
# unreserved, or '%')
return quote(unquote_unreserved(uri), safe=safe_with_percent)
except ValueError:
# We couldn't unquote the given URI, so let's try quoting it, but
# there may be unquoted '%'s in the URI. We need to make sure they're
# properly quoted so they do not cause issues elsewhere.
return quote(uri, safe=safe_without_percent)
def processor(gen):
@wraps(gen)
def wrapper(*args, **kwargs):
g = gen(*args, **kwargs)
next(g)
return g
return wrapper
|
py | 7dfd588a02b69fe1c2aaa570d8646e0ed90aaea4 | import os
import sys
import types
import attr
import py
import pytest
from _pytest.compat import importlib_metadata
from _pytest.config import ExitCode
from _pytest.pathlib import symlink_or_skip
from _pytest.pytester import Testdir
def prepend_pythonpath(*dirs):
cur = os.getenv("PYTHONPATH")
if cur:
dirs += (cur,)
return os.pathsep.join(str(p) for p in dirs)
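# For illustration (not part of the original tests): prepend_pythonpath("/a", "/b")
# returns "/a:/b" on POSIX (";"-separated on Windows), with any pre-existing
# PYTHONPATH value appended after the given directories.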
class TestGeneralUsage:
def test_config_error(self, testdir):
testdir.copy_example("conftest_usageerror/conftest.py")
result = testdir.runpytest(testdir.tmpdir)
assert result.ret == ExitCode.USAGE_ERROR
result.stderr.fnmatch_lines(["*ERROR: hello"])
result.stdout.fnmatch_lines(["*pytest_unconfigure_called"])
def test_root_conftest_syntax_error(self, testdir):
testdir.makepyfile(conftest="raise SyntaxError\n")
result = testdir.runpytest()
result.stderr.fnmatch_lines(["*raise SyntaxError*"])
assert result.ret != 0
def test_early_hook_error_issue38_1(self, testdir):
testdir.makeconftest(
"""
def pytest_sessionstart():
0 / 0
"""
)
result = testdir.runpytest(testdir.tmpdir)
assert result.ret != 0
# tracestyle is native by default for hook failures
result.stdout.fnmatch_lines(
["*INTERNALERROR*File*conftest.py*line 2*", "*0 / 0*"]
)
result = testdir.runpytest(testdir.tmpdir, "--fulltrace")
assert result.ret != 0
# tracestyle is native by default for hook failures
result.stdout.fnmatch_lines(
["*INTERNALERROR*def pytest_sessionstart():*", "*INTERNALERROR*0 / 0*"]
)
def test_early_hook_configure_error_issue38(self, testdir):
testdir.makeconftest(
"""
def pytest_configure():
0 / 0
"""
)
result = testdir.runpytest(testdir.tmpdir)
assert result.ret != 0
# here we get it on stderr
result.stderr.fnmatch_lines(
["*INTERNALERROR*File*conftest.py*line 2*", "*0 / 0*"]
)
def test_file_not_found(self, testdir):
result = testdir.runpytest("asd")
assert result.ret != 0
result.stderr.fnmatch_lines(["ERROR: file not found*asd"])
def test_file_not_found_unconfigure_issue143(self, testdir):
testdir.makeconftest(
"""
def pytest_configure():
print("---configure")
def pytest_unconfigure():
print("---unconfigure")
"""
)
result = testdir.runpytest("-s", "asd")
assert result.ret == ExitCode.USAGE_ERROR
result.stderr.fnmatch_lines(["ERROR: file not found*asd"])
result.stdout.fnmatch_lines(["*---configure", "*---unconfigure"])
def test_config_preparse_plugin_option(self, testdir):
testdir.makepyfile(
pytest_xyz="""
def pytest_addoption(parser):
parser.addoption("--xyz", dest="xyz", action="store")
"""
)
testdir.makepyfile(
test_one="""
def test_option(pytestconfig):
assert pytestconfig.option.xyz == "123"
"""
)
result = testdir.runpytest("-p", "pytest_xyz", "--xyz=123", syspathinsert=True)
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
@pytest.mark.parametrize("load_cov_early", [True, False])
def test_early_load_setuptools_name(self, testdir, monkeypatch, load_cov_early):
monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD")
testdir.makepyfile(mytestplugin1_module="")
testdir.makepyfile(mytestplugin2_module="")
testdir.makepyfile(mycov_module="")
testdir.syspathinsert()
loaded = []
@attr.s
class DummyEntryPoint:
name = attr.ib()
module = attr.ib()
group = "pytest11"
def load(self):
__import__(self.module)
loaded.append(self.name)
return sys.modules[self.module]
entry_points = [
DummyEntryPoint("myplugin1", "mytestplugin1_module"),
DummyEntryPoint("myplugin2", "mytestplugin2_module"),
DummyEntryPoint("mycov", "mycov_module"),
]
@attr.s
class DummyDist:
entry_points = attr.ib()
files = ()
def my_dists():
return (DummyDist(entry_points),)
monkeypatch.setattr(importlib_metadata, "distributions", my_dists)
params = ("-p", "mycov") if load_cov_early else ()
testdir.runpytest_inprocess(*params)
if load_cov_early:
assert loaded == ["mycov", "myplugin1", "myplugin2"]
else:
assert loaded == ["myplugin1", "myplugin2", "mycov"]
def test_assertion_magic(self, testdir):
p = testdir.makepyfile(
"""
def test_this():
x = 0
assert x
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["> assert x", "E assert 0"])
assert result.ret == 1
def test_nested_import_error(self, testdir):
p = testdir.makepyfile(
"""
import import_fails
def test_this():
assert import_fails.a == 1
"""
)
testdir.makepyfile(import_fails="import does_not_work")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"ImportError while importing test module*",
"*No module named *does_not_work*",
]
)
assert result.ret == 2
def test_not_collectable_arguments(self, testdir):
p1 = testdir.makepyfile("")
p2 = testdir.makefile(".pyc", "123")
result = testdir.runpytest(p1, p2)
assert result.ret == ExitCode.USAGE_ERROR
result.stderr.fnmatch_lines(
[
"ERROR: not found: {}".format(p2),
"(no name {!r} in any of [[][]])".format(str(p2)),
"",
]
)
@pytest.mark.filterwarnings("default")
def test_better_reporting_on_conftest_load_failure(self, testdir):
"""Show a user-friendly traceback on conftest import failures (#486, #3332)"""
testdir.makepyfile("")
conftest = testdir.makeconftest(
"""
def foo():
import qwerty
foo()
"""
)
result = testdir.runpytest("--help")
result.stdout.fnmatch_lines(
"""
*--version*
*warning*conftest.py*
"""
)
result = testdir.runpytest()
exc_name = (
"ModuleNotFoundError" if sys.version_info >= (3, 6) else "ImportError"
)
assert result.stdout.lines == []
assert result.stderr.lines == [
"ImportError while loading conftest '{}'.".format(conftest),
"conftest.py:3: in <module>",
" foo()",
"conftest.py:2: in foo",
" import qwerty",
"E {}: No module named 'qwerty'".format(exc_name),
]
@pytest.mark.filterwarnings("ignore::pytest.PytestDeprecationWarning")
def test_early_skip(self, testdir):
testdir.mkdir("xyz")
testdir.makeconftest(
"""
import pytest
def pytest_collect_directory():
pytest.skip("early")
"""
)
result = testdir.runpytest()
assert result.ret == ExitCode.NO_TESTS_COLLECTED
result.stdout.fnmatch_lines(["*1 skip*"])
def test_issue88_initial_file_multinodes(self, testdir):
testdir.copy_example("issue88_initial_file_multinodes")
p = testdir.makepyfile("def test_hello(): pass")
result = testdir.runpytest(p, "--collect-only")
result.stdout.fnmatch_lines(["*MyFile*test_issue88*", "*Module*test_issue88*"])
def test_issue93_initialnode_importing_capturing(self, testdir):
testdir.makeconftest(
"""
import sys
print("should not be seen")
sys.stderr.write("stder42\\n")
"""
)
result = testdir.runpytest()
assert result.ret == ExitCode.NO_TESTS_COLLECTED
result.stdout.no_fnmatch_line("*should not be seen*")
assert "stderr42" not in result.stderr.str()
def test_conftest_printing_shows_if_error(self, testdir):
testdir.makeconftest(
"""
print("should be seen")
assert 0
"""
)
result = testdir.runpytest()
assert result.ret != 0
assert "should be seen" in result.stdout.str()
def test_issue109_sibling_conftests_not_loaded(self, testdir):
sub1 = testdir.mkdir("sub1")
sub2 = testdir.mkdir("sub2")
sub1.join("conftest.py").write("assert 0")
result = testdir.runpytest(sub2)
assert result.ret == ExitCode.NO_TESTS_COLLECTED
sub2.ensure("__init__.py")
p = sub2.ensure("test_hello.py")
result = testdir.runpytest(p)
assert result.ret == ExitCode.NO_TESTS_COLLECTED
result = testdir.runpytest(sub1)
assert result.ret == ExitCode.USAGE_ERROR
def test_directory_skipped(self, testdir):
testdir.makeconftest(
"""
import pytest
def pytest_ignore_collect():
pytest.skip("intentional")
"""
)
testdir.makepyfile("def test_hello(): pass")
result = testdir.runpytest()
assert result.ret == ExitCode.NO_TESTS_COLLECTED
result.stdout.fnmatch_lines(["*1 skipped*"])
def test_multiple_items_per_collector_byid(self, testdir):
c = testdir.makeconftest(
"""
import pytest
class MyItem(pytest.Item):
def runtest(self):
pass
class MyCollector(pytest.File):
def collect(self):
return [MyItem(name="xyz", parent=self)]
def pytest_collect_file(path, parent):
if path.basename.startswith("conftest"):
return MyCollector(path, parent)
"""
)
result = testdir.runpytest(c.basename + "::" + "xyz")
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 pass*"])
def test_skip_on_generated_funcarg_id(self, testdir):
testdir.makeconftest(
"""
import pytest
def pytest_generate_tests(metafunc):
metafunc.parametrize('x', [3], ids=['hello-123'])
def pytest_runtest_setup(item):
print(item.keywords)
if 'hello-123' in item.keywords:
pytest.skip("hello")
assert 0
"""
)
p = testdir.makepyfile("""def test_func(x): pass""")
res = testdir.runpytest(p)
assert res.ret == 0
res.stdout.fnmatch_lines(["*1 skipped*"])
def test_direct_addressing_selects(self, testdir):
p = testdir.makepyfile(
"""
def pytest_generate_tests(metafunc):
metafunc.parametrize('i', [1, 2], ids=["1", "2"])
def test_func(i):
pass
"""
)
res = testdir.runpytest(p.basename + "::" + "test_func[1]")
assert res.ret == 0
res.stdout.fnmatch_lines(["*1 passed*"])
def test_direct_addressing_notfound(self, testdir):
p = testdir.makepyfile(
"""
def test_func():
pass
"""
)
res = testdir.runpytest(p.basename + "::" + "test_notfound")
assert res.ret
res.stderr.fnmatch_lines(["*ERROR*not found*"])
def test_docstring_on_hookspec(self):
from _pytest import hookspec
for name, value in vars(hookspec).items():
if name.startswith("pytest_"):
assert value.__doc__, "no docstring for %s" % name
def test_initialization_error_issue49(self, testdir):
testdir.makeconftest(
"""
def pytest_configure():
x
"""
)
result = testdir.runpytest()
assert result.ret == 3 # internal error
result.stderr.fnmatch_lines(["INTERNAL*pytest_configure*", "INTERNAL*x*"])
assert "sessionstarttime" not in result.stderr.str()
@pytest.mark.parametrize("lookfor", ["test_fun.py::test_a"])
def test_issue134_report_error_when_collecting_member(self, testdir, lookfor):
testdir.makepyfile(
test_fun="""
def test_a():
pass
def"""
)
result = testdir.runpytest(lookfor)
result.stdout.fnmatch_lines(["*SyntaxError*"])
if "::" in lookfor:
result.stderr.fnmatch_lines(["*ERROR*"])
assert result.ret == 4 # usage error only if item not found
def test_report_all_failed_collections_initargs(self, testdir):
testdir.makeconftest(
"""
from _pytest.config import ExitCode
def pytest_sessionfinish(exitstatus):
assert exitstatus == ExitCode.USAGE_ERROR
print("pytest_sessionfinish_called")
"""
)
testdir.makepyfile(test_a="def", test_b="def")
result = testdir.runpytest("test_a.py::a", "test_b.py::b")
result.stderr.fnmatch_lines(["*ERROR*test_a.py::a*", "*ERROR*test_b.py::b*"])
result.stdout.fnmatch_lines(["pytest_sessionfinish_called"])
assert result.ret == ExitCode.USAGE_ERROR
@pytest.mark.usefixtures("recwarn")
def test_namespace_import_doesnt_confuse_import_hook(self, testdir):
"""
Ref #383. Python 3.3's namespace package messed with our import hooks
Importing a module that didn't exist, even if the ImportError was
gracefully handled, would make our test crash.
Use recwarn here to silence this warning in Python 2.7:
ImportWarning: Not importing directory '...\not_a_package': missing __init__.py
"""
testdir.mkdir("not_a_package")
p = testdir.makepyfile(
"""
try:
from not_a_package import doesnt_exist
except ImportError:
# We handle the import error gracefully here
pass
def test_whatever():
pass
"""
)
res = testdir.runpytest(p.basename)
assert res.ret == 0
def test_unknown_option(self, testdir):
result = testdir.runpytest("--qwlkej")
result.stderr.fnmatch_lines(
"""
*unrecognized*
"""
)
def test_getsourcelines_error_issue553(self, testdir, monkeypatch):
monkeypatch.setattr("inspect.getsourcelines", None)
p = testdir.makepyfile(
"""
def raise_error(obj):
raise OSError('source code not available')
import inspect
inspect.getsourcelines = raise_error
def test_foo(invalid_fixture):
pass
"""
)
res = testdir.runpytest(p)
res.stdout.fnmatch_lines(
["*source code not available*", "E*fixture 'invalid_fixture' not found"]
)
def test_plugins_given_as_strings(self, tmpdir, monkeypatch, _sys_snapshot):
"""test that str values passed to main() as `plugins` arg
are interpreted as module names to be imported and registered.
#855.
"""
with pytest.raises(ImportError) as excinfo:
pytest.main([str(tmpdir)], plugins=["invalid.module"])
assert "invalid" in str(excinfo.value)
p = tmpdir.join("test_test_plugins_given_as_strings.py")
p.write("def test_foo(): pass")
mod = types.ModuleType("myplugin")
monkeypatch.setitem(sys.modules, "myplugin", mod)
assert pytest.main(args=[str(tmpdir)], plugins=["myplugin"]) == 0
def test_parametrized_with_bytes_regex(self, testdir):
p = testdir.makepyfile(
"""
import re
import pytest
@pytest.mark.parametrize('r', [re.compile(b'foo')])
def test_stuff(r):
pass
"""
)
res = testdir.runpytest(p)
res.stdout.fnmatch_lines(["*1 passed*"])
def test_parametrized_with_null_bytes(self, testdir):
"""Test parametrization with values that contain null bytes and unicode characters (#2644, #2957)"""
p = testdir.makepyfile(
"""\
import pytest
@pytest.mark.parametrize("data", [b"\\x00", "\\x00", 'ação'])
def test_foo(data):
assert data
"""
)
res = testdir.runpytest(p)
res.assert_outcomes(passed=3)
class TestInvocationVariants:
def test_earlyinit(self, testdir):
p = testdir.makepyfile(
"""
import pytest
assert hasattr(pytest, 'mark')
"""
)
result = testdir.runpython(p)
assert result.ret == 0
def test_pydoc(self, testdir):
for name in ("py.test", "pytest"):
result = testdir.runpython_c("import {};help({})".format(name, name))
assert result.ret == 0
s = result.stdout.str()
assert "MarkGenerator" in s
def test_import_star_py_dot_test(self, testdir):
p = testdir.makepyfile(
"""
from py.test import *
#collect
#cmdline
#Item
# assert collect.Item is Item
# assert collect.Collector is Collector
main
skip
xfail
"""
)
result = testdir.runpython(p)
assert result.ret == 0
def test_import_star_pytest(self, testdir):
p = testdir.makepyfile(
"""
from pytest import *
#Item
#File
main
skip
xfail
"""
)
result = testdir.runpython(p)
assert result.ret == 0
def test_double_pytestcmdline(self, testdir):
p = testdir.makepyfile(
run="""
import pytest
pytest.main()
pytest.main()
"""
)
testdir.makepyfile(
"""
def test_hello():
pass
"""
)
result = testdir.runpython(p)
result.stdout.fnmatch_lines(["*1 passed*", "*1 passed*"])
def test_python_minus_m_invocation_ok(self, testdir):
p1 = testdir.makepyfile("def test_hello(): pass")
res = testdir.run(sys.executable, "-m", "pytest", str(p1))
assert res.ret == 0
def test_python_minus_m_invocation_fail(self, testdir):
p1 = testdir.makepyfile("def test_fail(): 0/0")
res = testdir.run(sys.executable, "-m", "pytest", str(p1))
assert res.ret == 1
def test_python_pytest_package(self, testdir):
p1 = testdir.makepyfile("def test_pass(): pass")
res = testdir.run(sys.executable, "-m", "pytest", str(p1))
assert res.ret == 0
res.stdout.fnmatch_lines(["*1 passed*"])
def test_equivalence_pytest_pytest(self):
assert pytest.main == py.test.cmdline.main
def test_invoke_with_invalid_type(self):
with pytest.raises(
TypeError, match="expected to be a list of strings, got: '-h'"
):
pytest.main("-h")
def test_invoke_with_path(self, tmpdir, capsys):
retcode = pytest.main(tmpdir)
assert retcode == ExitCode.NO_TESTS_COLLECTED
out, err = capsys.readouterr()
def test_invoke_plugin_api(self, capsys):
class MyPlugin:
def pytest_addoption(self, parser):
parser.addoption("--myopt")
pytest.main(["-h"], plugins=[MyPlugin()])
out, err = capsys.readouterr()
assert "--myopt" in out
def test_pyargs_importerror(self, testdir, monkeypatch):
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", False)
path = testdir.mkpydir("tpkg")
path.join("test_hello.py").write("raise ImportError")
result = testdir.runpytest("--pyargs", "tpkg.test_hello", syspathinsert=True)
assert result.ret != 0
result.stdout.fnmatch_lines(["collected*0*items*/*1*error"])
def test_pyargs_only_imported_once(self, testdir):
pkg = testdir.mkpydir("foo")
pkg.join("test_foo.py").write("print('hello from test_foo')\ndef test(): pass")
pkg.join("conftest.py").write(
"def pytest_configure(config): print('configuring')"
)
result = testdir.runpytest("--pyargs", "foo.test_foo", "-s", syspathinsert=True)
# should only import once
assert result.outlines.count("hello from test_foo") == 1
# should only configure once
assert result.outlines.count("configuring") == 1
def test_pyargs_filename_looks_like_module(self, testdir):
testdir.tmpdir.join("conftest.py").ensure()
testdir.tmpdir.join("t.py").write("def test(): pass")
result = testdir.runpytest("--pyargs", "t.py")
assert result.ret == ExitCode.OK
def test_cmdline_python_package(self, testdir, monkeypatch):
import warnings
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", False)
path = testdir.mkpydir("tpkg")
path.join("test_hello.py").write("def test_hello(): pass")
path.join("test_world.py").write("def test_world(): pass")
result = testdir.runpytest("--pyargs", "tpkg")
assert result.ret == 0
result.stdout.fnmatch_lines(["*2 passed*"])
result = testdir.runpytest("--pyargs", "tpkg.test_hello", syspathinsert=True)
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
empty_package = testdir.mkpydir("empty_package")
monkeypatch.setenv("PYTHONPATH", str(empty_package), prepend=os.pathsep)
# the path which is not a package raises a warning on pypy;
# no idea why only pypy and not normal python warn about it here
with warnings.catch_warnings():
warnings.simplefilter("ignore", ImportWarning)
result = testdir.runpytest("--pyargs", ".")
assert result.ret == 0
result.stdout.fnmatch_lines(["*2 passed*"])
monkeypatch.setenv("PYTHONPATH", str(testdir), prepend=os.pathsep)
result = testdir.runpytest("--pyargs", "tpkg.test_missing", syspathinsert=True)
assert result.ret != 0
result.stderr.fnmatch_lines(["*not*found*test_missing*"])
def test_cmdline_python_namespace_package(self, testdir, monkeypatch):
"""
test --pyargs option with namespace packages (#1567)
Ref: https://packaging.python.org/guides/packaging-namespace-packages/
"""
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)
search_path = []
for dirname in "hello", "world":
d = testdir.mkdir(dirname)
search_path.append(d)
ns = d.mkdir("ns_pkg")
ns.join("__init__.py").write(
"__import__('pkg_resources').declare_namespace(__name__)"
)
lib = ns.mkdir(dirname)
lib.ensure("__init__.py")
lib.join("test_{}.py".format(dirname)).write(
"def test_{}(): pass\ndef test_other():pass".format(dirname)
)
# The structure of the test directory is now:
# .
# ├── hello
# │ └── ns_pkg
# │ ├── __init__.py
# │ └── hello
# │ ├── __init__.py
# │ └── test_hello.py
# └── world
# └── ns_pkg
# ├── __init__.py
# └── world
# ├── __init__.py
# └── test_world.py
# NOTE: the different/reversed ordering is intentional here.
monkeypatch.setenv("PYTHONPATH", prepend_pythonpath(*search_path))
for p in search_path:
monkeypatch.syspath_prepend(p)
# mixed module and filenames:
monkeypatch.chdir("world")
result = testdir.runpytest("--pyargs", "-v", "ns_pkg.hello", "ns_pkg/world")
assert result.ret == 0
result.stdout.fnmatch_lines(
[
"test_hello.py::test_hello*PASSED*",
"test_hello.py::test_other*PASSED*",
"ns_pkg/world/test_world.py::test_world*PASSED*",
"ns_pkg/world/test_world.py::test_other*PASSED*",
"*4 passed in*",
]
)
# specify tests within a module
testdir.chdir()
result = testdir.runpytest(
"--pyargs", "-v", "ns_pkg.world.test_world::test_other"
)
assert result.ret == 0
result.stdout.fnmatch_lines(
["*test_world.py::test_other*PASSED*", "*1 passed*"]
)
def test_invoke_test_and_doctestmodules(self, testdir):
p = testdir.makepyfile(
"""
def test():
pass
"""
)
result = testdir.runpytest(str(p) + "::test", "--doctest-modules")
result.stdout.fnmatch_lines(["*1 passed*"])
def test_cmdline_python_package_symlink(self, testdir, monkeypatch):
"""
--pyargs with packages with path containing symlink can have conftest.py in
their package (#2985)
"""
monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)
dirname = "lib"
d = testdir.mkdir(dirname)
foo = d.mkdir("foo")
foo.ensure("__init__.py")
lib = foo.mkdir("bar")
lib.ensure("__init__.py")
lib.join("test_bar.py").write(
"def test_bar(): pass\ndef test_other(a_fixture):pass"
)
lib.join("conftest.py").write(
"import pytest\[email protected]\ndef a_fixture():pass"
)
d_local = testdir.mkdir("symlink_root")
symlink_location = d_local / "lib"
symlink_or_skip(d, symlink_location, target_is_directory=True)
# The structure of the test directory is now:
# .
# ├── symlink_root
# │ └── lib -> ../lib
# └── lib
# └── foo
# ├── __init__.py
# └── bar
# ├── __init__.py
# ├── conftest.py
# └── test_bar.py
# NOTE: the different/reversed ordering is intentional here.
search_path = ["lib", os.path.join("symlink_root", "lib")]
monkeypatch.setenv("PYTHONPATH", prepend_pythonpath(*search_path))
for p in search_path:
monkeypatch.syspath_prepend(p)
# module picked up in symlink-ed directory:
# It picks up symlink_root/lib/foo/bar (symlink) via sys.path.
result = testdir.runpytest("--pyargs", "-v", "foo.bar")
testdir.chdir()
assert result.ret == 0
result.stdout.fnmatch_lines(
[
"symlink_root/lib/foo/bar/test_bar.py::test_bar PASSED*",
"symlink_root/lib/foo/bar/test_bar.py::test_other PASSED*",
"*2 passed*",
]
)
def test_cmdline_python_package_not_exists(self, testdir):
result = testdir.runpytest("--pyargs", "tpkgwhatv")
assert result.ret
result.stderr.fnmatch_lines(["ERROR*file*or*package*not*found*"])
@pytest.mark.xfail(reason="decide: feature or bug")
def test_noclass_discovery_if_not_testcase(self, testdir):
testpath = testdir.makepyfile(
"""
import unittest
class TestHello(object):
def test_hello(self):
assert self.attr
class RealTest(unittest.TestCase, TestHello):
attr = 42
"""
)
reprec = testdir.inline_run(testpath)
reprec.assertoutcome(passed=1)
def test_doctest_id(self, testdir):
testdir.makefile(
".txt",
"""
>>> x=3
>>> x
4
""",
)
testid = "test_doctest_id.txt::test_doctest_id.txt"
expected_lines = [
"*= FAILURES =*",
"*_ ?doctest? test_doctest_id.txt _*",
"FAILED test_doctest_id.txt::test_doctest_id.txt",
"*= 1 failed in*",
]
result = testdir.runpytest(testid, "-rf", "--tb=short")
result.stdout.fnmatch_lines(expected_lines)
# Ensure that re-running it will still handle it as
# doctest.DocTestFailure, which was not the case before when
# re-importing doctest, but not creating a new RUNNER_CLASS.
result = testdir.runpytest(testid, "-rf", "--tb=short")
result.stdout.fnmatch_lines(expected_lines)
def test_core_backward_compatibility(self):
"""Test backward compatibility for get_plugin_manager function. See #787."""
import _pytest.config
assert (
type(_pytest.config.get_plugin_manager())
is _pytest.config.PytestPluginManager
)
def test_has_plugin(self, request):
"""Test hasplugin function of the plugin manager (#932)."""
assert request.config.pluginmanager.hasplugin("python")
class TestDurations:
source = """
from _pytest import timing
def test_something():
pass
def test_2():
timing.sleep(0.010)
def test_1():
timing.sleep(0.002)
def test_3():
timing.sleep(0.020)
"""
def test_calls(self, testdir, mock_timing):
testdir.makepyfile(self.source)
result = testdir.runpytest_inprocess("--durations=10")
assert result.ret == 0
result.stdout.fnmatch_lines_random(
["*durations*", "*call*test_3*", "*call*test_2*"]
)
result.stdout.fnmatch_lines(
["(8 durations < 0.005s hidden. Use -vv to show these durations.)"]
)
def test_calls_show_2(self, testdir, mock_timing):
testdir.makepyfile(self.source)
result = testdir.runpytest_inprocess("--durations=2")
assert result.ret == 0
lines = result.stdout.get_lines_after("*slowest*durations*")
assert "4 passed" in lines[2]
def test_calls_showall(self, testdir, mock_timing):
testdir.makepyfile(self.source)
result = testdir.runpytest_inprocess("--durations=0")
assert result.ret == 0
tested = "3"
for x in tested:
for y in ("call",): # 'setup', 'call', 'teardown':
for line in result.stdout.lines:
if ("test_%s" % x) in line and y in line:
break
else:
raise AssertionError("not found {} {}".format(x, y))
def test_calls_showall_verbose(self, testdir, mock_timing):
testdir.makepyfile(self.source)
result = testdir.runpytest_inprocess("--durations=0", "-vv")
assert result.ret == 0
for x in "123":
for y in ("call",): # 'setup', 'call', 'teardown':
for line in result.stdout.lines:
if ("test_%s" % x) in line and y in line:
break
else:
raise AssertionError("not found {} {}".format(x, y))
def test_with_deselected(self, testdir, mock_timing):
testdir.makepyfile(self.source)
result = testdir.runpytest_inprocess("--durations=2", "-k test_3")
assert result.ret == 0
result.stdout.fnmatch_lines(["*durations*", "*call*test_3*"])
def test_with_failing_collection(self, testdir, mock_timing):
testdir.makepyfile(self.source)
testdir.makepyfile(test_collecterror="""xyz""")
result = testdir.runpytest_inprocess("--durations=2", "-k test_1")
assert result.ret == 2
result.stdout.fnmatch_lines(["*Interrupted: 1 error during collection*"])
# Collection errors abort test execution, therefore no duration is
# output
result.stdout.no_fnmatch_line("*duration*")
def test_with_not(self, testdir, mock_timing):
testdir.makepyfile(self.source)
result = testdir.runpytest_inprocess("-k not 1")
assert result.ret == 0
class TestDurationsWithFixture:
source = """
import pytest
from _pytest import timing
@pytest.fixture
def setup_fixt():
timing.sleep(2)
def test_1(setup_fixt):
timing.sleep(5)
"""
def test_setup_function(self, testdir, mock_timing):
testdir.makepyfile(self.source)
result = testdir.runpytest_inprocess("--durations=10")
assert result.ret == 0
result.stdout.fnmatch_lines_random(
"""
*durations*
5.00s call *test_1*
2.00s setup *test_1*
"""
)
def test_zipimport_hook(testdir, tmpdir):
"""Test package loader is being used correctly (see #1837)."""
zipapp = pytest.importorskip("zipapp")
testdir.tmpdir.join("app").ensure(dir=1)
testdir.makepyfile(
**{
"app/foo.py": """
import pytest
def main():
pytest.main(['--pyargs', 'foo'])
"""
}
)
target = tmpdir.join("foo.zip")
zipapp.create_archive(str(testdir.tmpdir.join("app")), str(target), main="foo:main")
result = testdir.runpython(target)
assert result.ret == 0
result.stderr.fnmatch_lines(["*not found*foo*"])
result.stdout.no_fnmatch_line("*INTERNALERROR>*")
def test_import_plugin_unicode_name(testdir):
testdir.makepyfile(myplugin="")
testdir.makepyfile("def test(): pass")
testdir.makeconftest("pytest_plugins = ['myplugin']")
r = testdir.runpytest()
assert r.ret == 0
def test_pytest_plugins_as_module(testdir):
"""Do not raise an error if pytest_plugins attribute is a module (#3899)"""
testdir.makepyfile(
**{
"__init__.py": "",
"pytest_plugins.py": "",
"conftest.py": "from . import pytest_plugins",
"test_foo.py": "def test(): pass",
}
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["* 1 passed in *"])
def test_deferred_hook_checking(testdir):
"""
Check hooks as late as possible (#1821).
"""
testdir.syspathinsert()
testdir.makepyfile(
**{
"plugin.py": """
class Hooks(object):
def pytest_my_hook(self, config):
pass
def pytest_configure(config):
config.pluginmanager.add_hookspecs(Hooks)
""",
"conftest.py": """
pytest_plugins = ['plugin']
def pytest_my_hook(config):
return 40
""",
"test_foo.py": """
def test(request):
assert request.config.hook.pytest_my_hook(config=request.config) == [40]
""",
}
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["* 1 passed *"])
def test_fixture_values_leak(testdir):
"""Ensure that fixture objects are properly destroyed by the garbage collector at the end of their expected
life-times (#2981).
"""
testdir.makepyfile(
"""
import attr
import gc
import pytest
import weakref
@attr.s
class SomeObj(object):
name = attr.ib()
fix_of_test1_ref = None
session_ref = None
@pytest.fixture(scope='session')
def session_fix():
global session_ref
obj = SomeObj(name='session-fixture')
session_ref = weakref.ref(obj)
return obj
@pytest.fixture
def fix(session_fix):
global fix_of_test1_ref
obj = SomeObj(name='local-fixture')
fix_of_test1_ref = weakref.ref(obj)
return obj
def test1(fix):
assert fix_of_test1_ref() is fix
def test2():
gc.collect()
# fixture "fix" created during test1 must have been destroyed by now
assert fix_of_test1_ref() is None
"""
)
# Running on subprocess does not activate the HookRecorder
# which holds itself a reference to objects in case of the
# pytest_assert_reprcompare hook
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["* 2 passed *"])
def test_fixture_order_respects_scope(testdir):
"""Ensure that fixtures are created according to scope order, regression test for #2405
"""
testdir.makepyfile(
"""
import pytest
data = {}
@pytest.fixture(scope='module')
def clean_data():
data.clear()
@pytest.fixture(autouse=True)
def add_data():
data.update(value=True)
@pytest.mark.usefixtures('clean_data')
def test_value():
assert data.get('value')
"""
)
result = testdir.runpytest()
assert result.ret == 0
def test_frame_leak_on_failing_test(testdir):
"""pytest would leak garbage referencing the frames of tests that failed that could never be reclaimed (#2798)
Unfortunately it was not possible to remove the actual circles because most of them
are made of traceback objects which cannot be weakly referenced. Those objects at least
can be eventually claimed by the garbage collector.
"""
testdir.makepyfile(
"""
import gc
import weakref
class Obj:
pass
ref = None
def test1():
obj = Obj()
global ref
ref = weakref.ref(obj)
assert 0
def test2():
gc.collect()
assert ref() is None
"""
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["*1 failed, 1 passed in*"])
def test_fixture_mock_integration(testdir):
"""Test that decorators applied to fixture are left working (#3774)"""
p = testdir.copy_example("acceptance/fixture_mock_integration.py")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["*1 passed*"])
def test_usage_error_code(testdir):
result = testdir.runpytest("-unknown-option-")
assert result.ret == ExitCode.USAGE_ERROR
@pytest.mark.filterwarnings("default")
def test_warn_on_async_function(testdir):
testdir.makepyfile(
test_async="""
async def test_1():
pass
async def test_2():
pass
def test_3():
return test_2()
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"test_async.py::test_1",
"test_async.py::test_2",
"test_async.py::test_3",
"*async def functions are not natively supported*",
"*3 skipped, 3 warnings in*",
]
)
# ensure our warning message appears only once
assert (
result.stdout.str().count("async def functions are not natively supported") == 1
)
@pytest.mark.filterwarnings("default")
@pytest.mark.skipif(
sys.version_info < (3, 6), reason="async gen syntax available in Python 3.6+"
)
def test_warn_on_async_gen_function(testdir):
testdir.makepyfile(
test_async="""
async def test_1():
yield
async def test_2():
yield
def test_3():
return test_2()
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"test_async.py::test_1",
"test_async.py::test_2",
"test_async.py::test_3",
"*async def functions are not natively supported*",
"*3 skipped, 3 warnings in*",
]
)
# ensure our warning message appears only once
assert (
result.stdout.str().count("async def functions are not natively supported") == 1
)
def test_pdb_can_be_rewritten(testdir):
testdir.makepyfile(
**{
"conftest.py": """
import pytest
pytest.register_assert_rewrite("pdb")
""",
"__init__.py": "",
"pdb.py": """
def check():
assert 1 == 2
""",
"test_pdb.py": """
def test():
import pdb
assert pdb.check()
""",
}
)
# Disable debugging plugin itself to avoid:
# > INTERNALERROR> AttributeError: module 'pdb' has no attribute 'set_trace'
result = testdir.runpytest_subprocess("-p", "no:debugging", "-vv")
result.stdout.fnmatch_lines(
[
" def check():",
"> assert 1 == 2",
"E assert 1 == 2",
"E +1",
"E -2",
"",
"pdb.py:2: AssertionError",
"*= 1 failed in *",
]
)
assert result.ret == 1
def test_tee_stdio_captures_and_live_prints(testdir):
testpath = testdir.makepyfile(
"""
import sys
def test_simple():
print ("@this is stdout@")
print ("@this is stderr@", file=sys.stderr)
"""
)
result = testdir.runpytest_subprocess(
testpath,
"--capture=tee-sys",
"--junitxml=output.xml",
"-o",
"junit_logging=all",
)
# ensure stdout/stderr were 'live printed'
result.stdout.fnmatch_lines(["*@this is stdout@*"])
result.stderr.fnmatch_lines(["*@this is stderr@*"])
# now ensure the output is in the junitxml
with open(os.path.join(testdir.tmpdir.strpath, "output.xml")) as f:
fullXml = f.read()
assert "@this is stdout@\n" in fullXml
assert "@this is stderr@\n" in fullXml
@pytest.mark.skipif(
sys.platform == "win32",
reason="Windows raises `OSError: [Errno 22] Invalid argument` instead",
)
def test_no_brokenpipeerror_message(testdir: Testdir) -> None:
"""Ensure that the broken pipe error message is supressed.
In some Python versions, it reaches sys.unraisablehook, in others
a BrokenPipeError exception is propagated, but either way it prints
to stderr on shutdown, so checking nothing is printed is enough.
"""
popen = testdir.popen((*testdir._getpytestargs(), "--help"))
popen.stdout.close()
ret = popen.wait()
assert popen.stderr.read() == b""
assert ret == 1
|
py | 7dfd588ea7520e7e1c5b47c7ce7faca96717be72 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'ListWebAppMetadataSlotResult',
'AwaitableListWebAppMetadataSlotResult',
'list_web_app_metadata_slot',
]
@pulumi.output_type
class ListWebAppMetadataSlotResult:
"""
String dictionary resource.
"""
def __init__(__self__, id=None, kind=None, name=None, properties=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> Mapping[str, str]:
"""
Settings.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableListWebAppMetadataSlotResult(ListWebAppMetadataSlotResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListWebAppMetadataSlotResult(
id=self.id,
kind=self.kind,
name=self.name,
properties=self.properties,
type=self.type)
def list_web_app_metadata_slot(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
slot: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListWebAppMetadataSlotResult:
"""
String dictionary resource.
:param str name: Name of the app.
:param str resource_group_name: Name of the resource group to which the resource belongs.
:param str slot: Name of the deployment slot. If a slot is not specified, the API will get the metadata for the production slot.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
__args__['slot'] = slot
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:web/v20181101:listWebAppMetadataSlot', __args__, opts=opts, typ=ListWebAppMetadataSlotResult).value
return AwaitableListWebAppMetadataSlotResult(
id=__ret__.id,
kind=__ret__.kind,
name=__ret__.name,
properties=__ret__.properties,
type=__ret__.type)
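# Minimal usage sketch (argument values are illustrative, not from this
# generated file, and the call only works inside a running Pulumi program):
# the invoke returns a result object whose `properties` field is the plain
# key/value metadata dictionary.
#
#   result = list_web_app_metadata_slot(name="my-app",
#                                       resource_group_name="my-rg",
#                                       slot="staging")
#   metadata = result.properties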
|
py | 7dfd5ac725ab1224848c870fbccd1ee37ecd7c67 | def bubbleSort(arr):
n = len(arr)
# Traverse through all array elements
for i in range(n-1):
        # range(n) also works, but the outer loop would repeat one more time than needed.
# Last i elements are already in place
for j in range(0, n-i-1):
# traverse the array from 0 to n-i-1
# Swap if the element found is greater
# than the next element
if arr[j] > arr[j+1] :
arr[j], arr[j+1] = arr[j+1], arr[j]
# Driver code to test above
arr = [64, 34, 25, 12, 22, 11, 90]
bubbleSort(arr)
print ("Sorted array is:")
for i in range(len(arr)):
print ("%d" %arr[i]),
|
py | 7dfd5b15394d7aef48a78b3c68a354e442b2c430 | """
authorized key management.
"""
# TODO: needs pytests
import io
import itertools
import os.path
from six import itervalues
from cloudmesh.common.Shell import Subprocess
from cloudmesh.common.util import tempdir
# TODO: use our simple subprocess wrapper ?
def get_fingerprint_from_public_key(pubkey):
"""Generate the fingerprint of a public key
:param str pubkey: the value of the public key
:returns: fingerprint
:rtype: str
"""
# TODO: why is there a tmpdir?
with tempdir() as workdir:
key = os.path.join(workdir, 'key.pub')
with open(key, 'w') as fd:
fd.write(pubkey)
cmd = [
'ssh-keygen',
'-l',
'-f', key,
]
p = Subprocess(cmd)
output = p.stdout.strip()
bits, fingerprint, _ = output.split(' ', 2)
return fingerprint
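# Illustrative note (an assumption, not taken from the original module):
# `ssh-keygen -l -f key.pub` prints a single line of the form
#   "<bits> <fingerprint> <comment> (<type>)"
# e.g. "2048 SHA256:0Yx0... user@host (RSA)", so the split above keeps only
# the middle fingerprint field. A hedged usage sketch with a hypothetical
# key file path:
#
#   with open('/tmp/id_rsa.pub') as fd:     # hypothetical path
#       print(get_fingerprint_from_public_key(fd.read()))
#   # -> 'SHA256:0Yx0...' (or an 'aa:bb:cc:...' hex form on older OpenSSH)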
class AuthorizedKeys(object):
"""
Class to manage authorized keys.
"""
def __init__(self):
self._order = dict()
self._keys = dict()
@classmethod
def load(cls, path):
"""
load the keys from a path
:param path: the filename (path) in which we find the keys
:return:
"""
auth = cls()
with open(path) as fd:
            for pubkey in map(str.strip, fd):
# skip empty lines
if not pubkey:
continue
auth.add(pubkey)
return auth
def add(self, pubkey):
"""
add a public key.
:param pubkey: the filename to the public key
:return:
"""
f = get_fingerprint_from_public_key(pubkey)
if f not in self._keys:
self._order[len(self._keys)] = f
self._keys[f] = pubkey
def remove(self, pubkey):
"""
Removes the public key
TODO: this method is not implemented
:param pubkey: the filename of the public key
:return:
"""
raise NotImplementedError()
def __str__(self):
sio = io.StringIO()
# TODO: make python 2 and 3 compatible
# old: for fingerprint in self._order.itervalues():
for fingerprint in itervalues(self._order):
key = self._keys[fingerprint]
sio.write(key)
sio.write('\n')
text = sio.getvalue()
sio.close()
return text.strip()
if __name__ == '__main__':
import sys
path = sys.argv[1]
auth = AuthorizedKeys.load(path)
print(auth)
|
py | 7dfd5b260179a48e9b5e6eb975e7f08c3d6e0b43 | from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import sys
def main():
for path in sys.argv[1:] or sys.stdin.read().splitlines():
with open(path, 'r') as path_fd:
for line, text in enumerate(path_fd.readlines()):
match = re.search(r'(?<! six)\.(iteritems)', text)
if match:
print('%s:%d:%d: use `dict.items` or `ansible.module_utils.six.iteritems` instead of `dict.iteritems`' % (
path, line + 1, match.start(1) + 1))
if __name__ == '__main__':
main()
|
py | 7dfd5b63e1ffffca2ec042ade6571262fc4069a2 | #
# GtkHelp.py -- customized Gtk2 widgets
#
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
from __future__ import print_function
import sys, os.path
from ginga.misc import Bunch
import ginga.toolkit
import gtk
import gobject
import pango
ginga.toolkit.use('gtk2')
class WidgetMask(object):
def __init__(self, *args):
self.cb_fn = None
self.cb_args = []
self.cb_kwdargs = {}
self.connected = False
self.changed = False
def sconnect(self, signal, cb_fn, *args, **kwdargs):
self.cb_fn = cb_fn
self.cb_args = args
self.cb_kwdargs = kwdargs
self.connect(signal, self.cb)
self.connected = True
def change(self):
if self.connected:
self.changed = True
def cb(self, *args):
if self.changed:
self.changed = False
return
newargs = list(args)
newargs.extend(self.cb_args)
kwdargs = self.cb_kwdargs.copy()
return self.cb_fn(*newargs, **kwdargs)
class TopLevel(gtk.Window):
def __init__(self):
gtk.Window.__init__(self, gtk.WINDOW_TOPLEVEL)
class CheckButton(WidgetMask, gtk.CheckButton):
def __init__(self, *args, **kwdargs):
WidgetMask.__init__(self)
gtk.CheckButton.__init__(self, *args, **kwdargs)
def set_active(self, newval):
oldval = self.get_active()
if oldval != newval:
self.change()
super(CheckButton, self).set_active(newval)
class ToggleButton(WidgetMask, gtk.ToggleButton):
def __init__(self, *args, **kwdargs):
WidgetMask.__init__(self)
gtk.ToggleButton.__init__(self, *args, **kwdargs)
def set_active(self, newval):
oldval = self.get_active()
if oldval != newval:
self.change()
super(ToggleButton, self).set_active(newval)
def toggle(self):
oldval = self.get_active()
newval = not oldval
super(ToggleButton, self).set_active(newval)
class RadioButton(WidgetMask, gtk.RadioButton):
def __init__(self, *args, **kwdargs):
WidgetMask.__init__(self)
gtk.RadioButton.__init__(self, *args, **kwdargs)
def set_active(self, newval):
oldval = self.get_active()
if oldval != newval:
self.change()
super(RadioButton, self).set_active(newval)
def toggle(self):
oldval = self.get_active()
newval = not oldval
super(RadioButton, self).set_active(newval)
class CheckMenuItem(WidgetMask, gtk.CheckMenuItem):
def __init__(self, *args, **kwdargs):
WidgetMask.__init__(self)
gtk.CheckMenuItem.__init__(self, *args, **kwdargs)
def set_active(self, newval):
oldval = self.get_active()
if oldval != newval:
self.change()
super(CheckMenuItem, self).set_active(newval)
class SpinButton(WidgetMask, gtk.SpinButton):
def __init__(self, *args, **kwdargs):
WidgetMask.__init__(self)
gtk.SpinButton.__init__(self, *args, **kwdargs)
def set_value(self, newval):
oldval = self.get_value()
if oldval != newval:
self.change()
super(SpinButton, self).set_value(newval)
class HScale(WidgetMask, gtk.HScale):
def __init__(self, *args, **kwdargs):
WidgetMask.__init__(self)
gtk.HScale.__init__(self, *args, **kwdargs)
def set_value(self, newval):
oldval = self.get_value()
if oldval != newval:
self.change()
super(HScale, self).set_value(newval)
class VScale(WidgetMask, gtk.VScale):
def __init__(self, *args, **kwdargs):
WidgetMask.__init__(self)
gtk.VScale.__init__(self, *args, **kwdargs)
def set_value(self, newval):
oldval = self.get_value()
if oldval != newval:
self.change()
super(VScale, self).set_value(newval)
class ComboBoxMixin(object):
def set_active(self, newval):
oldval = self.get_active()
if oldval != newval:
self.change()
super(ComboBox, self).set_active(newval)
def insert_alpha(self, text):
model = self.get_model()
tup = (text, )
j = 0
for i in range(len(model)):
j = i
if model[i][0] > text:
model.insert(j, tup)
return
model.insert(j+1, tup)
def insert_text(self, idx, text):
model = self.get_model()
tup = (text, )
model.insert(idx, tup)
def delete_alpha(self, text):
model = self.get_model()
for i in range(len(model)):
if model[i][0] == text:
del model[i]
return
def clear(self):
model = self.get_model()
model.clear()
def show_text(self, text):
model = self.get_model()
for i in range(len(model)):
if model[i][0] == text:
self.set_active(i)
return
class ComboBox(WidgetMask, gtk.ComboBox, ComboBoxMixin):
def __init__(self, *args, **kwdargs):
WidgetMask.__init__(self)
gtk.ComboBox.__init__(self, *args, **kwdargs)
class ComboBoxEntry(WidgetMask, gtk.ComboBoxEntry, ComboBoxMixin):
def __init__(self, *args, **kwdargs):
WidgetMask.__init__(self)
gtk.ComboBoxEntry.__init__(self, *args, **kwdargs)
class Notebook(WidgetMask, gtk.Notebook):
def __init__(self, *args, **kwdargs):
WidgetMask.__init__(self)
gtk.Notebook.__init__(self, *args, **kwdargs)
def set_group_id(self, id):
super(Notebook, self).set_group_name(str(id))
def set_current_page(self, new_idx):
old_idx = self.get_current_page()
if old_idx != new_idx:
self.change()
super(Notebook, self).set_current_page(new_idx)
class MultiDragDropTreeView(gtk.TreeView):
'''TreeView that captures mouse events to make drag and drop work
properly
See: https://gist.github.com/kevinmehall/278480#file-multiple-selection-dnd-class-py
'''
def __init__(self):
super(MultiDragDropTreeView, self).__init__()
self.connect('button_press_event', self.on_button_press)
self.connect('button_release_event', self.on_button_release)
self.defer_select = False
def on_button_press(self, widget, event):
# Here we intercept mouse clicks on selected items so that we can
# drag multiple items without the click selecting only one
target = self.get_path_at_pos(int(event.x), int(event.y))
if (target
and event.type == gtk.gdk.BUTTON_PRESS
and not (event.state & (gtk.gdk.CONTROL_MASK|gtk.gdk.SHIFT_MASK))
and self.get_selection().path_is_selected(target[0])):
# disable selection
self.get_selection().set_select_function(lambda *ignore: False)
self.defer_select = target[0]
def on_button_release(self, widget, event):
# re-enable selection
self.get_selection().set_select_function(lambda *ignore: True)
target = self.get_path_at_pos(int(event.x), int(event.y))
if (self.defer_select and target
and self.defer_select == target[0]
and not (event.x==0 and event.y==0)): # certain drag and drop
self.set_cursor(target[0], target[1], False)
self.defer_select=False
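# Minimal usage sketch (an assumption, not part of the original class): the
# click-capturing above only matters once multiple selection and a drag
# source are enabled on the view, e.g.:
#
#   view = MultiDragDropTreeView()
#   view.set_model(gtk.ListStore(str))
#   view.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
#   view.enable_model_drag_source(gtk.gdk.BUTTON1_MASK,
#                                 [('text/plain', 0, 0)],
#                                 gtk.gdk.ACTION_MOVE)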
class MDIWidget(gtk.Layout):
"""
Multiple Document Interface type widget for Gtk.
NOTE: *** This is a work in progress! ***
"""
def __init__(self):
super(MDIWidget, self).__init__()
self.children = []
self.cur_index = -1
self.selected_child = None
self.kbdmouse_mask = 0
self.cascade_offset = 50
self.connect("motion_notify_event", self.motion_notify_event)
self.connect("button_press_event", self.button_press_event)
self.connect("button_release_event", self.button_release_event)
mask = self.get_events()
self.set_events(mask
| gtk.gdk.ENTER_NOTIFY_MASK
| gtk.gdk.LEAVE_NOTIFY_MASK
| gtk.gdk.FOCUS_CHANGE_MASK
| gtk.gdk.STRUCTURE_MASK
| gtk.gdk.BUTTON_PRESS_MASK
| gtk.gdk.BUTTON_RELEASE_MASK
| gtk.gdk.KEY_PRESS_MASK
| gtk.gdk.KEY_RELEASE_MASK
| gtk.gdk.POINTER_MOTION_MASK
#| gtk.gdk.POINTER_MOTION_HINT_MASK
| gtk.gdk.SCROLL_MASK)
def append_page(self, widget, label):
vbox = gtk.VBox()
vbox.set_border_width(4)
evbox = gtk.EventBox()
evbox.add(label)
#evbox.modify_fg(gtk.STATE_NORMAL, gtk.gdk.color_parse("yellow"))
evbox.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse("skyblue"))
vbox.pack_start(evbox, fill=False, expand=False, padding=0)
vbox.pack_start(widget, fill=True, expand=True, padding=4)
frame = gtk.EventBox()
frame.set_size_request(300, 300)
frame.props.visible_window = True
frame.set_border_width(0)
frame.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse("palegreen1"))
frame.add(vbox)
frame.show_all()
pos = self.get_widget_position(frame)
if pos is None:
x, y = 0, 0
else:
x, y = pos
wd, ht = self.get_widget_size(frame)
subwin = Bunch.Bunch(widget=widget, label=evbox, frame=frame,
x=x, y=y, width=wd, height=ht)
self.children.append(subwin)
evbox.connect("button_press_event", self.select_child_cb, subwin)
frame.connect("button_press_event", self.start_resize_cb, subwin)
self.put(frame, self.cascade_offset, self.cascade_offset)
def set_tab_reorderable(self, w, tf):
pass
def set_tab_detachable(self, w, tf):
pass
def get_tab_label(self, w):
return None
def page_num(self, widget):
index, subwin = self._widget_to_index(widget)
return index
def get_nth_page(self, idx):
if 0 <= idx < len(self.children):
subwin = self.children[idx]
return subwin.widget
return None
def set_current_page(self, idx):
subwin = self.children[idx]
frame = subwin.frame
#frame.show()
self.raise_widget(subwin)
self.cur_index = idx
def get_current_page(self):
return self.cur_index
def _widget_to_index(self, widget):
index = 0
for subwin in self.children:
if subwin.widget == widget:
return index, subwin
index += 1
return -1, None
def remove_page(self, idx):
subwin = self.children[idx]
self.remove(subwin.widget)
def remove(self, widget):
idx, subwin = self._widget_to_index(widget)
if subwin is not None:
self.children.remove(subwin)
self.cur_index = -1
frame = subwin.frame
super(MDIWidget, self).remove(frame)
widget.unparent()
def get_widget_position(self, widget):
x, y, width, height = widget.get_allocation()
return x, y
def get_widget_size(self, widget):
x, y, width, height = widget.get_allocation()
return width, height
def raise_widget(self, subwin):
frame = subwin.frame
# Hack to bring widget to the top--no documentation on any other
# way to accomplish this
super(MDIWidget, self).remove(frame)
frame.unparent()
self.put(frame, subwin.x, subwin.y)
def select_child_cb(self, layout, event, subwin):
ex, ey = event.x_root, event.y_root
x, y = self.get_widget_position(subwin.frame)
subwin.x, subwin.y = x, y
# make this the selected widget
idx = self.page_num(subwin.widget)
if idx >= 0:
self.set_current_page(idx)
self.selected_child = Bunch.Bunch(subwin=subwin, action='move',
cr = self.setup_cr(self.bin_window),
x_origin=x, y_origin=y, dx=ex, dy=ey)
return True
def start_resize_cb(self, widget, event, subwin):
ex, ey = event.x_root, event.y_root
x, y = self.get_widget_position(subwin.frame)
subwin.x, subwin.y = x, y
self.selected_child = Bunch.Bunch(subwin=subwin, action='resize',
cr = self.setup_cr(self.bin_window),
x_origin=x, y_origin=y, dx=ex, dy=ey)
return True
def button_press_event(self, widget, event):
# event.button, event.x, event.y
x = event.x; y = event.y
button = self.kbdmouse_mask
if event.button != 0:
button |= 0x1 << (event.button - 1)
return True
def setup_cr(self, drawable):
cr = drawable.cairo_create()
cr.set_line_width(2)
cr.set_dash([ 3.0, 4.0, 6.0, 4.0], 5.0)
return cr
def button_release_event(self, widget, event):
# event.button, event.x, event.y
x = event.x_root; y = event.y_root
button = self.kbdmouse_mask
if event.button != 0:
button |= 0x1 << (event.button - 1)
if self.selected_child is not None:
bnch = self.selected_child
subwin = bnch.subwin
if bnch.action == 'move':
x = int(subwin.x + (x - bnch.dx))
y = int(subwin.y + (y - bnch.dy))
self.move(subwin.frame, x, y)
subwin.x, subwin.y = self.get_widget_position(subwin.frame)
elif bnch.action == 'resize':
wd = int(subwin.width + (x - bnch.dx))
ht = int(subwin.height + (y - bnch.dy))
subwin.frame.set_size_request(wd, ht)
subwin.width, subwin.height = self.get_widget_size(subwin.frame)
self.selected_child = None
return True
def motion_notify_event(self, widget, event):
button = self.kbdmouse_mask
if event.is_hint:
return
else:
x, y, state = event.x_root, event.y_root, event.state
if state & gtk.gdk.BUTTON1_MASK:
button |= 0x1
elif state & gtk.gdk.BUTTON2_MASK:
button |= 0x2
elif state & gtk.gdk.BUTTON3_MASK:
button |= 0x4
if (button & 0x1) and (self.selected_child is not None):
bnch = self.selected_child
subwin = bnch.subwin
if bnch.action == 'move':
x = int(subwin.x + (x - bnch.dx))
y = int(subwin.y + (y - bnch.dy))
self.move(subwin.frame, x, y)
elif bnch.action == 'resize':
wd = int(subwin.width + (x - bnch.dx))
ht = int(subwin.height + (y - bnch.dy))
subwin.frame.set_size_request(wd, ht)
return True
def tile_pages(self):
pass
def cascade_pages(self):
x, y = 0, 0
for subwin in self.children:
self.move(subwin.frame, x, y)
x += self.cascade_offset
y += self.cascade_offset
def use_tabs(self, tf):
pass
class FileSelection(object):
def __init__(self, parent_w, action=gtk.FILE_CHOOSER_ACTION_OPEN,
title="Select a file"):
self.parent = parent_w
# Create a new file selection widget
self.filew = gtk.FileChooserDialog(title=title, action=action)
self.filew.connect("destroy", self.close)
if action == gtk.FILE_CHOOSER_ACTION_SAVE:
self.filew.add_buttons(gtk.STOCK_SAVE, 1, gtk.STOCK_CANCEL, 0)
else:
self.filew.add_buttons(gtk.STOCK_OPEN, 1, gtk.STOCK_CANCEL, 0)
self.filew.set_default_response(1)
self.filew.connect("response", self.file_ok_sel)
# Connect the cancel_button to destroy the widget
#self.filew.cancel_button.connect("clicked", self.close)
def popup(self, title, callfn, initialdir=None, filename=None):
"""Let user select and load file."""
self.cb = callfn
self.filew.set_title(title)
if initialdir:
self.filew.set_current_folder(initialdir)
if filename:
#self.filew.set_filename(filename)
self.filew.set_current_name(filename)
self.filew.show()
# Get the selected filename
def file_ok_sel(self, w, rsp):
self.close(w)
if rsp == 0:
return
filepath = self.filew.get_filename()
self.cb(filepath)
def close(self, widget):
self.filew.hide()
class DirectorySelection(FileSelection):
"""Handle directory selection dialog."""
def __init__(self, parent_w):
super(DirectorySelection, self).__init__(
parent_w, action=gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER,
title="Select a directory")
def popup(self, title, callfn, initialdir=None):
"""Let user select a directory."""
super(DirectorySelection, self).popup(title, callfn, initialdir)
def combo_box_new_text():
liststore = gtk.ListStore(gobject.TYPE_STRING)
combobox = ComboBox()
combobox.set_model(liststore)
cell = gtk.CellRendererText()
combobox.pack_start(cell, True)
combobox.add_attribute(cell, 'text', 0)
return combobox
def get_scroll_info(event):
"""
Returns the (degrees, direction) of a scroll motion Gtk event.
"""
direction = None
if event.direction == gtk.gdk.SCROLL_UP:
direction = 0.0
elif event.direction == gtk.gdk.SCROLL_DOWN:
direction = 180.0
elif event.direction == gtk.gdk.SCROLL_LEFT:
direction = 270.0
elif event.direction == gtk.gdk.SCROLL_RIGHT:
direction = 90.0
# TODO: does Gtk encode the amount of scroll?
# 15 deg is standard 1-click turn for a wheel mouse
degrees = 15.0
return (degrees, direction)
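# Illustrative sketch (an assumption, not part of the original module):
# typical use is inside a "scroll_event" handler wired to a drawing widget,
# where a single wheel click upwards yields (15.0, 0.0):
#
#   def scroll_event_cb(widget, event):
#       degrees, direction = get_scroll_info(event)
#       ...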
def get_icon(iconpath, size=None):
if size is not None:
wd, ht = size
else:
wd, ht = 24, 24
pixbuf = pixbuf_new_from_file_at_size(iconpath, wd, ht)
return pixbuf
def get_font(font_family, point_size):
font = pango.FontDescription('%s %d' % (font_family, point_size))
return font
def pixbuf_new_from_xpm_data(xpm_data):
return gtk.gdk.pixbuf_new_from_xpm_data(xpm_data)
def pixbuf_new_from_array(data, rgbtype, bpp):
return gtk.gdk.pixbuf_new_from_array(data, rgbtype, bpp)
def pixbuf_new_from_data(rgb_buf, rgbtype, hasAlpha, bpp, dawd, daht, stride):
return gtk.gdk.pixbuf_new_from_data(rgb_buf, rgbtype, hasAlpha, bpp,
dawd, daht, stride)
def pixbuf_new_from_file_at_size(foldericon, width, height):
return gtk.gdk.pixbuf_new_from_file_at_size(foldericon,
width, height)
def make_cursor(widget, iconpath, x, y):
pixbuf = gtk.gdk.pixbuf_new_from_file(iconpath)
screen = widget.get_screen()
display = screen.get_display()
return gtk.gdk.Cursor(display, pixbuf, x, y)
def set_default_style():
module_home = os.path.split(sys.modules[__name__].__file__)[0]
gtk_rc = os.path.join(module_home, 'gtk_rc')
with open(gtk_rc, 'rb') as rc_f:
rc_data = rc_f.read()
    gtk.rc_parse_string(rc_data)
#END
|
py | 7dfd5c3c67e1b4497203205dc51283135cccf641 | from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import PyQt5.sip
class canvas_win(QWidget):
def __init__(self):
super().__init__()
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
self.setAttribute(QtCore.Qt.WA_TransparentForMouseEvents, True)
#self.setStyleSheet('QWidget {background-color: #FFFFFF; color: #000000;}')
class canvas_label(QLabel):
def __init__(self, parent):
QLabel.__init__(self, parent)
# self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.BypassWindowManagerHint | QtCore.Qt.Tool | QtCore.Qt.WindowStaysOnTopHint)
self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
self.setAttribute(QtCore.Qt.WA_TransparentForMouseEvents, True)
self.setStyleSheet('QLabel {background-color: #FFFFFF; color: #000000;}')
class canvas_label_back(QLabel):
def __init__(self, parent):
QLabel.__init__(self, parent)
self.setStyleSheet('QLabel {background-color: #FFFFFE; color: #FFFFFE;}') |
py | 7dfd5ced0111879f9efad386abe942af10582f65 | # -*- coding: utf-8 -*-
'''
Manage information about regular files, directories,
and special files on the minion, set/read user,
group, mode, and data
'''
# TODO: We should add the capability to do u+r type operations here
# some time in the future
from __future__ import absolute_import, print_function
# Import python libs
import datetime
import difflib
import errno
import fileinput
import fnmatch
import itertools
import logging
import operator
import os
import re
import shutil
import stat
import string
import sys
import tempfile
import time
import glob
import hashlib
import mmap
from collections import Iterable, Mapping
from functools import reduce # pylint: disable=redefined-builtin
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext import six
from salt.ext.six.moves import range, zip
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse
# pylint: enable=import-error,no-name-in-module,redefined-builtin
try:
import grp
import pwd
except ImportError:
pass
# Import salt libs
import salt.utils
import salt.utils.args
import salt.utils.atomicfile
import salt.utils.filebuffer
import salt.utils.files
import salt.utils.find
import salt.utils.itertools
import salt.utils.locales
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.templates
import salt.utils.url
from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError, get_error_message as _get_error_message
from salt.utils.files import HASHES, HASHES_REVMAP
log = logging.getLogger(__name__)
__func_alias__ = {
'makedirs_': 'makedirs'
}
def __virtual__():
'''
Only work on POSIX-like systems
'''
# win_file takes care of windows
if salt.utils.platform.is_windows():
return (
False,
'The file execution module cannot be loaded: only available on '
'non-Windows systems - use win_file instead.'
)
return True
def __clean_tmp(sfn):
'''
Clean out a template temp file
'''
if sfn.startswith(os.path.join(tempfile.gettempdir(),
salt.utils.files.TEMPFILE_PREFIX)):
# Don't remove if it exists in file_roots (any saltenv)
all_roots = itertools.chain.from_iterable(
six.itervalues(__opts__['file_roots']))
in_roots = any(sfn.startswith(root) for root in all_roots)
# Only clean up files that exist
if os.path.exists(sfn) and not in_roots:
os.remove(sfn)
def _error(ret, err_msg):
'''
Common function for setting error information for return dicts
'''
ret['result'] = False
ret['comment'] = err_msg
return ret
def _binary_replace(old, new):
'''
This function does NOT do any diffing, it just checks the old and new files
to see if either is binary, and provides an appropriate string noting the
difference between the two files. If neither file is binary, an empty
string is returned.
This function should only be run AFTER it has been determined that the
files differ.
'''
old_isbin = not __utils__['files.is_text_file'](old)
new_isbin = not __utils__['files.is_text_file'](new)
if any((old_isbin, new_isbin)):
if all((old_isbin, new_isbin)):
return u'Replace binary file'
elif old_isbin:
return u'Replace binary file with text file'
elif new_isbin:
return u'Replace text file with binary file'
return u''
def _get_bkroot():
'''
Get the location of the backup dir in the minion cache
'''
# Get the cachedir from the minion config
return os.path.join(__salt__['config.get']('cachedir'), 'file_backup')
def _splitlines_preserving_trailing_newline(str):
'''
Returns a list of the lines in the string, breaking at line boundaries and
preserving a trailing newline (if present).
    Essentially, this works like ``str.splitlines()`` but preserves an
empty line at the end. This is equivalent to the following code:
.. code-block:: python
lines = str.splitlines()
if str.endswith('\n') or str.endswith('\r'):
lines.append('')
'''
lines = str.splitlines()
if str.endswith('\n') or str.endswith('\r'):
lines.append('')
return lines
def gid_to_group(gid):
'''
Convert the group id to the group name on this system
gid
gid to convert to a group name
CLI Example:
.. code-block:: bash
salt '*' file.gid_to_group 0
'''
try:
gid = int(gid)
except ValueError:
# This is not an integer, maybe it's already the group name?
gid = group_to_gid(gid)
if gid == '':
# Don't even bother to feed it to grp
return ''
try:
return grp.getgrgid(gid).gr_name
except (KeyError, NameError):
# If group is not present, fall back to the gid.
return gid
def group_to_gid(group):
'''
Convert the group to the gid on this system
group
group to convert to its gid
CLI Example:
.. code-block:: bash
salt '*' file.group_to_gid root
'''
if group is None:
return ''
try:
if isinstance(group, int):
return group
return grp.getgrnam(group).gr_gid
except KeyError:
return ''
def get_gid(path, follow_symlinks=True):
'''
Return the id of the group that owns a given file
path
file or directory of which to get the gid
follow_symlinks
        indicates if symlinks should be followed
CLI Example:
.. code-block:: bash
salt '*' file.get_gid /etc/passwd
.. versionchanged:: 0.16.4
``follow_symlinks`` option added
'''
return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get('gid', -1)
def get_group(path, follow_symlinks=True):
'''
Return the group that owns a given file
path
file or directory of which to get the group
follow_symlinks
        indicates if symlinks should be followed
CLI Example:
.. code-block:: bash
salt '*' file.get_group /etc/passwd
.. versionchanged:: 0.16.4
``follow_symlinks`` option added
'''
return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get('group', False)
def uid_to_user(uid):
'''
Convert a uid to a user name
uid
uid to convert to a username
CLI Example:
.. code-block:: bash
salt '*' file.uid_to_user 0
'''
try:
return pwd.getpwuid(uid).pw_name
except (KeyError, NameError):
# If user is not present, fall back to the uid.
return uid
def user_to_uid(user):
'''
Convert user name to a uid
user
user name to convert to its uid
CLI Example:
.. code-block:: bash
salt '*' file.user_to_uid root
'''
if user is None:
user = salt.utils.get_user()
try:
if isinstance(user, int):
return user
return pwd.getpwnam(user).pw_uid
except KeyError:
return ''
def get_uid(path, follow_symlinks=True):
'''
Return the id of the user that owns a given file
path
file or directory of which to get the uid
follow_symlinks
        indicates if symlinks should be followed
CLI Example:
.. code-block:: bash
salt '*' file.get_uid /etc/passwd
.. versionchanged:: 0.16.4
``follow_symlinks`` option added
'''
return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get('uid', -1)
def get_user(path, follow_symlinks=True):
'''
Return the user that owns a given file
path
file or directory of which to get the user
follow_symlinks
        indicates if symlinks should be followed
CLI Example:
.. code-block:: bash
salt '*' file.get_user /etc/passwd
.. versionchanged:: 0.16.4
``follow_symlinks`` option added
'''
return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get('user', False)
def get_mode(path, follow_symlinks=True):
'''
Return the mode of a file
path
file or directory of which to get the mode
follow_symlinks
        indicates if symlinks should be followed
CLI Example:
.. code-block:: bash
salt '*' file.get_mode /etc/passwd
.. versionchanged:: 2014.1.0
``follow_symlinks`` option added
'''
return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get('mode', '')
def set_mode(path, mode):
'''
Set the mode of a file
path
file or directory of which to set the mode
mode
mode to set the path to
CLI Example:
.. code-block:: bash
salt '*' file.set_mode /etc/passwd 0644
'''
path = os.path.expanduser(path)
mode = str(mode).lstrip('0Oo')
if not mode:
mode = '0'
if not os.path.exists(path):
raise CommandExecutionError('{0}: File not found'.format(path))
try:
os.chmod(path, int(mode, 8))
except Exception:
return 'Invalid Mode ' + mode
return get_mode(path)
def lchown(path, user, group):
'''
    Change the user and group ownership of a file without following
    symlinks.
path
path to the file or directory
user
user owner
group
group owner
CLI Example:
.. code-block:: bash
        salt '*' file.lchown /etc/passwd root root
'''
path = os.path.expanduser(path)
uid = user_to_uid(user)
gid = group_to_gid(group)
err = ''
if uid == '':
if user:
err += 'User does not exist\n'
else:
uid = -1
if gid == '':
if group:
err += 'Group does not exist\n'
else:
gid = -1
    if err:
        return err
    return os.lchown(path, uid, gid)
def chown(path, user, group):
'''
    Change the user and group ownership of a file.
path
path to the file or directory
user
user owner
group
group owner
CLI Example:
.. code-block:: bash
salt '*' file.chown /etc/passwd root root
'''
path = os.path.expanduser(path)
uid = user_to_uid(user)
gid = group_to_gid(group)
err = ''
if uid == '':
if user:
err += 'User does not exist\n'
else:
uid = -1
if gid == '':
if group:
err += 'Group does not exist\n'
else:
gid = -1
if not os.path.exists(path):
try:
# Broken symlinks will return false, but still need to be chowned
return os.lchown(path, uid, gid)
except OSError:
pass
err += 'File not found'
if err:
return err
return os.chown(path, uid, gid)
def chgrp(path, group):
'''
Change the group of a file
path
path to the file or directory
group
group owner
CLI Example:
.. code-block:: bash
salt '*' file.chgrp /etc/passwd root
'''
path = os.path.expanduser(path)
user = get_user(path)
return chown(path, user, group)
def _cmp_attrs(path, attrs):
'''
    .. versionadded:: Oxygen
Compare attributes of a given file to given attributes.
    Returns a pair (list) where the first item contains the attributes to
    add and the second the attributes to remove.
path
path to file to compare attributes with.
attrs
string of attributes to compare against a given file
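    A rough illustration (hypothetical file whose ``lsattr`` output reports
    the attributes ``acd``):
    .. code-block:: python
        _cmp_attrs('/path/to/file', 'ai')   # ['i', 'cd'] -- add 'i', drop 'cd'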
'''
diff = [None, None]
lattrs = lsattr(path).get(path, '')
old = [chr for chr in lattrs if chr not in attrs]
if len(old) > 0:
diff[1] = ''.join(old)
new = [chr for chr in attrs if chr not in lattrs]
if len(new) > 0:
diff[0] = ''.join(new)
return diff
def lsattr(path):
'''
    .. versionadded:: Oxygen
Obtain the modifiable attributes of the given file. If path
    points to a directory, an empty list is returned.
path
path to file to obtain attributes of. File/directory must exist.
CLI Example:
.. code-block:: bash
salt '*' file.lsattr foo1.txt
'''
if not os.path.exists(path):
raise SaltInvocationError("File or directory does not exist.")
cmd = ['lsattr', path]
result = __salt__['cmd.run'](cmd, python_shell=False)
results = {}
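    # Each line of lsattr(1) output looks roughly like (illustrative):
    #   ----i--------e---- /path/to/file
    # vals[0] is the attribute column and vals[1] the path; only the
    # user-modifiable attribute characters [acdijstuADST] are recorded.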
for line in result.splitlines():
if not line.startswith('lsattr'):
vals = line.split(None, 1)
results[vals[1]] = re.findall(r"[acdijstuADST]", vals[0])
return results
def chattr(*args, **kwargs):
'''
    .. versionadded:: Oxygen
Change the attributes of files
*args
list of files to modify attributes of
**kwargs - the following are valid <key,value> pairs:
operator
add|remove
determines whether attributes should be added or removed from files
attributes
acdijstuADST
string of characters representing attributes to add/remove from files
version
a version number to assign to the files
flags
[RVf]
flags to assign to chattr (recurse, verbose, suppress most errors)
CLI Example:
.. code-block:: bash
salt '*' file.chattr foo1.txt foo2.txt operator=add attributes=ai
salt '*' file.chattr foo3.txt operator=remove attributes=i version=2
'''
args = [arg if salt.utils.stringutils.is_quoted(arg) else '"{0}"'.format(arg)
for arg in args]
operator = kwargs.pop('operator', None)
attributes = kwargs.pop('attributes', None)
flags = kwargs.pop('flags', None)
version = kwargs.pop('version', None)
if (operator is None) or (operator not in ['add', 'remove']):
raise SaltInvocationError(
"Need an operator: 'add' or 'remove' to modify attributes.")
if attributes is None:
raise SaltInvocationError("Need attributes: [AacDdijsTtSu]")
if operator == "add":
attrs = '+{0}'.format(attributes)
elif operator == "remove":
attrs = '-{0}'.format(attributes)
flgs = ''
if flags is not None:
flgs = '-{0}'.format(flags)
vrsn = ''
if version is not None:
vrsn = '-v {0}'.format(version)
cmd = 'chattr {0} {1} {2} {3}'.format(attrs, flgs, vrsn, ' '.join(args))
result = __salt__['cmd.run'](cmd, python_shell=False)
if bool(result):
raise CommandExecutionError(
"chattr failed to run, possibly due to bad parameters.")
return True
def get_sum(path, form='sha256'):
'''
Return the checksum for the given file. The following checksum algorithms
are supported:
* md5
* sha1
* sha224
* sha256 **(default)**
* sha384
* sha512
path
path to the file or directory
form
desired sum format
CLI Example:
.. code-block:: bash
salt '*' file.get_sum /etc/passwd sha512
'''
path = os.path.expanduser(path)
if not os.path.isfile(path):
return 'File not found'
return salt.utils.get_hash(path, form, 4096)
def get_hash(path, form='sha256', chunk_size=65536):
'''
Get the hash sum of a file
This is better than ``get_sum`` for the following reasons:
- It does not read the entire file into memory.
- It does not return a string on error. The returned value of
``get_sum`` cannot really be trusted since it is vulnerable to
collisions: ``get_sum(..., 'xyz') == 'Hash xyz not supported'``
path
path to the file or directory
form
desired sum format
chunk_size
amount to sum at once
CLI Example:
.. code-block:: bash
salt '*' file.get_hash /etc/shadow
'''
return salt.utils.get_hash(os.path.expanduser(path), form, chunk_size)
def get_source_sum(file_name='',
source='',
source_hash=None,
source_hash_name=None,
saltenv='base'):
'''
.. versionadded:: 2016.11.0
Used by :py:func:`file.get_managed <salt.modules.file.get_managed>` to
obtain the hash and hash type from the parameters specified below.
file_name
Optional file name being managed, for matching with
:py:func:`file.extract_hash <salt.modules.file.extract_hash>`.
source
Source file, as used in :py:mod:`file <salt.states.file>` and other
states. If ``source_hash`` refers to a file containing hashes, then
this filename will be used to match a filename in that file. If the
``source_hash`` is a hash expression, then this argument will be
ignored.
source_hash
Hash file/expression, as used in :py:mod:`file <salt.states.file>` and
other states. If this value refers to a remote URL or absolute path to
a local file, it will be cached and :py:func:`file.extract_hash
<salt.modules.file.extract_hash>` will be used to obtain a hash from
it.
source_hash_name
Specific file name to look for when ``source_hash`` refers to a remote
file, used to disambiguate ambiguous matches.
saltenv : base
Salt fileserver environment from which to retrieve the source_hash. This
value will only be used when ``source_hash`` refers to a file on the
Salt fileserver (i.e. one beginning with ``salt://``).
CLI Example:
.. code-block:: bash
salt '*' file.get_source_sum /tmp/foo.tar.gz source=http://mydomain.tld/foo.tar.gz source_hash=499ae16dcae71eeb7c3a30c75ea7a1a6
salt '*' file.get_source_sum /tmp/foo.tar.gz source=http://mydomain.tld/foo.tar.gz source_hash=https://mydomain.tld/hashes.md5
salt '*' file.get_source_sum /tmp/foo.tar.gz source=http://mydomain.tld/foo.tar.gz source_hash=https://mydomain.tld/hashes.md5 source_hash_name=./dir2/foo.tar.gz
'''
def _invalid_source_hash_format():
'''
DRY helper for reporting invalid source_hash input
'''
raise CommandExecutionError(
'Source hash {0} format is invalid. The supported formats are: '
'1) a hash, 2) an expression in the format <hash_type>=<hash>, or '
'3) either a path to a local file containing hashes, or a URI of '
'a remote hash file. Supported protocols for remote hash files '
'are: {1}. The hash may also not be of a valid length, the '
'following are supported hash types and lengths: {2}.'.format(
source_hash,
', '.join(salt.utils.files.VALID_PROTOS),
', '.join(
['{0} ({1})'.format(HASHES_REVMAP[x], x)
for x in sorted(HASHES_REVMAP)]
),
)
)
hash_fn = None
if os.path.isabs(source_hash):
hash_fn = source_hash
else:
try:
proto = _urlparse(source_hash).scheme
if proto in salt.utils.files.VALID_PROTOS:
hash_fn = __salt__['cp.cache_file'](source_hash, saltenv)
if not hash_fn:
raise CommandExecutionError(
'Source hash file {0} not found'.format(source_hash)
)
else:
if proto != '':
# Some unsupported protocol (e.g. foo://) is being used.
                    # We'll also end up in this else block if a hash
                    # expression (like md5=<md5 checksum here>) was passed;
                    # in that case the protocol is an empty string and we
                    # avoid this error condition.
_invalid_source_hash_format()
except (AttributeError, TypeError):
_invalid_source_hash_format()
if hash_fn is not None:
ret = extract_hash(hash_fn, '', file_name, source, source_hash_name)
if ret is None:
_invalid_source_hash_format()
return ret
else:
# The source_hash is a hash expression
ret = {}
try:
ret['hash_type'], ret['hsum'] = \
[x.strip() for x in source_hash.split('=', 1)]
except AttributeError:
_invalid_source_hash_format()
except ValueError:
# No hash type, try to figure out by hash length
if not re.match('^[{0}]+$'.format(string.hexdigits), source_hash):
_invalid_source_hash_format()
ret['hsum'] = source_hash
source_hash_len = len(source_hash)
if source_hash_len in HASHES_REVMAP:
ret['hash_type'] = HASHES_REVMAP[source_hash_len]
else:
_invalid_source_hash_format()
if ret['hash_type'] not in HASHES:
raise CommandExecutionError(
'Invalid hash type \'{0}\'. Supported hash types are: {1}. '
'Either remove the hash type and simply use \'{2}\' as the '
'source_hash, or change the hash type to a supported type.'
.format(ret['hash_type'], ', '.join(HASHES), ret['hsum'])
)
else:
hsum_len = len(ret['hsum'])
if hsum_len not in HASHES_REVMAP:
_invalid_source_hash_format()
elif hsum_len != HASHES[ret['hash_type']]:
raise CommandExecutionError(
'Invalid length ({0}) for hash type \'{1}\'. Either '
'remove the hash type and simply use \'{2}\' as the '
'source_hash, or change the hash type to \'{3}\''.format(
hsum_len,
ret['hash_type'],
ret['hsum'],
HASHES_REVMAP[hsum_len],
)
)
return ret
def check_hash(path, file_hash):
'''
Check if a file matches the given hash string
Returns ``True`` if the hash matches, otherwise ``False``.
path
Path to a file local to the minion.
    file_hash
The hash to check against the file specified in the ``path`` argument.
.. versionchanged:: 2016.11.4
For this and newer versions the hash can be specified without an
accompanying hash type (e.g. ``e138491e9d5b97023cea823fe17bac22``),
but for earlier releases it is necessary to also specify the hash type
in the format ``<hash_type>=<hash_value>`` (e.g.
``md5=e138491e9d5b97023cea823fe17bac22``).
CLI Example:
.. code-block:: bash
salt '*' file.check_hash /etc/fstab e138491e9d5b97023cea823fe17bac22
salt '*' file.check_hash /etc/fstab md5=e138491e9d5b97023cea823fe17bac22
'''
path = os.path.expanduser(path)
if not isinstance(file_hash, six.string_types):
raise SaltInvocationError('hash must be a string')
for sep in (':', '='):
if sep in file_hash:
hash_type, hash_value = file_hash.split(sep, 1)
break
else:
hash_value = file_hash
hash_len = len(file_hash)
hash_type = HASHES_REVMAP.get(hash_len)
if hash_type is None:
raise SaltInvocationError(
'Hash {0} (length: {1}) could not be matched to a supported '
'hash type. The supported hash types and lengths are: '
'{2}'.format(
file_hash,
hash_len,
', '.join(
['{0} ({1})'.format(HASHES_REVMAP[x], x)
for x in sorted(HASHES_REVMAP)]
),
)
)
return get_hash(path, hash_type) == hash_value
def find(path, *args, **kwargs):
'''
Approximate the Unix ``find(1)`` command and return a list of paths that
meet the specified criteria.
The options include match criteria:
.. code-block:: text
name = path-glob # case sensitive
iname = path-glob # case insensitive
regex = path-regex # case sensitive
iregex = path-regex # case insensitive
type = file-types # match any listed type
user = users # match any listed user
group = groups # match any listed group
size = [+-]number[size-unit] # default unit = byte
mtime = interval # modified since date
grep = regex # search file contents
and/or actions:
.. code-block:: text
delete [= file-types] # default type = 'f'
exec = command [arg ...] # where {} is replaced by pathname
print [= print-opts]
and/or depth criteria:
.. code-block:: text
        maxdepth = maximum depth to traverse in path
        mindepth = minimum depth to traverse before checking files or directories
The default action is ``print=path``
``path-glob``:
.. code-block:: text
* = match zero or more chars
? = match any char
[abc] = match a, b, or c
[!abc] or [^abc] = match anything except a, b, and c
[x-y] = match chars x through y
[!x-y] or [^x-y] = match anything except chars x through y
{a,b,c} = match a or b or c
``path-regex``: a Python Regex (regular expression) pattern to match pathnames
``file-types``: a string of one or more of the following:
.. code-block:: text
a: all file types
b: block device
c: character device
d: directory
p: FIFO (named pipe)
f: plain file
l: symlink
s: socket
``users``: a space and/or comma separated list of user names and/or uids
``groups``: a space and/or comma separated list of group names and/or gids
``size-unit``:
.. code-block:: text
b: bytes
k: kilobytes
m: megabytes
g: gigabytes
t: terabytes
interval:
.. code-block:: text
[<num>w] [<num>d] [<num>h] [<num>m] [<num>s]
where:
w: week
d: day
h: hour
m: minute
s: second
print-opts: a comma and/or space separated list of one or more of the
following:
.. code-block:: text
group: group name
md5: MD5 digest of file contents
mode: file permissions (as integer)
mtime: last modification time (as time_t)
name: file basename
path: file absolute path
size: file size in bytes
type: file type
user: user name
CLI Examples:
.. code-block:: bash
salt '*' file.find / type=f name=\\*.bak size=+10m
salt '*' file.find /var mtime=+30d size=+10m print=path,size,mtime
salt '*' file.find /var/log name=\\*.[0-9] mtime=+30d size=+10m delete
'''
if 'delete' in args:
kwargs['delete'] = 'f'
elif 'print' in args:
kwargs['print'] = 'path'
try:
finder = salt.utils.find.Finder(kwargs)
except ValueError as ex:
return 'error: {0}'.format(ex)
ret = [item for i in [finder.find(p) for p in glob.glob(os.path.expanduser(path))] for item in i]
ret.sort()
return ret
def _sed_esc(string, escape_all=False):
'''
Escape single quotes and forward slashes
'''
special_chars = "^.[$()|*+?{"
string = string.replace("'", "'\"'\"'").replace("/", "\\/")
if escape_all is True:
for char in special_chars:
string = string.replace(char, "\\" + char)
return string
def sed(path,
before,
after,
limit='',
backup='.bak',
options='-r -e',
flags='g',
escape_all=False,
negate_match=False):
'''
.. deprecated:: 0.17.0
Use :py:func:`~salt.modules.file.replace` instead.
Make a simple edit to a file
Equivalent to:
.. code-block:: bash
sed <backup> <options> "/<limit>/ s/<before>/<after>/<flags> <file>"
path
The full path to the file to be edited
before
A pattern to find in order to replace with ``after``
after
Text that will replace ``before``
limit : ``''``
An initial pattern to search for before searching for ``before``
backup : ``.bak``
The file will be backed up before edit with this file extension;
        **WARNING:** each time ``sed``/``comment``/``uncomment`` is called,
        this backup will be overwritten
options : ``-r -e``
Options to pass to sed
flags : ``g``
Flags to modify the sed search; e.g., ``i`` for case-insensitive pattern
matching
negate_match : False
Negate the search command (``!``)
.. versionadded:: 0.17.0
Forward slashes and single quotes will be escaped automatically in the
``before`` and ``after`` patterns.
CLI Example:
.. code-block:: bash
salt '*' file.sed /etc/httpd/httpd.conf 'LogLevel warn' 'LogLevel info'
'''
# Largely inspired by Fabric's contrib.files.sed()
# XXX:dc: Do we really want to always force escaping?
#
path = os.path.expanduser(path)
if not os.path.exists(path):
return False
# Mandate that before and after are strings
before = str(before)
after = str(after)
before = _sed_esc(before, escape_all)
after = _sed_esc(after, escape_all)
limit = _sed_esc(limit, escape_all)
if sys.platform == 'darwin':
options = options.replace('-r', '-E')
cmd = ['sed']
cmd.append('-i{0}'.format(backup) if backup else '-i')
cmd.extend(salt.utils.args.shlex_split(options))
cmd.append(
r'{limit}{negate_match}s/{before}/{after}/{flags}'.format(
limit='/{0}/ '.format(limit) if limit else '',
negate_match='!' if negate_match else '',
before=before,
after=after,
flags=flags
)
)
cmd.append(path)
return __salt__['cmd.run_all'](cmd, python_shell=False)
def sed_contains(path,
text,
limit='',
flags='g'):
'''
.. deprecated:: 0.17.0
Use :func:`search` instead.
Return True if the file at ``path`` contains ``text``. Utilizes sed to
perform the search (line-wise search).
Note: the ``p`` flag will be added to any flags you pass in.
CLI Example:
.. code-block:: bash
        salt '*' file.sed_contains /etc/crontab 'mymaintenance.sh'
'''
# Largely inspired by Fabric's contrib.files.contains()
path = os.path.expanduser(path)
if not os.path.exists(path):
return False
before = _sed_esc(str(text), False)
limit = _sed_esc(str(limit), False)
options = '-n -r -e'
if sys.platform == 'darwin':
options = options.replace('-r', '-E')
cmd = ['sed']
cmd.extend(salt.utils.args.shlex_split(options))
cmd.append(
r'{limit}s/{before}/$/{flags}'.format(
limit='/{0}/ '.format(limit) if limit else '',
before=before,
flags='p{0}'.format(flags)
)
)
cmd.append(path)
result = __salt__['cmd.run'](cmd, python_shell=False)
return bool(result)
def psed(path,
before,
after,
limit='',
backup='.bak',
flags='gMS',
escape_all=False,
multi=False):
'''
.. deprecated:: 0.17.0
Use :py:func:`~salt.modules.file.replace` instead.
Make a simple edit to a file (pure Python version)
Equivalent to:
.. code-block:: bash
sed <backup> <options> "/<limit>/ s/<before>/<after>/<flags> <file>"
path
The full path to the file to be edited
before
A pattern to find in order to replace with ``after``
after
Text that will replace ``before``
limit : ``''``
An initial pattern to search for before searching for ``before``
backup : ``.bak``
The file will be backed up before edit with this file extension;
        **WARNING:** each time ``sed``/``comment``/``uncomment`` is called,
        this backup will be overwritten
flags : ``gMS``
Flags to modify the search. Valid values are:
- ``g``: Replace all occurrences of the pattern, not just the first.
- ``I``: Ignore case.
- ``L``: Make ``\\w``, ``\\W``, ``\\b``, ``\\B``, ``\\s`` and ``\\S``
dependent on the locale.
          - ``M``: Multiline matching: make ``^`` and ``$`` also match at the
            start and end of each line.
- ``S``: Make `.` match all characters, including newlines.
- ``U``: Make ``\\w``, ``\\W``, ``\\b``, ``\\B``, ``\\d``, ``\\D``,
``\\s`` and ``\\S`` dependent on Unicode.
- ``X``: Verbose (whitespace is ignored).
multi: ``False``
If True, treat the entire file as a single line
Forward slashes and single quotes will be escaped automatically in the
``before`` and ``after`` patterns.
CLI Example:
.. code-block:: bash
        salt '*' file.psed /etc/httpd/httpd.conf 'LogLevel warn' 'LogLevel info'
'''
# Largely inspired by Fabric's contrib.files.sed()
# XXX:dc: Do we really want to always force escaping?
#
# Mandate that before and after are strings
path = os.path.expanduser(path)
multi = bool(multi)
before = str(before)
after = str(after)
before = _sed_esc(before, escape_all)
# The pattern to replace with does not need to be escaped!!!
#after = _sed_esc(after, escape_all)
limit = _sed_esc(limit, escape_all)
shutil.copy2(path, '{0}{1}'.format(path, backup))
with salt.utils.files.fopen(path, 'w') as ofile:
with salt.utils.files.fopen('{0}{1}'.format(path, backup), 'r') as ifile:
if multi is True:
                for line in ifile.readlines():
ofile.write(_psed(line, before, after, limit, flags))
else:
ofile.write(_psed(ifile.read(), before, after, limit, flags))
RE_FLAG_TABLE = {'I': re.I,
'L': re.L,
'M': re.M,
'S': re.S,
'U': re.U,
'X': re.X}
def _psed(text,
before,
after,
limit,
flags):
'''
Does the actual work for file.psed, so that single lines can be passed in
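    A minimal sketch of the behaviour (hypothetical input):
    .. code-block:: python
        _psed('ham spam', 'spam', 'eggs', '', 'g')   # 'ham eggs'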
'''
atext = text
if limit:
limit = re.compile(limit)
        comps = limit.split(text)
atext = ''.join(comps[1:])
count = 1
if 'g' in flags:
count = 0
flags = flags.replace('g', '')
aflags = 0
for flag in flags:
aflags |= RE_FLAG_TABLE[flag]
before = re.compile(before, flags=aflags)
text = re.sub(before, after, atext, count=count)
return text
def uncomment(path,
regex,
char='#',
backup='.bak'):
'''
.. deprecated:: 0.17.0
Use :py:func:`~salt.modules.file.replace` instead.
Uncomment specified commented lines in a file
path
The full path to the file to be edited
regex
A regular expression used to find the lines that are to be uncommented.
This regex should not include the comment character. A leading ``^``
character will be stripped for convenience (for easily switching
between comment() and uncomment()).
char : ``#``
The character to remove in order to uncomment a line
backup : ``.bak``
The file will be backed up before edit with this file extension;
        **WARNING:** each time ``sed``/``comment``/``uncomment`` is called,
        this backup will be overwritten
CLI Example:
.. code-block:: bash
salt '*' file.uncomment /etc/hosts.deny 'ALL: PARANOID'
'''
return comment_line(path=path,
regex=regex,
char=char,
cmnt=False,
backup=backup)
def comment(path,
regex,
char='#',
backup='.bak'):
'''
.. deprecated:: 0.17.0
Use :py:func:`~salt.modules.file.replace` instead.
Comment out specified lines in a file
path
The full path to the file to be edited
regex
A regular expression used to find the lines that are to be commented;
        this pattern will be wrapped in parentheses and any preceding/trailing
        ``^`` or ``$`` characters will be moved outside the parentheses
        (e.g., the pattern ``^foo$`` will be rewritten as ``^(foo)$``)
char : ``#``
The character to be inserted at the beginning of a line in order to
comment it out
backup : ``.bak``
The file will be backed up before edit with this file extension
.. warning::
This backup will be overwritten each time ``sed`` / ``comment`` /
``uncomment`` is called. Meaning the backup will only be useful
after the first invocation.
CLI Example:
.. code-block:: bash
salt '*' file.comment /etc/modules pcspkr
'''
return comment_line(path=path,
regex=regex,
char=char,
cmnt=True,
backup=backup)
def comment_line(path,
regex,
char='#',
cmnt=True,
backup='.bak'):
r'''
Comment or Uncomment a line in a text file.
:param path: string
The full path to the text file.
:param regex: string
A regex expression that begins with ``^`` that will find the line you wish
to comment. Can be as simple as ``^color =``
:param char: string
The character used to comment a line in the type of file you're referencing.
Default is ``#``
:param cmnt: boolean
True to comment the line. False to uncomment the line. Default is True.
:param backup: string
The file extension to give the backup file. Default is ``.bak``
Set to False/None to not keep a backup.
:return: boolean
Returns True if successful, False if not
CLI Example:
The following example will comment out the ``pcspkr`` line in the
``/etc/modules`` file using the default ``#`` character and create a backup
file named ``modules.bak``
.. code-block:: bash
salt '*' file.comment_line '/etc/modules' '^pcspkr'
CLI Example:
The following example will uncomment the ``log_level`` setting in ``minion``
config file if it is set to either ``warning``, ``info``, or ``debug`` using
the ``#`` character and create a backup file named ``minion.bk``
.. code-block:: bash
salt '*' file.comment_line 'C:\salt\conf\minion' '^log_level: (warning|info|debug)' '#' False '.bk'
'''
# Get the regex for comment or uncomment
if cmnt:
regex = '{0}({1}){2}'.format(
'^' if regex.startswith('^') else '',
regex.lstrip('^').rstrip('$'),
'$' if regex.endswith('$') else '')
else:
regex = r'^{0}\s*({1}){2}'.format(
char,
regex.lstrip('^').rstrip('$'),
'$' if regex.endswith('$') else '')
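    # Illustrative examples of the rewritten regex: with cmnt=True the pattern
    # '^pcspkr$' becomes '^(pcspkr)$'; with cmnt=False and char='#' it becomes
    # '^#\s*(pcspkr)$', so the leading comment character is matched as well.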
# Load the real path to the file
path = os.path.realpath(os.path.expanduser(path))
# Make sure the file exists
if not os.path.isfile(path):
raise SaltInvocationError('File not found: {0}'.format(path))
# Make sure it is a text file
if not __utils__['files.is_text_file'](path):
raise SaltInvocationError(
'Cannot perform string replacements on a binary file: {0}'.format(path))
# First check the whole file, determine whether to make the replacement
# Searching first avoids modifying the time stamp if there are no changes
found = False
# Dictionaries for comparing changes
orig_file = []
new_file = []
# Buffer size for fopen
bufsize = os.path.getsize(path)
try:
# Use a read-only handle to open the file
with salt.utils.files.fopen(path,
mode='rb',
buffering=bufsize) as r_file:
# Loop through each line of the file and look for a match
for line in r_file:
# Is it in this line
if six.PY3:
line = line.decode(__salt_system_encoding__)
if re.match(regex, line):
# Load lines into dictionaries, set found to True
orig_file.append(line)
if cmnt:
new_file.append('{0}{1}'.format(char, line))
else:
new_file.append(line.lstrip(char))
found = True
except (OSError, IOError) as exc:
raise CommandExecutionError(
"Unable to open file '{0}'. "
"Exception: {1}".format(path, exc)
)
# We've searched the whole file. If we didn't find anything, return False
if not found:
return False
if not salt.utils.platform.is_windows():
pre_user = get_user(path)
pre_group = get_group(path)
pre_mode = salt.utils.normalize_mode(get_mode(path))
# Create a copy to read from and to use as a backup later
try:
temp_file = _mkstemp_copy(path=path, preserve_inode=False)
except (OSError, IOError) as exc:
raise CommandExecutionError("Exception: {0}".format(exc))
try:
# Open the file in write mode
with salt.utils.files.fopen(path,
mode='wb',
buffering=bufsize) as w_file:
try:
# Open the temp file in read mode
with salt.utils.files.fopen(temp_file,
mode='rb',
buffering=bufsize) as r_file:
# Loop through each line of the file and look for a match
for line in r_file:
if six.PY3:
line = line.decode(__salt_system_encoding__)
try:
# Is it in this line
if re.match(regex, line):
# Write the new line
if cmnt:
wline = '{0}{1}'.format(char, line)
else:
wline = line.lstrip(char)
else:
# Write the existing line (no change)
wline = line
if six.PY3:
wline = wline.encode(__salt_system_encoding__)
w_file.write(wline)
except (OSError, IOError) as exc:
raise CommandExecutionError(
"Unable to write file '{0}'. Contents may "
"be truncated. Temporary file contains copy "
"at '{1}'. "
"Exception: {2}".format(path, temp_file, exc)
)
except (OSError, IOError) as exc:
raise CommandExecutionError("Exception: {0}".format(exc))
except (OSError, IOError) as exc:
raise CommandExecutionError("Exception: {0}".format(exc))
if backup:
# Move the backup file to the original directory
backup_name = '{0}{1}'.format(path, backup)
try:
shutil.move(temp_file, backup_name)
except (OSError, IOError) as exc:
raise CommandExecutionError(
"Unable to move the temp file '{0}' to the "
"backup file '{1}'. "
"Exception: {2}".format(path, temp_file, exc)
)
else:
os.remove(temp_file)
if not salt.utils.platform.is_windows():
check_perms(path, None, pre_user, pre_group, pre_mode)
# Return a diff using the two dictionaries
return ''.join(difflib.unified_diff(orig_file, new_file))
def _get_flags(flags):
'''
Return an integer appropriate for use as a flag for the re module from a
list of human-readable strings
.. code-block:: python
>>> _get_flags(['MULTILINE', 'IGNORECASE'])
10
>>> _get_flags('MULTILINE')
8
>>> _get_flags(2)
2
'''
if isinstance(flags, six.string_types):
flags = [flags]
if isinstance(flags, Iterable) and not isinstance(flags, Mapping):
_flags_acc = []
for flag in flags:
_flag = getattr(re, str(flag).upper())
if not isinstance(_flag, six.integer_types):
raise SaltInvocationError(
'Invalid re flag given: {0}'.format(flag)
)
_flags_acc.append(_flag)
return reduce(operator.__or__, _flags_acc)
elif isinstance(flags, six.integer_types):
return flags
else:
raise SaltInvocationError(
'Invalid re flags: "{0}", must be given either as a single flag '
'string, a list of strings, or as an integer'.format(flags)
)
def _add_flags(flags, new_flags):
'''
Combine ``flags`` and ``new_flags``
'''
flags = _get_flags(flags)
new_flags = _get_flags(new_flags)
return flags | new_flags
def _mkstemp_copy(path,
preserve_inode=True):
'''
Create a temp file and move/copy the contents of ``path`` to the temp file.
Return the path to the temp file.
path
The full path to the file whose contents will be moved/copied to a temp file.
Whether it's moved or copied depends on the value of ``preserve_inode``.
preserve_inode
Preserve the inode of the file, so that any hard links continue to share the
inode with the original filename. This works by *copying* the file, reading
from the copy, and writing to the file at the original inode. If ``False``, the
file will be *moved* rather than copied, and a new file will be written to a
new inode, but using the original filename. Hard links will then share an inode
with the backup, instead (if using ``backup`` to create a backup copy).
Default is ``True``.
'''
temp_file = None
# Create the temp file
try:
temp_file = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX)
except (OSError, IOError) as exc:
raise CommandExecutionError(
"Unable to create temp file. "
"Exception: {0}".format(exc)
)
# use `copy` to preserve the inode of the
# original file, and thus preserve hardlinks
# to the inode. otherwise, use `move` to
# preserve prior behavior, which results in
# writing the file to a new inode.
if preserve_inode:
try:
shutil.copy2(path, temp_file)
except (OSError, IOError) as exc:
raise CommandExecutionError(
"Unable to copy file '{0}' to the "
"temp file '{1}'. "
"Exception: {2}".format(path, temp_file, exc)
)
else:
try:
shutil.move(path, temp_file)
except (OSError, IOError) as exc:
raise CommandExecutionError(
"Unable to move file '{0}' to the "
"temp file '{1}'. "
"Exception: {2}".format(path, temp_file, exc)
)
return temp_file
def _starts_till(src, probe, strip_comments=True):
'''
    Return ``0`` if ``src`` and ``probe`` are identical, ``-1`` if they do not
    share a common beginning, and otherwise a non-negative index that grows
    with the number of trailing words by which the two strings differ.
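    Illustrative behaviour:
    .. code-block:: python
        _starts_till('/etc/hosts', '/etc/hosts')                            # 0, identical
        _starts_till('bind-address 127.0.0.1', 'bind-address 192.168.0.1')  # 0, same first word
        _starts_till('foo', 'bar')                                          # -1, no common start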
'''
def _strip_comments(txt):
'''
Strip possible comments.
Usually commends are one or two symbols
'''
buff = txt.split(" ", 1)
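        # If the first token is a single character (a comment marker such as
        # '#' or ';'), return only the remainder; otherwise return txt as-is.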
return len(buff) == 2 and len(buff[0]) < 2 and buff[1] or txt
def _to_words(txt):
'''
Split by words
'''
return txt and [w for w in txt.strip().split(" ") if w.strip()] or txt
no_match = -1
equal = 0
if not src or not probe:
return no_match
if src == probe:
return equal
src = _to_words(strip_comments and _strip_comments(src) or src)
probe = _to_words(strip_comments and _strip_comments(probe) or probe)
a_buff, b_buff = len(src) < len(probe) and (src, probe) or (probe, src)
b_buff = ' '.join(b_buff)
for idx in range(len(a_buff)):
prb = ' '.join(a_buff[:-(idx + 1)])
if prb and b_buff.startswith(prb):
return idx
return no_match
def _regex_to_static(src, regex):
'''
    Resolve a regular expression to the literal text it matches in ``src``.
    If the pattern does not match, the pattern itself is returned unchanged.
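    Illustrative behaviour:
    .. code-block:: python
        _regex_to_static('hosts: files dns', 'hosts:.*')    # 'hosts: files dns'
        _regex_to_static('hosts: files dns', 'order .*')    # 'order .*' (no match)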
'''
if not src or not regex:
return None
try:
src = re.search(regex, src, re.M)
except Exception as ex:
raise CommandExecutionError("{0}: '{1}'".format(_get_error_message(ex), regex))
return src and src.group() or regex
def _assert_occurrence(src, probe, target, amount=1):
'''
    Raise an exception if the number of occurrences of ``probe`` in ``src``
    differs from ``amount`` (default: 1).
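    Illustrative behaviour:
    .. code-block:: python
        _assert_occurrence('one two three', 'two', 'before')   # passes silently
        _assert_occurrence('two two three', 'two', 'before')   # raises CommandExecutionError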
'''
occ = src.count(probe)
if occ > amount:
msg = 'more than'
elif occ < amount:
msg = 'less than'
elif not occ:
msg = 'no'
else:
msg = None
if msg:
raise CommandExecutionError('Found {0} expected occurrences in "{1}" expression'.format(msg, target))
def _get_line_indent(src, line, indent):
'''
    Return ``line`` re-indented with the leading whitespace of ``src``
    (a no-op when ``indent`` is falsy).
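    Illustrative behaviour:
    .. code-block:: python
        _get_line_indent('    old line', 'new line', True)    # '    new line'
        _get_line_indent('    old line', 'new line', False)   # 'new line'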
'''
if not indent:
return line
idt = []
for c in src:
if c not in ['\t', ' ']:
break
idt.append(c)
return ''.join(idt) + line.strip()
def line(path, content=None, match=None, mode=None, location=None,
before=None, after=None, show_changes=True, backup=False,
quiet=False, indent=True):
'''
.. versionadded:: 2015.8.0
Edit a line in the configuration file. The ``path`` and ``content``
arguments are required, as well as passing in one of the ``mode``
options.
path
Filesystem path to the file to be edited.
content
Content of the line. Allowed to be empty if mode=delete.
match
Match the target line for an action by
a fragment of a string or regular expression.
If neither ``before`` nor ``after`` are provided, and ``match``
is also ``None``, match becomes the ``content`` value.
mode
Defines how to edit a line. One of the following options is
required:
- ensure
If line does not exist, it will be added. This is based on the
``content`` argument.
- replace
If line already exists, it will be replaced.
- delete
Delete the line, once found.
- insert
Insert a line.
.. note::
If ``mode=insert`` is used, at least one of the following
options must also be defined: ``location``, ``before``, or
``after``. If ``location`` is used, it takes precedence
over the other two options.
location
Defines where to place content in the line. Note this option is only
used when ``mode=insert`` is specified. If a location is passed in, it
takes precedence over both the ``before`` and ``after`` kwargs. Valid
locations are:
- start
Place the content at the beginning of the file.
- end
Place the content at the end of the file.
before
Regular expression or an exact case-sensitive fragment of the string.
This option is only used when either the ``ensure`` or ``insert`` mode
is defined.
after
Regular expression or an exact case-sensitive fragment of the string.
This option is only used when either the ``ensure`` or ``insert`` mode
is defined.
show_changes
Output a unified diff of the old file and the new file.
If ``False`` return a boolean if any changes were made.
Default is ``True``
.. note::
Using this option will store two copies of the file in-memory
(the original version and the edited version) in order to generate the diff.
backup
Create a backup of the original file with the extension:
"Year-Month-Day-Hour-Minutes-Seconds".
quiet
        Do not raise any exceptions, e.g. ignore the fact that the file to be
        edited does not exist and that nothing really happened.
indent
Keep indentation with the previous line. This option is not considered when
the ``delete`` mode is specified.
CLI Example:
.. code-block:: bash
salt '*' file.line /etc/nsswitch.conf "networks:\tfiles dns" after="hosts:.*?" mode='ensure'
.. note::
If an equal sign (``=``) appears in an argument to a Salt command, it is
interpreted as a keyword argument in the format of ``key=val``. That
processing can be bypassed in order to pass an equal sign through to the
remote shell command by manually specifying the kwarg:
.. code-block:: bash
salt '*' file.line /path/to/file content="CREATEMAIL_SPOOL=no" match="CREATE_MAIL_SPOOL=yes" mode="replace"
'''
path = os.path.realpath(os.path.expanduser(path))
if not os.path.isfile(path):
if not quiet:
raise CommandExecutionError('File "{0}" does not exists or is not a file.'.format(path))
return False # No changes had happened
mode = mode and mode.lower() or mode
if mode not in ['insert', 'ensure', 'delete', 'replace']:
if mode is None:
raise CommandExecutionError('Mode was not defined. How to process the file?')
else:
raise CommandExecutionError('Unknown mode: "{0}"'.format(mode))
# We've set the content to be empty in the function params but we want to make sure
# it gets passed when needed. Feature #37092
modeswithemptycontent = ['delete']
if mode not in modeswithemptycontent and content is None:
raise CommandExecutionError('Content can only be empty if mode is {0}'.format(modeswithemptycontent))
del modeswithemptycontent
    # "before"/"after" take precedence; if neither is defined, "match" falls back to "content".
if before is None and after is None and not match:
match = content
with salt.utils.files.fopen(path, mode='r') as fp_:
body = fp_.read()
body_before = hashlib.sha256(salt.utils.stringutils.to_bytes(body)).hexdigest()
after = _regex_to_static(body, after)
before = _regex_to_static(body, before)
match = _regex_to_static(body, match)
if os.stat(path).st_size == 0 and mode in ('delete', 'replace'):
log.warning('Cannot find text to {0}. File \'{1}\' is empty.'.format(mode, path))
body = ''
elif mode == 'delete':
body = os.linesep.join([line for line in body.split(os.linesep) if line.find(match) < 0])
elif mode == 'replace':
body = os.linesep.join([(_get_line_indent(file_line, content, indent)
if (file_line.find(match) > -1 and not file_line == content) else file_line)
for file_line in body.split(os.linesep)])
elif mode == 'insert':
if not location and not before and not after:
            raise CommandExecutionError('Mode "insert" requires at least one of "location", "before" or "after" to be defined.')
if not location:
if before and after:
_assert_occurrence(body, before, 'before')
_assert_occurrence(body, after, 'after')
out = []
lines = body.split(os.linesep)
for idx in range(len(lines)):
_line = lines[idx]
if _line.find(before) > -1 and idx <= len(lines) and lines[idx - 1].find(after) > -1:
out.append(_get_line_indent(_line, content, indent))
out.append(_line)
else:
out.append(_line)
body = os.linesep.join(out)
if before and not after:
_assert_occurrence(body, before, 'before')
out = []
lines = body.split(os.linesep)
for idx in range(len(lines)):
_line = lines[idx]
if _line.find(before) > -1:
cnd = _get_line_indent(_line, content, indent)
if not idx or (idx and _starts_till(lines[idx - 1], cnd) < 0): # Job for replace instead
out.append(cnd)
out.append(_line)
body = os.linesep.join(out)
elif after and not before:
_assert_occurrence(body, after, 'after')
out = []
lines = body.split(os.linesep)
for idx in range(len(lines)):
_line = lines[idx]
out.append(_line)
cnd = _get_line_indent(_line, content, indent)
if _line.find(after) > -1:
# No dupes or append, if "after" is the last line
                        if idx + 1 == len(lines) or _starts_till(lines[idx + 1], cnd) < 0:
out.append(cnd)
body = os.linesep.join(out)
else:
if location == 'start':
body = ''.join([content, body])
elif location == 'end':
body = ''.join([body, _get_line_indent(body[-1], content, indent) if body else content])
elif mode == 'ensure':
after = after and after.strip()
before = before and before.strip()
if before and after:
_assert_occurrence(body, before, 'before')
_assert_occurrence(body, after, 'after')
a_idx = b_idx = -1
idx = 0
body = body.split(os.linesep)
for _line in body:
idx += 1
if _line.find(before) > -1 and b_idx < 0:
b_idx = idx
if _line.find(after) > -1 and a_idx < 0:
a_idx = idx
# Add
if not b_idx - a_idx - 1:
body = body[:a_idx] + [content] + body[b_idx - 1:]
elif b_idx - a_idx - 1 == 1:
if _starts_till(body[a_idx:b_idx - 1][0], content) > -1:
body[a_idx] = _get_line_indent(body[a_idx - 1], content, indent)
else:
raise CommandExecutionError('Found more than one line between boundaries "before" and "after".')
body = os.linesep.join(body)
elif before and not after:
_assert_occurrence(body, before, 'before')
body = body.split(os.linesep)
out = []
for idx in range(len(body)):
if body[idx].find(before) > -1:
prev = (idx > 0 and idx or 1) - 1
out.append(_get_line_indent(body[prev], content, indent))
if _starts_till(out[prev], content) > -1:
del out[prev]
out.append(body[idx])
body = os.linesep.join(out)
elif not before and after:
_assert_occurrence(body, after, 'after')
body = body.split(os.linesep)
skip = None
out = []
for idx in range(len(body)):
if skip != body[idx]:
out.append(body[idx])
if body[idx].find(after) > -1:
next_line = idx + 1 < len(body) and body[idx + 1] or None
if next_line is not None and _starts_till(next_line, content) > -1:
skip = next_line
out.append(_get_line_indent(body[idx], content, indent))
body = os.linesep.join(out)
else:
raise CommandExecutionError("Wrong conditions? "
"Unable to ensure line without knowing "
"where to put it before and/or after.")
changed = body_before != hashlib.sha256(salt.utils.stringutils.to_bytes(body)).hexdigest()
if backup and changed and __opts__['test'] is False:
try:
temp_file = _mkstemp_copy(path=path, preserve_inode=True)
shutil.move(temp_file, '{0}.{1}'.format(path, time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())))
except (OSError, IOError) as exc:
raise CommandExecutionError("Unable to create the backup file of {0}. Exception: {1}".format(path, exc))
changes_diff = None
if changed:
if show_changes:
with salt.utils.files.fopen(path, 'r') as fp_:
path_content = _splitlines_preserving_trailing_newline(
fp_.read())
changes_diff = ''.join(difflib.unified_diff(
path_content, _splitlines_preserving_trailing_newline(body)))
if __opts__['test'] is False:
fh_ = None
try:
fh_ = salt.utils.atomicfile.atomic_open(path, 'w')
fh_.write(body)
finally:
if fh_:
fh_.close()
return show_changes and changes_diff or changed
def replace(path,
pattern,
repl,
count=0,
flags=8,
bufsize=1,
append_if_not_found=False,
prepend_if_not_found=False,
not_found_content=None,
backup='.bak',
dry_run=False,
search_only=False,
show_changes=True,
ignore_if_missing=False,
preserve_inode=True,
backslash_literal=False,
):
'''
.. versionadded:: 0.17.0
Replace occurrences of a pattern in a file. If ``show_changes`` is
``True``, then a diff of what changed will be returned, otherwise a
``True`` will be returned when changes are made, and ``False`` when
no changes are made.
This is a pure Python implementation that wraps Python's :py:func:`~re.sub`.
path
Filesystem path to the file to be edited. If a symlink is specified, it
will be resolved to its target.
pattern
A regular expression, to be matched using Python's
:py:func:`~re.search`.
repl
The replacement text
count : 0
Maximum number of pattern occurrences to be replaced. If count is a
positive integer ``n``, only ``n`` occurrences will be replaced,
otherwise all occurrences will be replaced.
flags (list or int)
A list of flags defined in the :ref:`re module documentation
<contents-of-module-re>`. Each list item should be a string that will
correlate to the human-friendly flag name. E.g., ``['IGNORECASE',
'MULTILINE']``. Optionally, ``flags`` may be an int, with a value
corresponding to the XOR (``|``) of all the desired flags. Defaults to
8 (which supports 'MULTILINE').
bufsize (int or str)
How much of the file to buffer into memory at once. The
default value ``1`` processes one line at a time. The special value
``file`` may be specified which will read the entire file into memory
before processing.
append_if_not_found : False
.. versionadded:: 2014.7.0
If set to ``True``, and pattern is not found, then the content will be
appended to the file.
prepend_if_not_found : False
.. versionadded:: 2014.7.0
If set to ``True`` and pattern is not found, then the content will be
prepended to the file.
not_found_content
.. versionadded:: 2014.7.0
Content to use for append/prepend if not found. If None (default), uses
``repl``. Useful when ``repl`` uses references to group in pattern.
backup : .bak
The file extension to use for a backup of the file before editing. Set
to ``False`` to skip making a backup.
dry_run : False
If set to ``True``, no changes will be made to the file, the function
will just return the changes that would have been made (or a
``True``/``False`` value if ``show_changes`` is set to ``False``).
search_only : False
        If set to ``True``, no changes will be performed on the file, and this
function will simply return ``True`` if the pattern was matched, and
``False`` if not.
show_changes : True
If ``True``, return a diff of changes made. Otherwise, return ``True``
if changes were made, and ``False`` if not.
.. note::
Using this option will store two copies of the file in memory (the
original version and the edited version) in order to generate the
diff. This may not normally be a concern, but could impact
performance if used with large files.
ignore_if_missing : False
.. versionadded:: 2015.8.0
If set to ``True``, this function will simply return ``False``
if the file doesn't exist. Otherwise, an error will be thrown.
preserve_inode : True
.. versionadded:: 2015.8.0
Preserve the inode of the file, so that any hard links continue to
share the inode with the original filename. This works by *copying* the
file, reading from the copy, and writing to the file at the original
inode. If ``False``, the file will be *moved* rather than copied, and a
new file will be written to a new inode, but using the original
filename. Hard links will then share an inode with the backup, instead
(if using ``backup`` to create a backup copy).
backslash_literal : False
.. versionadded:: 2016.11.7
Interpret backslashes as literal backslashes for the repl and not
escape characters. This will help when using append/prepend so that
the backslashes are not interpreted for the repl on the second run of
the state.
If an equal sign (``=``) appears in an argument to a Salt command it is
interpreted as a keyword argument in the format ``key=val``. That
processing can be bypassed in order to pass an equal sign through to the
remote shell command by manually specifying the kwarg:
.. code-block:: bash
salt '*' file.replace /path/to/file pattern='=' repl=':'
salt '*' file.replace /path/to/file pattern="bind-address\\s*=" repl='bind-address:'
CLI Examples:
.. code-block:: bash
salt '*' file.replace /etc/httpd/httpd.conf pattern='LogLevel warn' repl='LogLevel info'
salt '*' file.replace /some/file pattern='before' repl='after' flags='[MULTILINE, IGNORECASE]'
'''
symlink = False
if is_link(path):
symlink = True
target_path = os.readlink(path)
given_path = os.path.expanduser(path)
path = os.path.realpath(os.path.expanduser(path))
if not os.path.exists(path):
if ignore_if_missing:
return False
else:
raise SaltInvocationError('File not found: {0}'.format(path))
if not __utils__['files.is_text_file'](path):
raise SaltInvocationError(
'Cannot perform string replacements on a binary file: {0}'
.format(path)
)
if search_only and (append_if_not_found or prepend_if_not_found):
raise SaltInvocationError(
'search_only cannot be used with append/prepend_if_not_found'
)
if append_if_not_found and prepend_if_not_found:
raise SaltInvocationError(
'Only one of append and prepend_if_not_found is permitted'
)
flags_num = _get_flags(flags)
cpattern = re.compile(salt.utils.stringutils.to_bytes(pattern), flags_num)
filesize = os.path.getsize(path)
if bufsize == 'file':
bufsize = filesize
# Search the file; track if any changes have been made for the return val
has_changes = False
orig_file = [] # used for show_changes and change detection
new_file = [] # used for show_changes and change detection
if not salt.utils.platform.is_windows():
pre_user = get_user(path)
pre_group = get_group(path)
pre_mode = salt.utils.normalize_mode(get_mode(path))
    # Force repl to bytes to avoid TypeErrors when it is used with mmap data.
    # The replacement text may also be a non-string (e.g. the integer 123).
repl = salt.utils.stringutils.to_bytes(str(repl))
if not_found_content:
not_found_content = salt.utils.stringutils.to_bytes(not_found_content)
found = False
temp_file = None
content = salt.utils.stringutils.to_str(not_found_content) if not_found_content and \
(prepend_if_not_found or
append_if_not_found) \
else salt.utils.stringutils.to_str(repl)
try:
# First check the whole file, determine whether to make the replacement
# Searching first avoids modifying the time stamp if there are no changes
r_data = None
# Use a read-only handle to open the file
with salt.utils.files.fopen(path,
mode='rb',
buffering=bufsize) as r_file:
try:
# mmap throws a ValueError if the file is empty.
r_data = mmap.mmap(r_file.fileno(),
0,
access=mmap.ACCESS_READ)
except (ValueError, mmap.error):
# size of file in /proc is 0, but contains data
r_data = salt.utils.stringutils.to_bytes("".join(r_file))
if search_only:
# Just search; bail as early as a match is found
if re.search(cpattern, r_data):
return True # `with` block handles file closure
else:
result, nrepl = re.subn(cpattern,
repl.replace('\\', '\\\\') if backslash_literal else repl,
r_data,
count)
# found anything? (even if no change)
if nrepl > 0:
found = True
# Identity check the potential change
has_changes = True if pattern != repl else has_changes
if prepend_if_not_found or append_if_not_found:
# Search for content, to avoid pre/appending the
# content if it was pre/appended in a previous run.
if re.search(salt.utils.stringutils.to_bytes('^{0}$'.format(re.escape(content))),
r_data,
flags=flags_num):
# Content was found, so set found.
found = True
orig_file = r_data.read(filesize).splitlines(True) \
if isinstance(r_data, mmap.mmap) \
else r_data.splitlines(True)
new_file = result.splitlines(True)
except (OSError, IOError) as exc:
raise CommandExecutionError(
"Unable to open file '{0}'. "
"Exception: {1}".format(path, exc)
)
finally:
if r_data and isinstance(r_data, mmap.mmap):
r_data.close()
if has_changes and not dry_run:
# Write the replacement text in this block.
try:
# Create a copy to read from and to use as a backup later
temp_file = _mkstemp_copy(path=path,
preserve_inode=preserve_inode)
except (OSError, IOError) as exc:
raise CommandExecutionError("Exception: {0}".format(exc))
r_data = None
try:
# Open the file in write mode
with salt.utils.files.fopen(path,
mode='w',
buffering=bufsize) as w_file:
try:
# Open the temp file in read mode
with salt.utils.files.fopen(temp_file,
mode='r',
buffering=bufsize) as r_file:
r_data = mmap.mmap(r_file.fileno(),
0,
access=mmap.ACCESS_READ)
result, nrepl = re.subn(cpattern,
repl.replace('\\', '\\\\') if backslash_literal else repl,
r_data,
count)
try:
w_file.write(salt.utils.stringutils.to_str(result))
except (OSError, IOError) as exc:
raise CommandExecutionError(
"Unable to write file '{0}'. Contents may "
"be truncated. Temporary file contains copy "
"at '{1}'. "
"Exception: {2}".format(path, temp_file, exc)
)
except (OSError, IOError) as exc:
raise CommandExecutionError("Exception: {0}".format(exc))
finally:
if r_data and isinstance(r_data, mmap.mmap):
r_data.close()
except (OSError, IOError) as exc:
raise CommandExecutionError("Exception: {0}".format(exc))
if not found and (append_if_not_found or prepend_if_not_found):
if not_found_content is None:
not_found_content = repl
if prepend_if_not_found:
new_file.insert(0, not_found_content + b'\n')
else:
# append_if_not_found
# Make sure we have a newline at the end of the file
if 0 != len(new_file):
if not new_file[-1].endswith(b'\n'):
new_file[-1] += b'\n'
new_file.append(not_found_content + b'\n')
has_changes = True
if not dry_run:
try:
# Create a copy to read from and for later use as a backup
temp_file = _mkstemp_copy(path=path,
preserve_inode=preserve_inode)
except (OSError, IOError) as exc:
raise CommandExecutionError("Exception: {0}".format(exc))
# write new content in the file while avoiding partial reads
        fh_ = None
        try:
            fh_ = salt.utils.atomicfile.atomic_open(path, 'w')
            for line in new_file:
                fh_.write(salt.utils.stringutils.to_str(line))
        finally:
            if fh_:
                fh_.close()
if backup and has_changes and not dry_run:
# keep the backup only if it was requested
# and only if there were any changes
backup_name = '{0}{1}'.format(path, backup)
try:
shutil.move(temp_file, backup_name)
except (OSError, IOError) as exc:
raise CommandExecutionError(
"Unable to move the temp file '{0}' to the "
"backup file '{1}'. "
"Exception: {2}".format(path, temp_file, exc)
)
if symlink:
symlink_backup = '{0}{1}'.format(given_path, backup)
target_backup = '{0}{1}'.format(target_path, backup)
# Always clobber any existing symlink backup
# to match the behaviour of the 'backup' option
try:
os.symlink(target_backup, symlink_backup)
except OSError:
os.remove(symlink_backup)
os.symlink(target_backup, symlink_backup)
            except Exception as exc:
                raise CommandExecutionError(
                    "Unable to create backup symlink '{0}'. "
"Target was '{1}'. "
"Exception: {2}".format(symlink_backup, target_backup,
exc)
)
elif temp_file:
try:
os.remove(temp_file)
except (OSError, IOError) as exc:
raise CommandExecutionError(
"Unable to delete temp file '{0}'. "
"Exception: {1}".format(temp_file, exc)
)
if not dry_run and not salt.utils.platform.is_windows():
check_perms(path, None, pre_user, pre_group, pre_mode)
def get_changes():
orig_file_as_str = [salt.utils.stringutils.to_str(x) for x in orig_file]
new_file_as_str = [salt.utils.stringutils.to_str(x) for x in new_file]
return ''.join(difflib.unified_diff(orig_file_as_str, new_file_as_str))
if show_changes:
return get_changes()
# We may have found a regex line match but don't need to change the line
# (for situations where the pattern also matches the repl). Revert the
# has_changes flag to False if the final result is unchanged.
if not get_changes():
has_changes = False
return has_changes
def blockreplace(path,
marker_start='#-- start managed zone --',
marker_end='#-- end managed zone --',
content='',
append_if_not_found=False,
prepend_if_not_found=False,
backup='.bak',
dry_run=False,
show_changes=True,
append_newline=False,
):
'''
.. versionadded:: 2014.1.0
Replace content of a text block in a file, delimited by line markers
    A block of content delimited by comments can help you manage several lines
    of entries without worrying about removing old entries.
.. note::
This function will store two copies of the file in-memory (the original
version and the edited version) in order to detect changes and only
edit the targeted file if necessary.
path
Filesystem path to the file to be edited
marker_start
The line content identifying a line as the start of the content block.
Note that the whole line containing this marker will be considered, so
whitespace or extra content before or after the marker is included in
final output
marker_end
The line content identifying a line as the end of the content block.
Note that the whole line containing this marker will be considered, so
whitespace or extra content before or after the marker is included in
final output
content
The content to be used between the two lines identified by marker_start
        and marker_end.
append_if_not_found : False
If markers are not found and set to ``True`` then, the markers and
content will be appended to the file.
prepend_if_not_found : False
If markers are not found and set to ``True`` then, the markers and
content will be prepended to the file.
backup
The file extension to use for a backup of the file if any edit is made.
Set to ``False`` to skip making a backup.
dry_run
Don't make any edits to the file.
show_changes
Output a unified diff of the old file and the new file. If ``False``,
return a boolean if any changes were made.
append_newline:
Append a newline to the content block. For more information see:
https://github.com/saltstack/salt/issues/33686
.. versionadded:: 2016.3.4
CLI Example:
.. code-block:: bash
salt '*' file.blockreplace /etc/hosts '#-- start managed zone foobar : DO NOT EDIT --' \\
'#-- end managed zone foobar --' $'10.0.1.1 foo.foobar\\n10.0.1.2 bar.foobar' True
'''
path = os.path.expanduser(path)
if not os.path.exists(path):
raise SaltInvocationError('File not found: {0}'.format(path))
if append_if_not_found and prepend_if_not_found:
raise SaltInvocationError(
            'Only one of append_if_not_found and prepend_if_not_found is permitted'
)
if not __utils__['files.is_text_file'](path):
raise SaltInvocationError(
'Cannot perform string replacements on a binary file: {0}'
.format(path)
)
# Search the file; track if any changes have been made for the return val
has_changes = False
orig_file = []
new_file = []
in_block = False
old_content = ''
done = False
# we do not use in_place editing to avoid file attrs modifications when
# no changes are required and to avoid any file access on a partially
# written file.
# we could also use salt.utils.filebuffer.BufferedReader
try:
fi_file = fileinput.input(path,
inplace=False, backup=False,
bufsize=1, mode='r')
for line in fi_file:
result = line
if marker_start in line:
# managed block start found, start recording
in_block = True
else:
if in_block:
if marker_end in line:
# end of block detected
in_block = False
# Check for multi-line '\n' terminated content as split will
# introduce an unwanted additional new line.
if content and content[-1] == '\n':
content = content[:-1]
# push new block content in file
for cline in content.split('\n'):
new_file.append(cline + '\n')
done = True
else:
# remove old content, but keep a trace
old_content += line
result = None
# else: we are not in the marked block, keep saving things
orig_file.append(line)
if result is not None:
new_file.append(result)
# end for. If we are here without block management we maybe have some problems,
# or we need to initialise the marked block
finally:
fi_file.close()
if in_block:
# unterminated block => bad, always fail
raise CommandExecutionError(
'Unterminated marked block. End of file reached before marker_end.'
)
if not done:
if prepend_if_not_found:
# add the markers and content at the beginning of file
new_file.insert(0, marker_end + '\n')
if append_newline is True:
new_file.insert(0, content + '\n')
else:
new_file.insert(0, content)
new_file.insert(0, marker_start + '\n')
done = True
elif append_if_not_found:
# Make sure we have a newline at the end of the file
if 0 != len(new_file):
if not new_file[-1].endswith('\n'):
new_file[-1] += '\n'
# add the markers and content at the end of file
new_file.append(marker_start + '\n')
if append_newline is True:
new_file.append(content + '\n')
else:
new_file.append(content)
new_file.append(marker_end + '\n')
done = True
else:
raise CommandExecutionError(
'Cannot edit marked block. Markers were not found in file.'
)
if done:
diff = ''.join(difflib.unified_diff(orig_file, new_file))
        has_changes = diff != ''
if has_changes and not dry_run:
# changes detected
# backup file attrs
perms = {}
perms['user'] = get_user(path)
perms['group'] = get_group(path)
perms['mode'] = salt.utils.normalize_mode(get_mode(path))
# backup old content
if backup is not False:
backup_path = '{0}{1}'.format(path, backup)
shutil.copy2(path, backup_path)
# copy2 does not preserve ownership
check_perms(backup_path,
None,
perms['user'],
perms['group'],
perms['mode'])
# write new content in the file while avoiding partial reads
try:
fh_ = salt.utils.atomicfile.atomic_open(path, 'w')
for line in new_file:
fh_.write(line)
finally:
fh_.close()
# this may have overwritten file attrs
check_perms(path,
None,
perms['user'],
perms['group'],
perms['mode'])
if show_changes:
return diff
return has_changes
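# Illustrative sketch (not used by this module): a minimal programmatic call to
# blockreplace() above. The path and marker strings are hypothetical, and on a
# real minion the loader dunders (__utils__ and friends) must be injected for
# blockreplace() to run.
def _blockreplace_example_sketch():
    '''
    Append a managed block of host entries if the markers are not present yet
    and return the unified diff that blockreplace() produces by default.
    '''
    return blockreplace(
        '/etc/hosts',
        marker_start='#-- start managed zone foobar --',
        marker_end='#-- end managed zone foobar --',
        content='10.0.1.1 foo.foobar\n10.0.1.2 bar.foobar',
        append_if_not_found=True,
    )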
def search(path,
pattern,
flags=8,
bufsize=1,
ignore_if_missing=False,
multiline=False
):
'''
.. versionadded:: 0.17.0
Search for occurrences of a pattern in a file
Except for multiline, params are identical to
:py:func:`~salt.modules.file.replace`.
multiline
If true, inserts 'MULTILINE' into ``flags`` and sets ``bufsize`` to
'file'.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' file.search /etc/crontab 'mymaintenance.sh'
'''
if multiline:
flags = _add_flags(flags, 'MULTILINE')
bufsize = 'file'
# This function wraps file.replace on purpose in order to enforce
# consistent usage, compatible regex's, expected behavior, *and* bugs. :)
# Any enhancements or fixes to one should affect the other.
return replace(path,
pattern,
'',
flags=flags,
bufsize=bufsize,
dry_run=True,
search_only=True,
show_changes=False,
ignore_if_missing=ignore_if_missing)
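# Illustrative sketch (not part of the public API): search() is a thin wrapper
# around replace() in dry-run/search-only mode, so it simply returns a boolean.
# The path and pattern below are hypothetical; the loader dunders are assumed
# to be available, as for any execution module function.
def _search_example_sketch():
    '''
    Return True if /etc/crontab mentions a (hypothetical) maintenance script,
    using a multiline-capable search.
    '''
    return search('/etc/crontab', r'mymaintenance\.sh', multiline=True)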
def patch(originalfile, patchfile, options='', dry_run=False):
'''
.. versionadded:: 0.10.4
Apply a patch to a file or directory.
Equivalent to:
.. code-block:: bash
patch <options> -i <patchfile> <originalfile>
Or, when a directory is patched:
.. code-block:: bash
patch <options> -i <patchfile> -d <originalfile> -p0
originalfile
The full path to the file or directory to be patched
patchfile
A patch file to apply to ``originalfile``
options
Options to pass to patch.
CLI Example:
.. code-block:: bash
salt '*' file.patch /opt/file.txt /tmp/file.txt.patch
'''
patchpath = salt.utils.path.which('patch')
if not patchpath:
raise CommandExecutionError(
'patch executable not found. Is the distribution\'s patch '
'package installed?'
)
cmd = [patchpath]
cmd.extend(salt.utils.args.shlex_split(options))
if dry_run:
if __grains__['kernel'] in ('FreeBSD', 'OpenBSD'):
cmd.append('-C')
else:
cmd.append('--dry-run')
# this argument prevents interactive prompts when the patch fails to apply.
# the exit code will still be greater than 0 if that is the case.
if '-N' not in cmd and '--forward' not in cmd:
cmd.append('--forward')
has_rejectfile_option = False
for option in cmd:
if option == '-r' or option.startswith('-r ') \
or option.startswith('--reject-file'):
has_rejectfile_option = True
break
# by default, patch will write rejected patch files to <filename>.rej.
# this option prevents that.
if not has_rejectfile_option:
cmd.append('--reject-file=-')
cmd.extend(['-i', patchfile])
if os.path.isdir(originalfile):
cmd.extend(['-d', originalfile])
has_strip_option = False
for option in cmd:
if option.startswith('-p') or option.startswith('--strip='):
has_strip_option = True
break
if not has_strip_option:
cmd.append('--strip=0')
else:
cmd.append(originalfile)
return __salt__['cmd.run_all'](cmd, python_shell=False)
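# Illustrative sketch: patch() only assembles a ``patch`` command line and
# hands it to cmd.run_all, so the interesting part of the return value is the
# ``retcode`` key. The paths below are hypothetical and the __salt__/__grains__
# dunders must be injected for this to run.
def _patch_example_sketch():
    '''
    Dry-run a patch and report whether it would apply cleanly.
    '''
    result = patch('/opt/file.txt', '/tmp/file.txt.patch', dry_run=True)
    return result['retcode'] == 0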
def contains(path, text):
'''
.. deprecated:: 0.17.0
Use :func:`search` instead.
Return ``True`` if the file at ``path`` contains ``text``
CLI Example:
.. code-block:: bash
salt '*' file.contains /etc/crontab 'mymaintenance.sh'
'''
path = os.path.expanduser(path)
if not os.path.exists(path):
return False
stripped_text = str(text).strip()
try:
with salt.utils.filebuffer.BufferedReader(path) as breader:
for chunk in breader:
if stripped_text in chunk:
return True
return False
except (IOError, OSError):
return False
def contains_regex(path, regex, lchar=''):
'''
.. deprecated:: 0.17.0
Use :func:`search` instead.
Return True if the given regular expression matches on any line in the text
of a given file.
If the lchar argument (leading char) is specified, it
will strip `lchar` from the left side of each line before trying to match
CLI Example:
.. code-block:: bash
salt '*' file.contains_regex /etc/crontab
'''
path = os.path.expanduser(path)
if not os.path.exists(path):
return False
try:
with salt.utils.files.fopen(path, 'r') as target:
for line in target:
if lchar:
line = line.lstrip(lchar)
if re.search(regex, line):
return True
return False
except (IOError, OSError):
return False
def contains_glob(path, glob_expr):
'''
.. deprecated:: 0.17.0
Use :func:`search` instead.
Return ``True`` if the given glob matches a string in the named file
CLI Example:
.. code-block:: bash
salt '*' file.contains_glob /etc/foobar '*cheese*'
'''
path = os.path.expanduser(path)
if not os.path.exists(path):
return False
try:
with salt.utils.filebuffer.BufferedReader(path) as breader:
for chunk in breader:
if fnmatch.fnmatch(chunk, glob_expr):
return True
return False
except (IOError, OSError):
return False
def append(path, *args, **kwargs):
'''
.. versionadded:: 0.9.5
Append text to the end of a file
path
path to file
`*args`
strings to append to file
CLI Example:
.. code-block:: bash
salt '*' file.append /etc/motd \\
"With all thine offerings thou shalt offer salt." \\
"Salt is what makes things taste bad when it isn't in them."
.. admonition:: Attention
If you need to pass a string to append and that string contains
an equal sign, you **must** include the argument name, args.
For example:
.. code-block:: bash
salt '*' file.append /etc/motd args='cheese=spam'
salt '*' file.append /etc/motd args="['cheese=spam','spam=cheese']"
'''
path = os.path.expanduser(path)
# Largely inspired by Fabric's contrib.files.append()
if 'args' in kwargs:
if isinstance(kwargs['args'], list):
args = kwargs['args']
else:
args = [kwargs['args']]
# Make sure we have a newline at the end of the file. Do this in binary
# mode so SEEK_END with nonzero offset will work.
with salt.utils.files.fopen(path, 'rb+') as ofile:
linesep = salt.utils.stringutils.to_bytes(os.linesep)
try:
ofile.seek(-len(linesep), os.SEEK_END)
except IOError as exc:
if exc.errno in (errno.EINVAL, errno.ESPIPE):
# Empty file, simply append lines at the beginning of the file
pass
else:
raise
else:
if ofile.read(len(linesep)) != linesep:
ofile.seek(0, os.SEEK_END)
ofile.write(linesep)
# Append lines in text mode
with salt.utils.files.fopen(path, 'a') as ofile:
for new_line in args:
ofile.write('{0}{1}'.format(new_line, os.linesep))
return 'Wrote {0} lines to "{1}"'.format(len(args), path)
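# Illustrative sketch: when a line to append contains an equals sign, pass it
# through the ``args`` keyword so it is not mistaken for a kwarg. The path and
# lines below are hypothetical.
def _append_example_sketch():
    '''
    Append two lines to a file, one of them containing ``=``.
    '''
    return append('/etc/motd', args=['cheese=spam', 'spam=cheese'])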
def prepend(path, *args, **kwargs):
'''
.. versionadded:: 2014.7.0
Prepend text to the beginning of a file
path
path to file
`*args`
strings to prepend to the file
CLI Example:
.. code-block:: bash
salt '*' file.prepend /etc/motd \\
"With all thine offerings thou shalt offer salt." \\
"Salt is what makes things taste bad when it isn't in them."
.. admonition:: Attention
        If you need to pass a string to prepend and that string contains
an equal sign, you **must** include the argument name, args.
For example:
.. code-block:: bash
salt '*' file.prepend /etc/motd args='cheese=spam'
salt '*' file.prepend /etc/motd args="['cheese=spam','spam=cheese']"
'''
path = os.path.expanduser(path)
if 'args' in kwargs:
if isinstance(kwargs['args'], list):
args = kwargs['args']
else:
args = [kwargs['args']]
try:
with salt.utils.files.fopen(path) as fhr:
contents = fhr.readlines()
except IOError:
contents = []
preface = []
for line in args:
preface.append('{0}\n'.format(line))
with salt.utils.files.fopen(path, "w") as ofile:
contents = preface + contents
ofile.write(''.join(contents))
return 'Prepended {0} lines to "{1}"'.format(len(args), path)
def write(path, *args, **kwargs):
'''
.. versionadded:: 2014.7.0
Write text to a file, overwriting any existing contents.
path
path to file
`*args`
strings to write to the file
CLI Example:
.. code-block:: bash
salt '*' file.write /etc/motd \\
"With all thine offerings thou shalt offer salt."
.. admonition:: Attention
        If you need to pass a string to write and that string contains
an equal sign, you **must** include the argument name, args.
For example:
.. code-block:: bash
salt '*' file.write /etc/motd args='cheese=spam'
salt '*' file.write /etc/motd args="['cheese=spam','spam=cheese']"
'''
path = os.path.expanduser(path)
if 'args' in kwargs:
if isinstance(kwargs['args'], list):
args = kwargs['args']
else:
args = [kwargs['args']]
contents = []
for line in args:
contents.append('{0}\n'.format(line))
with salt.utils.files.fopen(path, "w") as ofile:
ofile.write(''.join(contents))
return 'Wrote {0} lines to "{1}"'.format(len(contents), path)
def touch(name, atime=None, mtime=None):
'''
.. versionadded:: 0.9.5
Just like the ``touch`` command, create a file if it doesn't exist or
simply update the atime and mtime if it already does.
atime:
Access time in Unix epoch time
mtime:
Last modification in Unix epoch time
CLI Example:
.. code-block:: bash
salt '*' file.touch /var/log/emptyfile
'''
name = os.path.expanduser(name)
    if atime and str(atime).isdigit():
        atime = int(atime)
    if mtime and str(mtime).isdigit():
        mtime = int(mtime)
try:
if not os.path.exists(name):
with salt.utils.files.fopen(name, 'a') as fhw:
fhw.write('')
if not atime and not mtime:
times = None
elif not mtime and atime:
times = (atime, time.time())
elif not atime and mtime:
times = (time.time(), mtime)
else:
times = (atime, mtime)
os.utime(name, times)
except TypeError:
raise SaltInvocationError('atime and mtime must be integers')
except (IOError, OSError) as exc:
raise CommandExecutionError(exc.strerror)
return os.path.exists(name)
def seek_read(path, size, offset):
'''
.. versionadded:: 2014.1.0
Seek to a position on a file and read it
path
path to file
    size
        amount of data to read at once
offset
offset to start into the file
CLI Example:
.. code-block:: bash
salt '*' file.seek_read /path/to/file 4096 0
'''
path = os.path.expanduser(path)
seek_fh = os.open(path, os.O_RDONLY)
try:
os.lseek(seek_fh, int(offset), 0)
data = os.read(seek_fh, int(size))
finally:
os.close(seek_fh)
return data
def seek_write(path, data, offset):
'''
.. versionadded:: 2014.1.0
Seek to a position on a file and write to it
path
path to file
data
data to write to file
offset
position in file to start writing
CLI Example:
.. code-block:: bash
salt '*' file.seek_write /path/to/file 'some data' 4096
'''
path = os.path.expanduser(path)
seek_fh = os.open(path, os.O_WRONLY)
try:
os.lseek(seek_fh, int(offset), 0)
ret = os.write(seek_fh, data)
os.fsync(seek_fh)
finally:
os.close(seek_fh)
return ret
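# Illustrative sketch: seek_write() and seek_read() work on byte offsets, so a
# value written at an offset can be read back from the same offset. The path is
# hypothetical and the file is assumed to exist already, since seek_write()
# opens it O_WRONLY without creating it; data must be bytes on Python 3.
def _seek_roundtrip_example_sketch():
    '''
    Write a marker at offset 4096 and read the same number of bytes back.
    '''
    written = seek_write('/tmp/seek_demo.bin', b'some data', 4096)
    return seek_read('/tmp/seek_demo.bin', written, 4096)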
def truncate(path, length):
'''
.. versionadded:: 2014.1.0
Seek to a position on a file and delete everything after that point
path
path to file
length
offset into file to truncate
CLI Example:
.. code-block:: bash
salt '*' file.truncate /path/to/file 512
'''
path = os.path.expanduser(path)
with salt.utils.files.fopen(path, 'rb+') as seek_fh:
seek_fh.truncate(int(length))
def link(src, path):
'''
.. versionadded:: 2014.1.0
Create a hard link to a file
CLI Example:
.. code-block:: bash
salt '*' file.link /path/to/file /path/to/link
'''
src = os.path.expanduser(src)
if not os.path.isabs(src):
raise SaltInvocationError('File path must be absolute.')
try:
os.link(src, path)
return True
except (OSError, IOError):
raise CommandExecutionError('Could not create \'{0}\''.format(path))
return False
def is_link(path):
'''
Check if the path is a symbolic link
CLI Example:
.. code-block:: bash
salt '*' file.is_link /path/to/link
'''
# This function exists because os.path.islink does not support Windows,
# therefore a custom function will need to be called. This function
# therefore helps API consistency by providing a single function to call for
# both operating systems.
return os.path.islink(os.path.expanduser(path))
def symlink(src, path):
'''
Create a symbolic link (symlink, soft link) to a file
CLI Example:
.. code-block:: bash
salt '*' file.symlink /path/to/file /path/to/link
'''
path = os.path.expanduser(path)
try:
if os.path.normpath(os.readlink(path)) == os.path.normpath(src):
log.debug('link already in correct state: %s -> %s', path, src)
return True
except OSError:
pass
if not os.path.isabs(path):
raise SaltInvocationError('File path must be absolute.')
try:
os.symlink(src, path)
return True
except (OSError, IOError):
raise CommandExecutionError('Could not create \'{0}\''.format(path))
return False
def rename(src, dst):
'''
Rename a file or directory
CLI Example:
.. code-block:: bash
salt '*' file.rename /path/to/src /path/to/dst
'''
src = os.path.expanduser(src)
dst = os.path.expanduser(dst)
if not os.path.isabs(src):
raise SaltInvocationError('File path must be absolute.')
try:
os.rename(src, dst)
return True
except OSError:
raise CommandExecutionError(
'Could not rename \'{0}\' to \'{1}\''.format(src, dst)
)
return False
def copy(src, dst, recurse=False, remove_existing=False):
'''
Copy a file or directory from source to dst
In order to copy a directory, the recurse flag is required, and
will by default overwrite files in the destination with the same path,
and retain all other existing files. (similar to cp -r on unix)
remove_existing will remove all files in the target directory,
and then copy files from the source.
.. note::
The copy function accepts paths that are local to the Salt minion.
This function does not support salt://, http://, or the other
additional file paths that are supported by :mod:`states.file.managed
<salt.states.file.managed>` and :mod:`states.file.recurse
<salt.states.file.recurse>`.
CLI Example:
.. code-block:: bash
salt '*' file.copy /path/to/src /path/to/dst
salt '*' file.copy /path/to/src_dir /path/to/dst_dir recurse=True
salt '*' file.copy /path/to/src_dir /path/to/dst_dir recurse=True remove_existing=True
'''
src = os.path.expanduser(src)
dst = os.path.expanduser(dst)
if not os.path.isabs(src):
raise SaltInvocationError('File path must be absolute.')
if not os.path.exists(src):
raise CommandExecutionError('No such file or directory \'{0}\''.format(src))
if not salt.utils.platform.is_windows():
pre_user = get_user(src)
pre_group = get_group(src)
pre_mode = salt.utils.normalize_mode(get_mode(src))
try:
if (os.path.exists(dst) and os.path.isdir(dst)) or os.path.isdir(src):
if not recurse:
raise SaltInvocationError(
"Cannot copy overwriting a directory without recurse flag set to true!")
if remove_existing:
if os.path.exists(dst):
shutil.rmtree(dst)
shutil.copytree(src, dst)
else:
salt.utils.files.recursive_copy(src, dst)
else:
shutil.copyfile(src, dst)
except OSError:
raise CommandExecutionError(
'Could not copy \'{0}\' to \'{1}\''.format(src, dst)
)
if not salt.utils.platform.is_windows():
check_perms(dst, None, pre_user, pre_group, pre_mode)
return True
def lstat(path):
'''
.. versionadded:: 2014.1.0
    Returns the lstat attributes for the given file or dir. Does not follow
    symbolic links.
CLI Example:
.. code-block:: bash
salt '*' file.lstat /path/to/file
'''
path = os.path.expanduser(path)
if not os.path.isabs(path):
raise SaltInvocationError('Path to file must be absolute.')
try:
lst = os.lstat(path)
return dict((key, getattr(lst, key)) for key in ('st_atime', 'st_ctime',
'st_gid', 'st_mode', 'st_mtime', 'st_nlink', 'st_size', 'st_uid'))
except Exception:
return {}
def access(path, mode):
'''
.. versionadded:: 2014.1.0
Test whether the Salt process has the specified access to the file. One of
the following modes must be specified:
    .. code-block:: text
f: Test the existence of the path
r: Test the readability of the path
w: Test the writability of the path
x: Test whether the path can be executed
CLI Example:
.. code-block:: bash
salt '*' file.access /path/to/file f
salt '*' file.access /path/to/file x
'''
path = os.path.expanduser(path)
if not os.path.isabs(path):
raise SaltInvocationError('Path to link must be absolute.')
modes = {'f': os.F_OK,
'r': os.R_OK,
'w': os.W_OK,
'x': os.X_OK}
if mode in modes:
return os.access(path, modes[mode])
elif mode in six.itervalues(modes):
return os.access(path, mode)
else:
raise SaltInvocationError('Invalid mode specified.')
def read(path, binary=False):
'''
.. versionadded:: 2017.7.0
Return the content of the file.
CLI Example:
.. code-block:: bash
salt '*' file.read /path/to/file
'''
access_mode = 'r'
if binary is True:
access_mode += 'b'
with salt.utils.files.fopen(path, access_mode) as file_obj:
return file_obj.read()
def readlink(path, canonicalize=False):
'''
.. versionadded:: 2014.1.0
Return the path that a symlink points to
    If canonicalize is set to True, then it returns the final target
CLI Example:
.. code-block:: bash
salt '*' file.readlink /path/to/link
'''
path = os.path.expanduser(path)
if not os.path.isabs(path):
raise SaltInvocationError('Path to link must be absolute.')
if not os.path.islink(path):
raise SaltInvocationError('A valid link was not specified.')
if canonicalize:
return os.path.realpath(path)
else:
return os.readlink(path)
def readdir(path):
'''
.. versionadded:: 2014.1.0
Return a list containing the contents of a directory
CLI Example:
.. code-block:: bash
salt '*' file.readdir /path/to/dir/
'''
path = os.path.expanduser(path)
if not os.path.isabs(path):
raise SaltInvocationError('Dir path must be absolute.')
if not os.path.isdir(path):
raise SaltInvocationError('A valid directory was not specified.')
dirents = ['.', '..']
dirents.extend(os.listdir(path))
return dirents
def statvfs(path):
'''
.. versionadded:: 2014.1.0
Perform a statvfs call against the filesystem that the file resides on
CLI Example:
.. code-block:: bash
salt '*' file.statvfs /path/to/file
'''
path = os.path.expanduser(path)
if not os.path.isabs(path):
raise SaltInvocationError('File path must be absolute.')
try:
stv = os.statvfs(path)
return dict((key, getattr(stv, key)) for key in ('f_bavail', 'f_bfree',
'f_blocks', 'f_bsize', 'f_favail', 'f_ffree', 'f_files', 'f_flag',
'f_frsize', 'f_namemax'))
except (OSError, IOError):
raise CommandExecutionError('Could not statvfs \'{0}\''.format(path))
return False
def stats(path, hash_type=None, follow_symlinks=True):
'''
Return a dict containing the stats for a given file
CLI Example:
.. code-block:: bash
salt '*' file.stats /etc/passwd
'''
path = os.path.expanduser(path)
ret = {}
if not os.path.exists(path):
try:
# Broken symlinks will return False for os.path.exists(), but still
# have a uid and gid
pstat = os.lstat(path)
except OSError:
# Not a broken symlink, just a nonexistent path
return ret
else:
if follow_symlinks:
pstat = os.stat(path)
else:
pstat = os.lstat(path)
ret['inode'] = pstat.st_ino
ret['uid'] = pstat.st_uid
ret['gid'] = pstat.st_gid
ret['group'] = gid_to_group(pstat.st_gid)
ret['user'] = uid_to_user(pstat.st_uid)
ret['atime'] = pstat.st_atime
ret['mtime'] = pstat.st_mtime
ret['ctime'] = pstat.st_ctime
ret['size'] = pstat.st_size
ret['mode'] = str(oct(stat.S_IMODE(pstat.st_mode)))
if hash_type:
ret['sum'] = get_hash(path, hash_type)
ret['type'] = 'file'
if stat.S_ISDIR(pstat.st_mode):
ret['type'] = 'dir'
if stat.S_ISCHR(pstat.st_mode):
ret['type'] = 'char'
if stat.S_ISBLK(pstat.st_mode):
ret['type'] = 'block'
if stat.S_ISREG(pstat.st_mode):
ret['type'] = 'file'
if stat.S_ISLNK(pstat.st_mode):
ret['type'] = 'link'
if stat.S_ISFIFO(pstat.st_mode):
ret['type'] = 'pipe'
if stat.S_ISSOCK(pstat.st_mode):
ret['type'] = 'socket'
ret['target'] = os.path.realpath(path)
return ret
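# Illustrative sketch: stats() returns a flat dict, so callers usually pull
# individual keys rather than the whole structure. The path is hypothetical and
# the keys shown are the ones populated above.
def _stats_example_sketch():
    '''
    Return the mode, owner, group and file type of /etc/passwd.
    '''
    st = stats('/etc/passwd')
    return st.get('mode'), st.get('user'), st.get('group'), st.get('type')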
def rmdir(path):
'''
.. versionadded:: 2014.1.0
Remove the specified directory. Fails if a directory is not empty.
CLI Example:
.. code-block:: bash
salt '*' file.rmdir /tmp/foo/
'''
path = os.path.expanduser(path)
if not os.path.isabs(path):
raise SaltInvocationError('File path must be absolute.')
if not os.path.isdir(path):
raise SaltInvocationError('A valid directory was not specified.')
try:
os.rmdir(path)
return True
except OSError as exc:
return exc.strerror
def remove(path):
'''
Remove the named file. If a directory is supplied, it will be recursively
deleted.
CLI Example:
.. code-block:: bash
salt '*' file.remove /tmp/foo
'''
path = os.path.expanduser(path)
if not os.path.isabs(path):
raise SaltInvocationError('File path must be absolute: {0}'.format(path))
try:
if os.path.isfile(path) or os.path.islink(path):
os.remove(path)
return True
elif os.path.isdir(path):
shutil.rmtree(path)
return True
except (OSError, IOError) as exc:
raise CommandExecutionError(
'Could not remove \'{0}\': {1}'.format(path, exc)
)
return False
def directory_exists(path):
'''
Tests to see if path is a valid directory. Returns True/False.
CLI Example:
.. code-block:: bash
salt '*' file.directory_exists /etc
'''
return os.path.isdir(os.path.expanduser(path))
def file_exists(path):
'''
Tests to see if path is a valid file. Returns True/False.
CLI Example:
.. code-block:: bash
salt '*' file.file_exists /etc/passwd
'''
return os.path.isfile(os.path.expanduser(path))
def path_exists_glob(path):
'''
Tests to see if path after expansion is a valid path (file or directory).
Expansion allows usage of ? * and character ranges []. Tilde expansion
is not supported. Returns True/False.
    .. versionadded:: 2014.1.0
CLI Example:
.. code-block:: bash
salt '*' file.path_exists_glob /etc/pam*/pass*
'''
    return bool(glob.glob(os.path.expanduser(path)))
def restorecon(path, recursive=False):
'''
Reset the SELinux context on a given path
CLI Example:
.. code-block:: bash
salt '*' file.restorecon /home/user/.ssh/authorized_keys
'''
if recursive:
cmd = ['restorecon', '-FR', path]
else:
cmd = ['restorecon', '-F', path]
return not __salt__['cmd.retcode'](cmd, python_shell=False)
def get_selinux_context(path):
'''
Get an SELinux context from a given path
CLI Example:
.. code-block:: bash
salt '*' file.get_selinux_context /etc/hosts
'''
out = __salt__['cmd.run'](['ls', '-Z', path], python_shell=False)
try:
ret = re.search(r'\w+:\w+:\w+:\w+', out).group(0)
except AttributeError:
ret = (
'No selinux context information is available for {0}'.format(path)
)
return ret
def set_selinux_context(path,
user=None,
role=None,
type=None, # pylint: disable=W0622
range=None): # pylint: disable=W0622
'''
Set a specific SELinux label on a given path
CLI Example:
.. code-block:: bash
salt '*' file.set_selinux_context path <user> <role> <type> <range>
salt '*' file.set_selinux_context /etc/yum.repos.d/epel.repo system_u object_r system_conf_t s0
'''
if not any((user, role, type, range)):
return False
cmd = ['chcon']
if user:
cmd.extend(['-u', user])
if role:
cmd.extend(['-r', role])
if type:
cmd.extend(['-t', type])
if range:
cmd.extend(['-l', range])
cmd.append(path)
ret = not __salt__['cmd.retcode'](cmd, python_shell=False)
if ret:
return get_selinux_context(path)
else:
return ret
def source_list(source, source_hash, saltenv):
'''
Check the source list and return the source to use
CLI Example:
.. code-block:: bash
salt '*' file.source_list salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' base
'''
contextkey = '{0}_|-{1}_|-{2}'.format(source, source_hash, saltenv)
if contextkey in __context__:
return __context__[contextkey]
# get the master file list
if isinstance(source, list):
mfiles = [(f, saltenv) for f in __salt__['cp.list_master'](saltenv)]
mdirs = [(d, saltenv) for d in __salt__['cp.list_master_dirs'](saltenv)]
for single in source:
if isinstance(single, dict):
single = next(iter(single))
path, senv = salt.utils.url.parse(single)
if senv:
mfiles += [(f, senv) for f in __salt__['cp.list_master'](senv)]
mdirs += [(d, senv) for d in __salt__['cp.list_master_dirs'](senv)]
ret = None
for single in source:
if isinstance(single, dict):
# check the proto, if it is http or ftp then download the file
# to check, if it is salt then check the master list
# if it is a local file, check if the file exists
if len(single) != 1:
continue
single_src = next(iter(single))
single_hash = single[single_src] if single[single_src] else source_hash
urlparsed_single_src = _urlparse(single_src)
proto = urlparsed_single_src.scheme
if proto == 'salt':
path, senv = salt.utils.url.parse(single_src)
if not senv:
senv = saltenv
                    if (path, senv) in mfiles or (path, senv) in mdirs:
ret = (single_src, single_hash)
break
elif proto.startswith('http') or proto == 'ftp':
ret = (single_src, single_hash)
break
elif proto == 'file' and os.path.exists(urlparsed_single_src.path):
ret = (single_src, single_hash)
break
elif single_src.startswith('/') and os.path.exists(single_src):
ret = (single_src, single_hash)
break
elif isinstance(single, six.string_types):
path, senv = salt.utils.url.parse(single)
if not senv:
senv = saltenv
if (path, senv) in mfiles or (path, senv) in mdirs:
ret = (single, source_hash)
break
urlparsed_src = _urlparse(single)
proto = urlparsed_src.scheme
if proto == 'file' and os.path.exists(urlparsed_src.path):
ret = (single, source_hash)
break
elif proto.startswith('http') or proto == 'ftp':
ret = (single, source_hash)
break
elif single.startswith('/') and os.path.exists(single):
ret = (single, source_hash)
break
if ret is None:
# None of the list items matched
raise CommandExecutionError(
'none of the specified sources were found'
)
else:
ret = (source, source_hash)
__context__[contextkey] = ret
return ret
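# Illustrative sketch: source_list() accepts plain source strings as well as
# single-key dicts mapping a source to its hash, and returns the first source
# that can actually be found. The URLs and hash below are hypothetical, and the
# __salt__/__context__ dunders must be injected for this to run.
def _source_list_example_sketch():
    '''
    Pick the first reachable source from a mixed list of sources.
    '''
    sources = [
        {'salt://httpd/httpd.conf': 'sha256=' + '0' * 64},
        'http://mirror.example.com/httpd.conf',
    ]
    return source_list(sources, '', 'base')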
def apply_template_on_contents(
contents,
template,
context,
defaults,
saltenv):
'''
Return the contents after applying the templating engine
contents
template string
template
template format
context
Overrides default context variables passed to the template.
defaults
Default context passed to the template.
CLI Example:
.. code-block:: bash
salt '*' file.apply_template_on_contents \\
contents='This is a {{ template }} string.' \\
template=jinja \\
"context={}" "defaults={'template': 'cool'}" \\
saltenv=base
'''
if template in salt.utils.templates.TEMPLATE_REGISTRY:
context_dict = defaults if defaults else {}
if context:
context_dict.update(context)
# Apply templating
contents = salt.utils.templates.TEMPLATE_REGISTRY[template](
contents,
from_str=True,
to_str=True,
context=context_dict,
saltenv=saltenv,
grains=__opts__['grains'],
pillar=__pillar__,
salt=__salt__,
opts=__opts__)['data']
if six.PY2:
contents = contents.encode('utf-8')
elif six.PY3 and isinstance(contents, bytes):
# bytes -> str
contents = contents.decode('utf-8')
else:
ret = {}
ret['result'] = False
ret['comment'] = ('Specified template format {0} is not supported'
).format(template)
return ret
return contents
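# Illustrative sketch: apply_template_on_contents() renders a template string
# entirely in memory, which is how inline ``contents`` get passed through a
# templating engine. The context values are hypothetical and the rendering
# dunders (__opts__, __pillar__, __salt__) must be available.
def _apply_template_example_sketch():
    '''
    Render a one-line jinja template with a default that context can override.
    '''
    return apply_template_on_contents(
        'This is a {{ template }} string.',
        template='jinja',
        context={},
        defaults={'template': 'cool'},
        saltenv='base',
    )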
def get_managed(
name,
template,
source,
source_hash,
source_hash_name,
user,
group,
mode,
attrs,
saltenv,
context,
defaults,
skip_verify=False,
**kwargs):
'''
Return the managed file data for file.managed
name
location where the file lives on the server
template
template format
source
managed source file
source_hash
hash of the source file
source_hash_name
When ``source_hash`` refers to a remote file, this specifies the
filename to look for in that file.
.. versionadded:: 2016.3.5
user
Owner of file
group
Group owner of file
mode
Permissions of file
attrs
Attributes of file
.. versionadded:: Oxygen
context
Variables to add to the template context
defaults
        Default values for context_dict
skip_verify
If ``True``, hash verification of remote file sources (``http://``,
``https://``, ``ftp://``) will be skipped, and the ``source_hash``
argument will be ignored.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' file.get_managed /etc/httpd/conf.d/httpd.conf jinja salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' None root root '755' base None None
'''
# Copy the file to the minion and templatize it
sfn = ''
source_sum = {}
def _get_local_file_source_sum(path):
'''
DRY helper for getting the source_sum value from a locally cached
path.
'''
return {'hsum': get_hash(path, form='sha256'), 'hash_type': 'sha256'}
# If we have a source defined, let's figure out what the hash is
if source:
urlparsed_source = _urlparse(source)
parsed_scheme = urlparsed_source.scheme
parsed_path = os.path.join(
urlparsed_source.netloc, urlparsed_source.path).rstrip(os.sep)
if parsed_scheme and parsed_scheme.lower() in 'abcdefghijklmnopqrstuvwxyz':
parsed_path = ':'.join([parsed_scheme, parsed_path])
parsed_scheme = 'file'
if parsed_scheme == 'salt':
source_sum = __salt__['cp.hash_file'](source, saltenv)
if not source_sum:
return '', {}, 'Source file {0} not found'.format(source)
elif not source_hash and parsed_scheme == 'file':
source_sum = _get_local_file_source_sum(parsed_path)
elif not source_hash and source.startswith(os.sep):
source_sum = _get_local_file_source_sum(source)
else:
if not skip_verify:
if source_hash:
try:
source_sum = get_source_sum(name,
source,
source_hash,
source_hash_name,
saltenv)
except CommandExecutionError as exc:
return '', {}, exc.strerror
else:
msg = (
'Unable to verify upstream hash of source file {0}, '
'please set source_hash or set skip_verify to True'
.format(source)
)
return '', {}, msg
if source and (template or parsed_scheme in salt.utils.files.REMOTE_PROTOS):
# Check if we have the template or remote file cached
cache_refetch = False
cached_dest = __salt__['cp.is_cached'](source, saltenv)
if cached_dest and (source_hash or skip_verify):
htype = source_sum.get('hash_type', 'sha256')
cached_sum = get_hash(cached_dest, form=htype)
if skip_verify:
                # prev: if skip_verify or cached_sum == source_sum['hsum']:
                # the `cached_sum == source_sum['hsum']` check was redundant here,
                # since the elif branch below already handles a hash mismatch
sfn = cached_dest
source_sum = {'hsum': cached_sum, 'hash_type': htype}
elif cached_sum != source_sum.get('hsum', __opts__['hash_type']):
cache_refetch = True
else:
sfn = cached_dest
# If we didn't have the template or remote file, or the file has been
# updated and the cache has to be refreshed, download the file.
if not sfn or cache_refetch:
try:
sfn = __salt__['cp.cache_file'](
source,
saltenv,
source_hash=source_sum.get('hsum'))
except Exception as exc:
# A 404 or other error code may raise an exception, catch it
# and return a comment that will fail the calling state.
return '', {}, 'Failed to cache {0}: {1}'.format(source, exc)
# If cache failed, sfn will be False, so do a truth check on sfn first
# as invoking os.path.exists() on a bool raises a TypeError.
if not sfn or not os.path.exists(sfn):
return sfn, {}, 'Source file \'{0}\' not found'.format(source)
if sfn == name:
raise SaltInvocationError(
'Source file cannot be the same as destination'
)
if template:
if template in salt.utils.templates.TEMPLATE_REGISTRY:
context_dict = defaults if defaults else {}
if context:
context_dict.update(context)
data = salt.utils.templates.TEMPLATE_REGISTRY[template](
sfn,
name=name,
source=source,
user=user,
group=group,
mode=mode,
attrs=attrs,
saltenv=saltenv,
context=context_dict,
salt=__salt__,
pillar=__pillar__,
grains=__opts__['grains'],
opts=__opts__,
**kwargs)
else:
return sfn, {}, ('Specified template format {0} is not supported'
).format(template)
if data['result']:
sfn = data['data']
hsum = get_hash(sfn, form='sha256')
source_sum = {'hash_type': 'sha256',
'hsum': hsum}
else:
__clean_tmp(sfn)
return sfn, {}, data['data']
return sfn, source_sum, ''
def extract_hash(hash_fn,
hash_type='sha256',
file_name='',
source='',
source_hash_name=None):
'''
.. versionchanged:: 2016.3.5
Prior to this version, only the ``file_name`` argument was considered
for filename matches in the hash file. This would be problematic for
cases in which the user was relying on a remote checksum file that they
do not control, and they wished to use a different name for that file
on the minion from the filename on the remote server (and in the
checksum file). For example, managing ``/tmp/myfile.tar.gz`` when the
remote file was at ``https://mydomain.tld/different_name.tar.gz``. The
:py:func:`file.managed <salt.states.file.managed>` state now also
passes this function the source URI as well as the ``source_hash_name``
(if specified). In cases where ``source_hash_name`` is specified, it
takes precedence over both the ``file_name`` and ``source``. When it is
not specified, ``file_name`` takes precedence over ``source``. This
allows for better capability for matching hashes.
.. versionchanged:: 2016.11.0
File name and source URI matches are no longer disregarded when
``source_hash_name`` is specified. They will be used as fallback
matches if there is no match to the ``source_hash_name`` value.
This routine is called from the :mod:`file.managed
<salt.states.file.managed>` state to pull a hash from a remote file.
Regular expressions are used line by line on the ``source_hash`` file, to
find a potential candidate of the indicated hash type. This avoids many
problems of arbitrary file layout rules. It specifically permits pulling
hash codes from debian ``*.dsc`` files.
If no exact match of a hash and filename are found, then the first hash
found (if any) will be returned. If no hashes at all are found, then
``None`` will be returned.
For example:
.. code-block:: yaml
openerp_7.0-latest-1.tar.gz:
file.managed:
- name: /tmp/openerp_7.0-20121227-075624-1_all.deb
- source: http://nightly.openerp.com/7.0/nightly/deb/openerp_7.0-20121227-075624-1.tar.gz
- source_hash: http://nightly.openerp.com/7.0/nightly/deb/openerp_7.0-20121227-075624-1.dsc
CLI Example:
.. code-block:: bash
salt '*' file.extract_hash /path/to/hash/file sha512 /etc/foo
'''
hash_len = HASHES.get(hash_type)
if hash_len is None:
if hash_type:
log.warning(
'file.extract_hash: Unsupported hash_type \'%s\', falling '
'back to matching any supported hash_type', hash_type
)
hash_type = ''
hash_len_expr = '{0},{1}'.format(min(HASHES_REVMAP), max(HASHES_REVMAP))
else:
hash_len_expr = str(hash_len)
filename_separators = string.whitespace + r'\/'
if source_hash_name:
if not isinstance(source_hash_name, six.string_types):
source_hash_name = str(source_hash_name)
source_hash_name_idx = (len(source_hash_name) + 1) * -1
log.debug(
'file.extract_hash: Extracting %s hash for file matching '
'source_hash_name \'%s\'',
'any supported' if not hash_type else hash_type,
source_hash_name
)
if file_name:
if not isinstance(file_name, six.string_types):
file_name = str(file_name)
file_name_basename = os.path.basename(file_name)
file_name_idx = (len(file_name_basename) + 1) * -1
if source:
if not isinstance(source, six.string_types):
source = str(source)
urlparsed_source = _urlparse(source)
source_basename = os.path.basename(
urlparsed_source.path or urlparsed_source.netloc
)
source_idx = (len(source_basename) + 1) * -1
basename_searches = [x for x in (file_name, source) if x]
if basename_searches:
log.debug(
'file.extract_hash: %s %s hash for file matching%s: %s',
'If no source_hash_name match found, will extract'
if source_hash_name
else 'Extracting',
'any supported' if not hash_type else hash_type,
'' if len(basename_searches) == 1 else ' either of the following',
', '.join(basename_searches)
)
partial = None
found = {}
with salt.utils.files.fopen(hash_fn, 'r') as fp_:
for line in fp_:
line = line.strip()
hash_re = r'(?i)(?<![a-z0-9])([a-f0-9]{' + hash_len_expr + '})(?![a-z0-9])'
hash_match = re.search(hash_re, line)
matched = None
if hash_match:
matched_hsum = hash_match.group(1)
if matched_hsum is not None:
matched_type = HASHES_REVMAP.get(len(matched_hsum))
if matched_type is None:
# There was a match, but it's not of the correct length
# to match one of the supported hash types.
matched = None
else:
matched = {'hsum': matched_hsum,
'hash_type': matched_type}
if matched is None:
log.debug(
'file.extract_hash: In line \'%s\', no %shash found',
line,
'' if not hash_type else hash_type + ' '
)
continue
if partial is None:
partial = matched
def _add_to_matches(found, line, match_type, value, matched):
log.debug(
'file.extract_hash: Line \'%s\' matches %s \'%s\'',
line, match_type, value
)
found.setdefault(match_type, []).append(matched)
hash_matched = False
if source_hash_name:
if line.endswith(source_hash_name):
# Checking the character before where the basename
# should start for either whitespace or a path
# separator. We can't just rsplit on spaces/whitespace,
# because the filename may contain spaces.
try:
if line[source_hash_name_idx] in string.whitespace:
_add_to_matches(found, line, 'source_hash_name',
source_hash_name, matched)
hash_matched = True
except IndexError:
pass
elif re.match(re.escape(source_hash_name) + r'\s+',
line):
_add_to_matches(found, line, 'source_hash_name',
source_hash_name, matched)
hash_matched = True
if file_name:
if line.endswith(file_name_basename):
# Checking the character before where the basename
# should start for either whitespace or a path
# separator. We can't just rsplit on spaces/whitespace,
# because the filename may contain spaces.
try:
if line[file_name_idx] in filename_separators:
_add_to_matches(found, line, 'file_name',
file_name, matched)
hash_matched = True
except IndexError:
pass
elif re.match(re.escape(file_name) + r'\s+', line):
_add_to_matches(found, line, 'file_name',
file_name, matched)
hash_matched = True
if source:
if line.endswith(source_basename):
# Same as above, we can't just do an rsplit here.
try:
if line[source_idx] in filename_separators:
_add_to_matches(found, line, 'source',
source, matched)
hash_matched = True
except IndexError:
pass
elif re.match(re.escape(source) + r'\s+', line):
_add_to_matches(found, line, 'source', source, matched)
hash_matched = True
if not hash_matched:
log.debug(
'file.extract_hash: Line \'%s\' contains %s hash '
'\'%s\', but line did not meet the search criteria',
line, matched['hash_type'], matched['hsum']
)
for found_type, found_str in (('source_hash_name', source_hash_name),
('file_name', file_name),
('source', source)):
if found_type in found:
if len(found[found_type]) > 1:
log.debug(
'file.extract_hash: Multiple %s matches for %s: %s',
found_type,
found_str,
', '.join(
['{0} ({1})'.format(x['hsum'], x['hash_type'])
for x in found[found_type]]
)
)
ret = found[found_type][0]
log.debug(
'file.extract_hash: Returning %s hash \'%s\' as a match of %s',
ret['hash_type'], ret['hsum'], found_str
)
return ret
if partial:
log.debug(
'file.extract_hash: Returning the partially identified %s hash '
'\'%s\'', partial['hash_type'], partial['hsum']
)
return partial
log.debug('file.extract_hash: No matches, returning None')
return None
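# Illustrative sketch: extract_hash() scans a checksum file line by line, so a
# conventional ``<hexdigest>  <filename>`` layout is enough for it to find a
# match. The temporary file and the all-zero digest below are made up purely
# for illustration; a real caller would clean the file up afterwards.
def _extract_hash_example_sketch():
    '''
    Write a two-line sha256-style checksum file and pull the entry matching a
    specific file name.
    '''
    import tempfile
    fake_sum = '0' * 64  # the length of a sha256 hexdigest
    with tempfile.NamedTemporaryFile('w', suffix='.sha256', delete=False) as tmp:
        tmp.write('{0}  foo.tar.gz\n{1}  bar.tar.gz\n'.format(fake_sum, fake_sum))
        hash_fn = tmp.name
    return extract_hash(hash_fn, hash_type='sha256', file_name='foo.tar.gz')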
def check_perms(name, ret, user, group, mode, attrs=None, follow_symlinks=False):
'''
Check the permissions on files, modify attributes and chown if needed
CLI Example:
.. code-block:: bash
salt '*' file.check_perms /etc/sudoers '{}' root root 400 ai
.. versionchanged:: 2014.1.3
``follow_symlinks`` option added
'''
name = os.path.expanduser(name)
if not ret:
ret = {'name': name,
'changes': {},
'comment': [],
'result': True}
orig_comment = ''
else:
orig_comment = ret['comment']
ret['comment'] = []
# Check permissions
perms = {}
cur = stats(name, follow_symlinks=follow_symlinks)
if not cur:
# NOTE: The file.directory state checks the content of the error
# message in this exception. Any changes made to the message for this
# exception will reflect the file.directory state as well, and will
# likely require changes there.
raise CommandExecutionError('{0} does not exist'.format(name))
perms['luser'] = cur['user']
perms['lgroup'] = cur['group']
perms['lmode'] = salt.utils.normalize_mode(cur['mode'])
is_dir = os.path.isdir(name)
if not salt.utils.platform.is_windows() and not is_dir:
# List attributes on file
perms['lattrs'] = ''.join(lsattr(name)[name])
# Remove attributes on file so changes can be enforced.
if perms['lattrs']:
chattr(name, operator='remove', attributes=perms['lattrs'])
# Mode changes if needed
if mode is not None:
# File is a symlink, ignore the mode setting
# if follow_symlinks is False
if os.path.islink(name) and not follow_symlinks:
pass
else:
mode = salt.utils.normalize_mode(mode)
if mode != perms['lmode']:
if __opts__['test'] is True:
ret['changes']['mode'] = mode
else:
set_mode(name, mode)
if mode != salt.utils.normalize_mode(get_mode(name)):
ret['result'] = False
ret['comment'].append(
'Failed to change mode to {0}'.format(mode)
)
else:
ret['changes']['mode'] = mode
# user/group changes if needed, then check if it worked
if user:
if isinstance(user, int):
user = uid_to_user(user)
if (salt.utils.platform.is_windows() and
user_to_uid(user) != user_to_uid(perms['luser'])
) or (
not salt.utils.platform.is_windows() and user != perms['luser']
):
perms['cuser'] = user
if group:
if isinstance(group, int):
group = gid_to_group(group)
if (salt.utils.platform.is_windows() and
group_to_gid(group) != group_to_gid(perms['lgroup'])
) or (
not salt.utils.platform.is_windows() and group != perms['lgroup']
):
perms['cgroup'] = group
if 'cuser' in perms or 'cgroup' in perms:
if not __opts__['test']:
if os.path.islink(name) and not follow_symlinks:
chown_func = lchown
else:
chown_func = chown
if user is None:
user = perms['luser']
if group is None:
group = perms['lgroup']
try:
chown_func(name, user, group)
except OSError:
ret['result'] = False
if user:
if isinstance(user, int):
user = uid_to_user(user)
if (salt.utils.platform.is_windows() and
user_to_uid(user) != user_to_uid(
get_user(name, follow_symlinks=follow_symlinks)) and
user != ''
) or (
not salt.utils.platform.is_windows() and
user != get_user(name, follow_symlinks=follow_symlinks) and
user != ''
):
if __opts__['test'] is True:
ret['changes']['user'] = user
else:
ret['result'] = False
ret['comment'].append('Failed to change user to {0}'
.format(user))
elif 'cuser' in perms and user != '':
ret['changes']['user'] = user
if group:
if isinstance(group, int):
group = gid_to_group(group)
if (salt.utils.platform.is_windows() and
group_to_gid(group) != group_to_gid(
get_group(name, follow_symlinks=follow_symlinks)) and
                group != '') or (
not salt.utils.platform.is_windows() and
group != get_group(name, follow_symlinks=follow_symlinks) and
                group != ''
):
if __opts__['test'] is True:
ret['changes']['group'] = group
else:
ret['result'] = False
ret['comment'].append('Failed to change group to {0}'
.format(group))
        elif 'cgroup' in perms and group != '':
ret['changes']['group'] = group
if isinstance(orig_comment, six.string_types):
if orig_comment:
ret['comment'].insert(0, orig_comment)
ret['comment'] = '; '.join(ret['comment'])
if __opts__['test'] is True and ret['changes']:
ret['result'] = None
if not salt.utils.platform.is_windows() and not is_dir:
# Replace attributes on file if it had been removed
if perms['lattrs']:
chattr(name, operator='add', attributes=perms['lattrs'])
# Modify attributes of file if needed
if attrs is not None and not is_dir:
# File is a symlink, ignore the mode setting
# if follow_symlinks is False
if os.path.islink(name) and not follow_symlinks:
pass
else:
diff_attrs = _cmp_attrs(name, attrs)
if diff_attrs[0] is not None or diff_attrs[1] is not None:
if __opts__['test'] is True:
ret['changes']['attrs'] = attrs
else:
if diff_attrs[0] is not None:
chattr(name, operator="add", attributes=diff_attrs[0])
if diff_attrs[1] is not None:
chattr(name, operator="remove", attributes=diff_attrs[1])
cmp_attrs = _cmp_attrs(name, attrs)
if cmp_attrs[0] is not None or cmp_attrs[1] is not None:
ret['result'] = False
ret['comment'].append(
'Failed to change attributes to {0}'.format(attrs)
)
else:
ret['changes']['attrs'] = attrs
return ret, perms
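# Illustrative sketch: check_perms() returns a (ret, perms) pair; when
# __opts__['test'] is true it only records the would-be changes instead of
# applying them. The path and ownership below are hypothetical and the loader
# dunders are assumed to be present.
def _check_perms_example_sketch():
    '''
    Report which of user/group/mode would change on /etc/sudoers, plus the
    mode currently on disk.
    '''
    ret, perms = check_perms('/etc/sudoers', None, 'root', 'root', '0440')
    return ret['changes'], perms.get('lmode')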
def check_managed(
name,
source,
source_hash,
source_hash_name,
user,
group,
mode,
attrs,
template,
context,
defaults,
saltenv,
contents=None,
skip_verify=False,
**kwargs):
'''
Check to see what changes need to be made for a file
CLI Example:
.. code-block:: bash
salt '*' file.check_managed /etc/httpd/conf.d/httpd.conf salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root, root, '755' jinja True None None base
'''
# If the source is a list then find which file exists
source, source_hash = source_list(source, # pylint: disable=W0633
source_hash,
saltenv)
sfn = ''
source_sum = None
if contents is None:
# Gather the source file from the server
sfn, source_sum, comments = get_managed(
name,
template,
source,
source_hash,
source_hash_name,
user,
group,
mode,
attrs,
saltenv,
context,
defaults,
skip_verify,
**kwargs)
if comments:
__clean_tmp(sfn)
return False, comments
changes = check_file_meta(name, sfn, source, source_sum, user,
group, mode, attrs, saltenv, contents)
    # Ignore permissions for files written to temporary directories
# Files in any path will still be set correctly using get_managed()
if name.startswith(tempfile.gettempdir()):
for key in ['user', 'group', 'mode']:
changes.pop(key, None)
__clean_tmp(sfn)
if changes:
log.info(changes)
comments = ['The following values are set to be changed:\n']
comments.extend('{0}: {1}\n'.format(key, val)
for key, val in six.iteritems(changes))
return None, ''.join(comments)
return True, 'The file {0} is in the correct state'.format(name)
def check_managed_changes(
name,
source,
source_hash,
source_hash_name,
user,
group,
mode,
attrs,
template,
context,
defaults,
saltenv,
contents=None,
skip_verify=False,
keep_mode=False,
**kwargs):
'''
Return a dictionary of what changes need to be made for a file
CLI Example:
.. code-block:: bash
salt '*' file.check_managed_changes /etc/httpd/conf.d/httpd.conf salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root, root, '755' jinja True None None base
'''
# If the source is a list then find which file exists
source, source_hash = source_list(source, # pylint: disable=W0633
source_hash,
saltenv)
sfn = ''
source_sum = None
if contents is None:
# Gather the source file from the server
sfn, source_sum, comments = get_managed(
name,
template,
source,
source_hash,
source_hash_name,
user,
group,
mode,
attrs,
saltenv,
context,
defaults,
skip_verify,
**kwargs)
if comments:
__clean_tmp(sfn)
return False, comments
if sfn and source and keep_mode:
if _urlparse(source).scheme in ('salt', 'file') \
or source.startswith('/'):
try:
mode = __salt__['cp.stat_file'](source, saltenv=saltenv, octal=True)
except Exception as exc:
log.warning('Unable to stat %s: %s', sfn, exc)
changes = check_file_meta(name, sfn, source, source_sum, user,
group, mode, attrs, saltenv, contents)
__clean_tmp(sfn)
return changes
def check_file_meta(
name,
sfn,
source,
source_sum,
user,
group,
mode,
attrs,
saltenv,
contents=None):
'''
Check for the changes in the file metadata.
CLI Example:
.. code-block:: bash
salt '*' file.check_file_meta /etc/httpd/conf.d/httpd.conf salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root, root, '755' base
.. note::
Supported hash types include sha512, sha384, sha256, sha224, sha1, and
md5.
name
Path to file destination
sfn
Template-processed source file contents
source
URL to file source
source_sum
File checksum information as a dictionary
.. code-block:: yaml
{hash_type: md5, hsum: <md5sum>}
user
Destination file user owner
group
Destination file group owner
mode
Destination file permissions mode
attrs
Destination file attributes
.. versionadded:: Oxygen
saltenv
Salt environment used to resolve source files
contents
File contents
'''
changes = {}
if not source_sum:
source_sum = {}
lstats = stats(name, hash_type=source_sum.get('hash_type', None), follow_symlinks=False)
if not lstats:
changes['newfile'] = name
return changes
if 'hsum' in source_sum:
if source_sum['hsum'] != lstats['sum']:
if not sfn and source:
sfn = __salt__['cp.cache_file'](
source,
saltenv,
source_hash=source_sum['hsum'])
if sfn:
try:
changes['diff'] = get_diff(
sfn, name, template=True, show_filenames=False)
except CommandExecutionError as exc:
changes['diff'] = exc.strerror
else:
changes['sum'] = 'Checksum differs'
if contents is not None:
# Write a tempfile with the static contents
tmp = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX,
text=True)
if salt.utils.platform.is_windows():
contents = os.linesep.join(
_splitlines_preserving_trailing_newline(contents))
with salt.utils.files.fopen(tmp, 'w') as tmp_:
tmp_.write(salt.utils.stringutils.to_str(contents))
# Compare the static contents with the named file
try:
differences = get_diff(name, tmp, show_filenames=False)
except CommandExecutionError as exc:
log.error('Failed to diff files: {0}'.format(exc))
differences = exc.strerror
__clean_tmp(tmp)
if differences:
if __salt__['config.option']('obfuscate_templates'):
changes['diff'] = '<Obfuscated Template>'
else:
changes['diff'] = differences
if not salt.utils.platform.is_windows():
# Check owner
if (user is not None
and user != lstats['user']
and user != lstats['uid']):
changes['user'] = user
# Check group
if (group is not None
and group != lstats['group']
and group != lstats['gid']):
changes['group'] = group
# Normalize the file mode
smode = salt.utils.normalize_mode(lstats['mode'])
mode = salt.utils.normalize_mode(mode)
if mode is not None and mode != smode:
changes['mode'] = mode
diff_attrs = _cmp_attrs(name, attrs)
if (
        attrs is not None and
        (diff_attrs[0] is not None or
         diff_attrs[1] is not None)
):
changes['attrs'] = attrs
return changes
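# Illustrative sketch: check_file_meta() only reports differences, so an empty
# dict means the destination already matches. The arguments below are
# hypothetical; passing ``contents`` instead of a cached source file diffs the
# destination against an in-memory string. The loader dunders are assumed to
# be injected.
def _check_file_meta_example_sketch():
    '''
    Ask whether /etc/motd already has the desired contents, owner and mode.
    '''
    changes = check_file_meta(
        '/etc/motd',
        sfn='',
        source=None,
        source_sum={},
        user='root',
        group='root',
        mode='0644',
        attrs=None,
        saltenv='base',
        contents='Welcome to the managed motd\n',
    )
    return not changes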
def get_diff(file1,
file2,
saltenv='base',
show_filenames=True,
show_changes=True,
template=False,
source_hash_file1=None,
source_hash_file2=None):
'''
Return unified diff of two files
file1
The first file to feed into the diff utility
.. versionchanged:: Oxygen
Can now be either a local or remote file. In earlier releases,
        this had to be a file local to the minion.
file2
The second file to feed into the diff utility
.. versionchanged:: Oxygen
Can now be either a local or remote file. In earlier releases, this
had to be a file on the salt fileserver (i.e.
``salt://somefile.txt``)
show_filenames : True
Set to ``False`` to hide the filenames in the top two lines of the
diff.
show_changes : True
If set to ``False``, and there are differences, then instead of a diff
a simple message stating that show_changes is set to ``False`` will be
returned.
template : False
Set to ``True`` if two templates are being compared. This is not useful
except for within states, with the ``obfuscate_templates`` option set
to ``True``.
.. versionadded:: Oxygen
source_hash_file1
If ``file1`` is an http(s)/ftp URL and the file exists in the minion's
file cache, this option can be passed to keep the minion from
re-downloading the archive if the cached copy matches the specified
hash.
.. versionadded:: Oxygen
source_hash_file2
If ``file2`` is an http(s)/ftp URL and the file exists in the minion's
file cache, this option can be passed to keep the minion from
re-downloading the archive if the cached copy matches the specified
hash.
.. versionadded:: Oxygen
CLI Examples:
.. code-block:: bash
salt '*' file.get_diff /home/fred/.vimrc salt://users/fred/.vimrc
salt '*' file.get_diff /tmp/foo.txt /tmp/bar.txt
'''
files = (file1, file2)
source_hashes = (source_hash_file1, source_hash_file2)
paths = []
errors = []
for filename, source_hash in zip(files, source_hashes):
try:
# Local file paths will just return the same path back when passed
# to cp.cache_file.
cached_path = __salt__['cp.cache_file'](filename,
saltenv,
source_hash=source_hash)
if cached_path is False:
errors.append(
u'File {0} not found'.format(
salt.utils.stringutils.to_unicode(filename)
)
)
continue
paths.append(cached_path)
except MinionError as exc:
errors.append(salt.utils.stringutils.to_unicode(exc.__str__()))
continue
if errors:
raise CommandExecutionError(
'Failed to cache one or more files',
info=errors
)
args = []
for idx, filename in enumerate(files):
try:
with salt.utils.files.fopen(filename, 'r') as fp_:
args.append(fp_.readlines())
except (IOError, OSError) as exc:
raise CommandExecutionError(
'Failed to read {0}: {1}'.format(
salt.utils.stringutils.to_str(filename),
exc.strerror
)
)
if args[0] != args[1]:
if template and __salt__['config.option']('obfuscate_templates'):
ret = u'<Obfuscated Template>'
elif not show_changes:
ret = u'<show_changes=False>'
else:
bdiff = _binary_replace(*files)
if bdiff:
ret = bdiff
else:
if show_filenames:
args.extend(
[salt.utils.stringutils.to_str(x) for x in files]
)
ret = salt.utils.locales.sdecode(
''.join(difflib.unified_diff(*args)) # pylint: disable=no-value-for-parameter
)
return ret
return u''
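# Illustrative sketch: get_diff() caches both inputs through cp.cache_file, so
# local paths and salt:// URLs can be mixed freely. The paths below are
# hypothetical and the __salt__ dunder must be injected for this to run.
def _get_diff_example_sketch():
    '''
    Return a unified diff between a local file and its fileserver counterpart,
    without the two filename header lines.
    '''
    return get_diff('/etc/httpd/conf/httpd.conf',
                    'salt://httpd/httpd.conf',
                    show_filenames=False)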
def manage_file(name,
sfn,
ret,
source,
source_sum,
user,
group,
mode,
attrs,
saltenv,
backup,
makedirs=False,
template=None, # pylint: disable=W0613
show_changes=True,
contents=None,
dir_mode=None,
follow_symlinks=True,
skip_verify=False,
keep_mode=False,
encoding=None,
encoding_errors='strict',
**kwargs):
'''
Checks the destination against what was retrieved with get_managed and
makes the appropriate modifications (if necessary).
name
location to place the file
sfn
location of cached file on the minion
This is the path to the file stored on the minion. This file is placed
on the minion using cp.cache_file. If the hash sum of that file
matches the source_sum, we do not transfer the file to the minion
again.
This file is then grabbed and if it has template set, it renders the
file to be placed into the correct place on the system using
        salt.utils.files.copyfile()
ret
The initial state return data structure. Pass in ``None`` to use the
default structure.
source
file reference on the master
    source_sum
sum hash for source
user
user owner
group
group owner
backup
backup_mode
attrs
attributes to be set on file: '' means remove all of them
.. versionadded: Oxygen
makedirs
make directories if they do not exist
template
format of templating
show_changes
Include diff in state return
    contents
contents to be placed in the file
dir_mode
mode for directories created with makedirs
skip_verify : False
If ``True``, hash verification of remote file sources (``http://``,
``https://``, ``ftp://``) will be skipped, and the ``source_hash``
argument will be ignored.
.. versionadded:: 2016.3.0
keep_mode : False
If ``True``, and the ``source`` is a file from the Salt fileserver (or
a local file on the minion), the mode of the destination file will be
set to the mode of the source file.
.. note:: keep_mode does not work with salt-ssh.
As a consequence of how the files are transferred to the minion, and
the inability to connect back to the master with salt-ssh, salt is
unable to stat the file as it exists on the fileserver and thus
cannot mirror the mode on the salt-ssh minion
encoding : None
If None, str() will be applied to contents.
If not None, specified encoding will be used.
See https://docs.python.org/3/library/codecs.html#standard-encodings
for the list of available encodings.
.. versionadded:: 2017.7.0
encoding_errors : 'strict'
        Default is ``'strict'``.
See https://docs.python.org/2/library/codecs.html#codec-base-classes
for the error handling schemes.
.. versionadded:: 2017.7.0
CLI Example:
.. code-block:: bash
salt '*' file.manage_file /etc/httpd/conf.d/httpd.conf '' '{}' salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root root '755' base ''
.. versionchanged:: 2014.7.0
``follow_symlinks`` option added
'''
name = os.path.expanduser(name)
if not ret:
ret = {'name': name,
'changes': {},
'comment': '',
'result': True}
# Ensure that user-provided hash string is lowercase
if source_sum and ('hsum' in source_sum):
source_sum['hsum'] = source_sum['hsum'].lower()
if source and not sfn:
# File is not present, cache it
sfn = __salt__['cp.cache_file'](source, saltenv)
if not sfn:
return _error(
ret, 'Source file \'{0}\' not found'.format(source))
htype = source_sum.get('hash_type', __opts__['hash_type'])
# Recalculate source sum now that file has been cached
source_sum = {
'hash_type': htype,
'hsum': get_hash(sfn, form=htype)
}
if keep_mode:
if _urlparse(source).scheme in ('salt', 'file') \
or source.startswith('/'):
try:
mode = __salt__['cp.stat_file'](source, saltenv=saltenv, octal=True)
except Exception as exc:
log.warning('Unable to stat %s: %s', sfn, exc)
# Check changes if the target file exists
if os.path.isfile(name) or os.path.islink(name):
if os.path.islink(name) and follow_symlinks:
real_name = os.path.realpath(name)
else:
real_name = name
# Only test the checksums on files with managed contents
if source and not (not follow_symlinks and os.path.islink(real_name)):
name_sum = get_hash(real_name, source_sum.get('hash_type', __opts__['hash_type']))
else:
name_sum = None
# Check if file needs to be replaced
if source and (name_sum is None or source_sum.get('hsum', __opts__['hash_type']) != name_sum):
if not sfn:
sfn = __salt__['cp.cache_file'](source, saltenv)
if not sfn:
return _error(
ret, 'Source file \'{0}\' not found'.format(source))
# If the downloaded file came from a non salt server or local
# source, and we are not skipping checksum verification, then
# verify that it matches the specified checksum.
if not skip_verify \
and _urlparse(source).scheme not in ('salt', ''):
dl_sum = get_hash(sfn, source_sum['hash_type'])
if dl_sum != source_sum['hsum']:
ret['comment'] = (
'Specified {0} checksum for {1} ({2}) does not match '
'actual checksum ({3}). If the \'source_hash\' value '
'refers to a remote file with multiple possible '
'matches, then it may be necessary to set '
'\'source_hash_name\'.'.format(
source_sum['hash_type'],
source,
source_sum['hsum'],
dl_sum
)
)
ret['result'] = False
return ret
# Print a diff equivalent to diff -u old new
if __salt__['config.option']('obfuscate_templates'):
ret['changes']['diff'] = '<Obfuscated Template>'
elif not show_changes:
ret['changes']['diff'] = '<show_changes=False>'
else:
try:
ret['changes']['diff'] = get_diff(
real_name, sfn, show_filenames=False)
except CommandExecutionError as exc:
ret['changes']['diff'] = exc.strerror
# Pre requisites are met, and the file needs to be replaced, do it
try:
salt.utils.files.copyfile(sfn,
real_name,
__salt__['config.backup_mode'](backup),
__opts__['cachedir'])
except IOError as io_error:
__clean_tmp(sfn)
return _error(
ret, 'Failed to commit change: {0}'.format(io_error))
if contents is not None:
# Write the static contents to a temporary file
tmp = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX,
text=True)
if salt.utils.platform.is_windows():
contents = os.linesep.join(
_splitlines_preserving_trailing_newline(contents))
with salt.utils.files.fopen(tmp, 'w') as tmp_:
if encoding:
log.debug('File will be encoded with {0}'.format(encoding))
tmp_.write(contents.encode(encoding=encoding, errors=encoding_errors))
else:
tmp_.write(salt.utils.stringutils.to_str(contents))
try:
differences = get_diff(
real_name, tmp, show_filenames=False,
show_changes=show_changes, template=True)
except CommandExecutionError as exc:
ret.setdefault('warnings', []).append(
'Failed to detect changes to file: {0}'.format(exc.strerror)
)
differences = ''
if differences:
ret['changes']['diff'] = differences
# Pre requisites are met, the file needs to be replaced, do it
try:
salt.utils.files.copyfile(tmp,
real_name,
__salt__['config.backup_mode'](backup),
__opts__['cachedir'])
except IOError as io_error:
__clean_tmp(tmp)
return _error(
ret, 'Failed to commit change: {0}'.format(io_error))
__clean_tmp(tmp)
# Check for changing symlink to regular file here
if os.path.islink(name) and not follow_symlinks:
if not sfn:
sfn = __salt__['cp.cache_file'](source, saltenv)
if not sfn:
return _error(
ret, 'Source file \'{0}\' not found'.format(source))
# If the downloaded file came from a non salt server source verify
# that it matches the intended sum value
if not skip_verify and _urlparse(source).scheme != 'salt':
dl_sum = get_hash(sfn, source_sum['hash_type'])
if dl_sum != source_sum['hsum']:
ret['comment'] = (
'Specified {0} checksum for {1} ({2}) does not match '
'actual checksum ({3})'.format(
source_sum['hash_type'],
name,
source_sum['hsum'],
dl_sum
)
)
ret['result'] = False
return ret
try:
salt.utils.files.copyfile(sfn,
name,
__salt__['config.backup_mode'](backup),
__opts__['cachedir'])
except IOError as io_error:
__clean_tmp(sfn)
return _error(
ret, 'Failed to commit change: {0}'.format(io_error))
ret['changes']['diff'] = \
'Replace symbolic link with regular file'
if salt.utils.platform.is_windows():
ret = check_perms(name,
ret,
kwargs.get('win_owner'),
kwargs.get('win_perms'),
kwargs.get('win_deny_perms'),
None,
kwargs.get('win_inheritance'))
else:
ret, _ = check_perms(name, ret, user, group, mode, attrs, follow_symlinks)
if ret['changes']:
ret['comment'] = u'File {0} updated'.format(
salt.utils.locales.sdecode(name)
)
elif not ret['changes'] and ret['result']:
ret['comment'] = u'File {0} is in the correct state'.format(
salt.utils.locales.sdecode(name)
)
if sfn:
__clean_tmp(sfn)
return ret
else: # target file does not exist
contain_dir = os.path.dirname(name)
def _set_mode_and_make_dirs(name, dir_mode, mode, user, group):
# check for existence of windows drive letter
if salt.utils.platform.is_windows():
drive, _ = os.path.splitdrive(name)
if drive and not os.path.exists(drive):
__clean_tmp(sfn)
return _error(ret,
'{0} drive not present'.format(drive))
if dir_mode is None and mode is not None:
# Add execute bit to each nonzero digit in the mode, if
# dir_mode was not specified. Otherwise, any
# directories created with makedirs_() below can't be
# listed via a shell.
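                # For example, a file mode of '640' yields a dir_mode of '750'.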
mode_list = [x for x in str(mode)][-3:]
for idx in range(len(mode_list)):
if mode_list[idx] != '0':
mode_list[idx] = str(int(mode_list[idx]) | 1)
dir_mode = ''.join(mode_list)
if salt.utils.platform.is_windows():
# This function resides in win_file.py and will be available
# on Windows. The local function will be overridden
# pylint: disable=E1121
makedirs_(name,
kwargs.get('win_owner'),
kwargs.get('win_perms'),
kwargs.get('win_deny_perms'),
kwargs.get('win_inheritance'))
# pylint: enable=E1121
else:
makedirs_(name, user=user, group=group, mode=dir_mode)
if source:
# It is a new file, set the diff accordingly
ret['changes']['diff'] = 'New file'
# Apply the new file
if not sfn:
sfn = __salt__['cp.cache_file'](source, saltenv)
if not sfn:
return _error(
ret, 'Source file \'{0}\' not found'.format(source))
# If the downloaded file came from a non salt server source verify
# that it matches the intended sum value
if not skip_verify \
and _urlparse(source).scheme != 'salt':
dl_sum = get_hash(sfn, source_sum['hash_type'])
if dl_sum != source_sum['hsum']:
ret['comment'] = (
'Specified {0} checksum for {1} ({2}) does not match '
'actual checksum ({3})'.format(
source_sum['hash_type'],
name,
source_sum['hsum'],
dl_sum
)
)
ret['result'] = False
return ret
if not os.path.isdir(contain_dir):
if makedirs:
_set_mode_and_make_dirs(name, dir_mode, mode, user, group)
else:
__clean_tmp(sfn)
# No changes actually made
ret['changes'].pop('diff', None)
return _error(ret, 'Parent directory not present')
else: # source != True
if not os.path.isdir(contain_dir):
if makedirs:
_set_mode_and_make_dirs(name, dir_mode, mode, user, group)
else:
__clean_tmp(sfn)
# No changes actually made
ret['changes'].pop('diff', None)
return _error(ret, 'Parent directory not present')
# Create the file, user rw-only if mode will be set to prevent
# a small security race problem before the permissions are set
if mode:
current_umask = os.umask(0o77)
# Create a new file when test is False and source is None
if contents is None:
if not __opts__['test']:
if touch(name):
ret['changes']['new'] = 'file {0} created'.format(name)
ret['comment'] = 'Empty file'
else:
return _error(
ret, 'Empty file {0} not created'.format(name)
)
else:
if not __opts__['test']:
if touch(name):
ret['changes']['diff'] = 'New file'
else:
return _error(
ret, 'File {0} not created'.format(name)
)
if mode:
os.umask(current_umask)
if contents is not None:
# Write the static contents to a temporary file
tmp = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX,
text=True)
if salt.utils.platform.is_windows():
contents = os.linesep.join(
_splitlines_preserving_trailing_newline(contents))
with salt.utils.files.fopen(tmp, 'w') as tmp_:
if encoding:
log.debug('File will be encoded with {0}'.format(encoding))
tmp_.write(contents.encode(encoding=encoding, errors=encoding_errors))
else:
tmp_.write(salt.utils.stringutils.to_str(contents))
# Copy into place
salt.utils.files.copyfile(tmp,
name,
__salt__['config.backup_mode'](backup),
__opts__['cachedir'])
__clean_tmp(tmp)
# Now copy the file contents if there is a source file
elif sfn:
salt.utils.files.copyfile(sfn,
name,
__salt__['config.backup_mode'](backup),
__opts__['cachedir'])
__clean_tmp(sfn)
# This is a new file, if no mode specified, use the umask to figure
# out what mode to use for the new file.
if mode is None and not salt.utils.platform.is_windows():
# Get current umask
mask = os.umask(0)
os.umask(mask)
# Calculate the mode value that results from the umask
mode = oct((0o777 ^ mask) & 0o666)
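            # e.g. with a umask of 0o022 this works out to a mode of 0o644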
if salt.utils.platform.is_windows():
ret = check_perms(name,
ret,
kwargs.get('win_owner'),
kwargs.get('win_perms'),
kwargs.get('win_deny_perms'),
None,
kwargs.get('win_inheritance'))
else:
ret, _ = check_perms(name, ret, user, group, mode, attrs)
if not ret['comment']:
ret['comment'] = 'File ' + name + ' updated'
if __opts__['test']:
ret['comment'] = 'File ' + name + ' not updated'
elif not ret['changes'] and ret['result']:
ret['comment'] = 'File ' + name + ' is in the correct state'
if sfn:
__clean_tmp(sfn)
return ret
def mkdir(dir_path,
user=None,
group=None,
mode=None):
'''
Ensure that a directory is available.
CLI Example:
.. code-block:: bash
salt '*' file.mkdir /opt/jetty/context
'''
dir_path = os.path.expanduser(dir_path)
directory = os.path.normpath(dir_path)
if not os.path.isdir(directory):
# If a caller such as managed() is invoked with makedirs=True, make
# sure that any created dirs are created with the same user and group
        # to follow the principle of least surprise.
makedirs_perms(directory, user, group, mode)
return True
def makedirs_(path,
user=None,
group=None,
mode=None):
'''
Ensure that the directory containing this path is available.
.. note::
The path must end with a trailing slash otherwise the directory/directories
will be created up to the parent directory. For example if path is
``/opt/code``, then it would be treated as ``/opt/`` but if the path
ends with a trailing slash like ``/opt/code/``, then it would be
treated as ``/opt/code/``.
CLI Example:
.. code-block:: bash
salt '*' file.makedirs /opt/code/
'''
path = os.path.expanduser(path)
if mode:
mode = salt.utils.normalize_mode(mode)
# walk up the directory structure until we find the first existing
# directory
dirname = os.path.normpath(os.path.dirname(path))
if os.path.isdir(dirname):
# There's nothing for us to do
msg = 'Directory \'{0}\' already exists'.format(dirname)
log.debug(msg)
return msg
if os.path.exists(dirname):
msg = 'The path \'{0}\' already exists and is not a directory'.format(
dirname
)
log.debug(msg)
return msg
directories_to_create = []
while True:
if os.path.isdir(dirname):
break
directories_to_create.append(dirname)
current_dirname = dirname
dirname = os.path.dirname(dirname)
if current_dirname == dirname:
raise SaltInvocationError(
'Recursive creation for path \'{0}\' would result in an '
'infinite loop. Please use an absolute path.'.format(dirname)
)
# create parent directories from the topmost to the most deeply nested one
directories_to_create.reverse()
for directory_to_create in directories_to_create:
# all directories have the user, group and mode set!!
log.debug('Creating directory: %s', directory_to_create)
mkdir(directory_to_create, user=user, group=group, mode=mode)
def makedirs_perms(name,
user=None,
group=None,
mode='0755'):
'''
Taken and modified from os.makedirs to set user, group and mode for each
directory created.
CLI Example:
.. code-block:: bash
salt '*' file.makedirs_perms /opt/code
'''
name = os.path.expanduser(name)
path = os.path
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
try:
makedirs_perms(head, user, group, mode)
except OSError as exc:
# be happy if someone already created the path
if exc.errno != errno.EEXIST:
raise
if tail == os.curdir: # xxx/newdir/. exists if xxx/newdir exists
return
os.mkdir(name)
check_perms(name,
None,
user,
group,
int('{0}'.format(mode)) if mode else None)
def get_devmm(name):
'''
Get major/minor info from a device
CLI Example:
.. code-block:: bash
salt '*' file.get_devmm /dev/chr
'''
name = os.path.expanduser(name)
if is_chrdev(name) or is_blkdev(name):
stat_structure = os.stat(name)
return (
os.major(stat_structure.st_rdev),
os.minor(stat_structure.st_rdev))
else:
return (0, 0)
def is_chrdev(name):
'''
Check if a file exists and is a character device.
CLI Example:
.. code-block:: bash
salt '*' file.is_chrdev /dev/chr
'''
name = os.path.expanduser(name)
stat_structure = None
try:
stat_structure = os.stat(name)
except OSError as exc:
if exc.errno == errno.ENOENT:
# If the character device does not exist in the first place
return False
else:
raise
return stat.S_ISCHR(stat_structure.st_mode)
def mknod_chrdev(name,
major,
minor,
user=None,
group=None,
mode='0660'):
'''
.. versionadded:: 0.17.0
Create a character device.
CLI Example:
.. code-block:: bash
salt '*' file.mknod_chrdev /dev/chr 180 31
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'comment': '',
'result': False}
log.debug('Creating character device name:{0} major:{1} minor:{2} mode:{3}'
.format(name, major, minor, mode))
try:
if __opts__['test']:
ret['changes'] = {'new': 'Character device {0} created.'.format(name)}
ret['result'] = None
else:
if os.mknod(name,
int(str(mode).lstrip('0Oo'), 8) | stat.S_IFCHR,
os.makedev(major, minor)) is None:
ret['changes'] = {'new': 'Character device {0} created.'.format(name)}
ret['result'] = True
except OSError as exc:
# be happy it is already there....however, if you are trying to change the
# major/minor, you will need to unlink it first as os.mknod will not overwrite
if exc.errno != errno.EEXIST:
raise
else:
ret['comment'] = 'File {0} exists and cannot be overwritten'.format(name)
# quick pass at verifying the permissions of the newly created character device
check_perms(name,
None,
user,
group,
int('{0}'.format(mode)) if mode else None)
return ret
def is_blkdev(name):
'''
Check if a file exists and is a block device.
CLI Example:
.. code-block:: bash
salt '*' file.is_blkdev /dev/blk
'''
name = os.path.expanduser(name)
stat_structure = None
try:
stat_structure = os.stat(name)
except OSError as exc:
if exc.errno == errno.ENOENT:
# If the block device does not exist in the first place
return False
else:
raise
return stat.S_ISBLK(stat_structure.st_mode)
def mknod_blkdev(name,
major,
minor,
user=None,
group=None,
mode='0660'):
'''
.. versionadded:: 0.17.0
Create a block device.
CLI Example:
.. code-block:: bash
salt '*' file.mknod_blkdev /dev/blk 8 999
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'comment': '',
'result': False}
log.debug('Creating block device name:{0} major:{1} minor:{2} mode:{3}'
.format(name, major, minor, mode))
try:
if __opts__['test']:
ret['changes'] = {'new': 'Block device {0} created.'.format(name)}
ret['result'] = None
else:
if os.mknod(name,
int(str(mode).lstrip('0Oo'), 8) | stat.S_IFBLK,
os.makedev(major, minor)) is None:
ret['changes'] = {'new': 'Block device {0} created.'.format(name)}
ret['result'] = True
except OSError as exc:
# be happy it is already there....however, if you are trying to change the
# major/minor, you will need to unlink it first as os.mknod will not overwrite
if exc.errno != errno.EEXIST:
raise
else:
ret['comment'] = 'File {0} exists and cannot be overwritten'.format(name)
# quick pass at verifying the permissions of the newly created block device
check_perms(name,
None,
user,
group,
int('{0}'.format(mode)) if mode else None)
return ret
def is_fifo(name):
'''
Check if a file exists and is a FIFO.
CLI Example:
.. code-block:: bash
salt '*' file.is_fifo /dev/fifo
'''
name = os.path.expanduser(name)
stat_structure = None
try:
stat_structure = os.stat(name)
except OSError as exc:
if exc.errno == errno.ENOENT:
# If the fifo does not exist in the first place
return False
else:
raise
return stat.S_ISFIFO(stat_structure.st_mode)
def mknod_fifo(name,
user=None,
group=None,
mode='0660'):
'''
.. versionadded:: 0.17.0
Create a FIFO pipe.
CLI Example:
.. code-block:: bash
salt '*' file.mknod_fifo /dev/fifo
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'comment': '',
'result': False}
log.debug('Creating FIFO name: {0}'.format(name))
try:
if __opts__['test']:
ret['changes'] = {'new': 'Fifo pipe {0} created.'.format(name)}
ret['result'] = None
else:
if os.mkfifo(name, int(str(mode).lstrip('0Oo'), 8)) is None:
ret['changes'] = {'new': 'Fifo pipe {0} created.'.format(name)}
ret['result'] = True
except OSError as exc:
# be happy it is already there
if exc.errno != errno.EEXIST:
raise
else:
ret['comment'] = 'File {0} exists and cannot be overwritten'.format(name)
# quick pass at verifying the permissions of the newly created fifo
check_perms(name,
None,
user,
group,
int('{0}'.format(mode)) if mode else None)
return ret
def mknod(name,
ntype,
major=0,
minor=0,
user=None,
group=None,
mode='0600'):
'''
.. versionadded:: 0.17.0
Create a block device, character device, or fifo pipe.
    Identical to the GNU mknod.
CLI Examples:
.. code-block:: bash
salt '*' file.mknod /dev/chr c 180 31
salt '*' file.mknod /dev/blk b 8 999
        salt '*' file.mknod /dev/fifo p
'''
ret = False
makedirs_(name, user, group)
if ntype == 'c':
ret = mknod_chrdev(name, major, minor, user, group, mode)
elif ntype == 'b':
ret = mknod_blkdev(name, major, minor, user, group, mode)
elif ntype == 'p':
ret = mknod_fifo(name, user, group, mode)
else:
raise SaltInvocationError(
'Node type unavailable: \'{0}\'. Available node types are '
'character (\'c\'), block (\'b\'), and pipe (\'p\').'.format(ntype)
)
return ret
def list_backups(path, limit=None):
'''
.. versionadded:: 0.17.0
Lists the previous versions of a file backed up using Salt's :ref:`file
state backup <file-state-backups>` system.
path
The path on the minion to check for backups
limit
Limit the number of results to the most recent N backups
CLI Example:
.. code-block:: bash
salt '*' file.list_backups /foo/bar/baz.txt
'''
path = os.path.expanduser(path)
try:
limit = int(limit)
except TypeError:
pass
except ValueError:
log.error('file.list_backups: \'limit\' value must be numeric')
limit = None
bkroot = _get_bkroot()
parent_dir, basename = os.path.split(path)
if salt.utils.platform.is_windows():
# ':' is an illegal filesystem path character on Windows
src_dir = parent_dir.replace(':', '_')
else:
src_dir = parent_dir[1:]
# Figure out full path of location of backup file in minion cache
bkdir = os.path.join(bkroot, src_dir)
if not os.path.isdir(bkdir):
return {}
files = {}
for fname in [x for x in os.listdir(bkdir)
if os.path.isfile(os.path.join(bkdir, x))]:
if salt.utils.platform.is_windows():
# ':' is an illegal filesystem path character on Windows
strpfmt = '{0}_%a_%b_%d_%H-%M-%S_%f_%Y'.format(basename)
else:
strpfmt = '{0}_%a_%b_%d_%H:%M:%S_%f_%Y'.format(basename)
try:
timestamp = datetime.datetime.strptime(fname, strpfmt)
except ValueError:
# File didn't match the strp format string, so it's not a backup
# for this file. Move on to the next one.
continue
if salt.utils.platform.is_windows():
str_format = '%a %b %d %Y %H-%M-%S.%f'
else:
str_format = '%a %b %d %Y %H:%M:%S.%f'
files.setdefault(timestamp, {})['Backup Time'] = \
timestamp.strftime(str_format)
location = os.path.join(bkdir, fname)
files[timestamp]['Size'] = os.stat(location).st_size
files[timestamp]['Location'] = location
return dict(list(zip(
list(range(len(files))),
[files[x] for x in sorted(files, reverse=True)[:limit]]
)))
list_backup = salt.utils.alias_function(list_backups, 'list_backup')
def list_backups_dir(path, limit=None):
'''
Lists the previous versions of a directory backed up using Salt's :ref:`file
state backup <file-state-backups>` system.
path
The directory on the minion to check for backups
limit
Limit the number of results to the most recent N backups
CLI Example:
.. code-block:: bash
salt '*' file.list_backups_dir /foo/bar/baz/
'''
path = os.path.expanduser(path)
try:
limit = int(limit)
except TypeError:
pass
except ValueError:
log.error('file.list_backups_dir: \'limit\' value must be numeric')
limit = None
bkroot = _get_bkroot()
parent_dir, basename = os.path.split(path)
# Figure out full path of location of backup folder in minion cache
bkdir = os.path.join(bkroot, parent_dir[1:])
if not os.path.isdir(bkdir):
return {}
files = {}
f = dict([(i, len(list(n))) for i, n in itertools.groupby([x.split("_")[0] for x in sorted(os.listdir(bkdir))])])
ff = os.listdir(bkdir)
for i, n in six.iteritems(f):
ssfile = {}
for x in sorted(ff):
basename = x.split('_')[0]
if i == basename:
strpfmt = '{0}_%a_%b_%d_%H:%M:%S_%f_%Y'.format(basename)
try:
timestamp = datetime.datetime.strptime(x, strpfmt)
except ValueError:
# Folder didn't match the strp format string, so it's not a backup
# for this folder. Move on to the next one.
continue
ssfile.setdefault(timestamp, {})['Backup Time'] = \
timestamp.strftime('%a %b %d %Y %H:%M:%S.%f')
location = os.path.join(bkdir, x)
ssfile[timestamp]['Size'] = os.stat(location).st_size
ssfile[timestamp]['Location'] = location
sfiles = dict(list(zip(list(range(n)), [ssfile[x] for x in sorted(ssfile, reverse=True)[:limit]])))
sefiles = {i: sfiles}
files.update(sefiles)
return files
def restore_backup(path, backup_id):
'''
.. versionadded:: 0.17.0
Restore a previous version of a file that was backed up using Salt's
:ref:`file state backup <file-state-backups>` system.
path
The path on the minion to check for backups
backup_id
The numeric id for the backup you wish to restore, as found using
:mod:`file.list_backups <salt.modules.file.list_backups>`
CLI Example:
.. code-block:: bash
salt '*' file.restore_backup /foo/bar/baz.txt 0
'''
path = os.path.expanduser(path)
# Note: This only supports minion backups, so this function will need to be
# modified if/when master backups are implemented.
ret = {'result': False,
'comment': 'Invalid backup_id \'{0}\''.format(backup_id)}
try:
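        # Only accept an integer-looking backup_id (e.g. reject 1.5, whose
        # int() would silently point at a different backup).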
if len(str(backup_id)) == len(str(int(backup_id))):
backup = list_backups(path)[int(backup_id)]
else:
return ret
except ValueError:
return ret
except KeyError:
ret['comment'] = 'backup_id \'{0}\' does not exist for ' \
'{1}'.format(backup_id, path)
return ret
salt.utils.backup_minion(path, _get_bkroot())
try:
shutil.copyfile(backup['Location'], path)
except IOError as exc:
ret['comment'] = \
'Unable to restore {0} to {1}: ' \
'{2}'.format(backup['Location'], path, exc)
return ret
else:
ret['result'] = True
ret['comment'] = 'Successfully restored {0} to ' \
'{1}'.format(backup['Location'], path)
# Try to set proper ownership
if not salt.utils.platform.is_windows():
try:
fstat = os.stat(path)
except (OSError, IOError):
ret['comment'] += ', but was unable to set ownership'
else:
os.chown(path, fstat.st_uid, fstat.st_gid)
return ret
def delete_backup(path, backup_id):
'''
.. versionadded:: 0.17.0
Delete a previous version of a file that was backed up using Salt's
:ref:`file state backup <file-state-backups>` system.
path
The path on the minion to check for backups
backup_id
The numeric id for the backup you wish to delete, as found using
:mod:`file.list_backups <salt.modules.file.list_backups>`
CLI Example:
.. code-block:: bash
salt '*' file.delete_backup /var/cache/salt/minion/file_backup/home/foo/bar/baz.txt 0
'''
path = os.path.expanduser(path)
ret = {'result': False,
'comment': 'Invalid backup_id \'{0}\''.format(backup_id)}
try:
if len(str(backup_id)) == len(str(int(backup_id))):
backup = list_backups(path)[int(backup_id)]
else:
return ret
except ValueError:
return ret
except KeyError:
ret['comment'] = 'backup_id \'{0}\' does not exist for ' \
'{1}'.format(backup_id, path)
return ret
try:
os.remove(backup['Location'])
except IOError as exc:
ret['comment'] = 'Unable to remove {0}: {1}'.format(backup['Location'],
exc)
else:
ret['result'] = True
ret['comment'] = 'Successfully removed {0}'.format(backup['Location'])
return ret
remove_backup = salt.utils.alias_function(delete_backup, 'remove_backup')
def grep(path,
pattern,
*opts):
'''
Grep for a string in the specified file
.. note::
This function's return value is slated for refinement in future
versions of Salt
path
Path to the file to be searched
.. note::
            Globbing is supported (i.e. ``/var/log/foo/*.log``), but if
            globbing is being used then the path should be quoted to keep the
            shell from attempting to expand the glob expression.
pattern
Pattern to match. For example: ``test``, or ``a[0-5]``
opts
Additional command-line flags to pass to the grep command. For example:
``-v``, or ``-i -B2``
.. note::
The options should come after a double-dash (as shown in the
examples below) to keep Salt's own argument parser from
interpreting them.
CLI Example:
.. code-block:: bash
salt '*' file.grep /etc/passwd nobody
salt '*' file.grep /etc/sysconfig/network-scripts/ifcfg-eth0 ipaddr -- -i
salt '*' file.grep /etc/sysconfig/network-scripts/ifcfg-eth0 ipaddr -- -i -B2
salt '*' file.grep "/etc/sysconfig/network-scripts/*" ipaddr -- -i -l
'''
path = os.path.expanduser(path)
split_opts = []
for opt in opts:
try:
split = salt.utils.args.shlex_split(opt)
except AttributeError:
split = salt.utils.args.shlex_split(str(opt))
if len(split) > 1:
raise SaltInvocationError(
'Passing multiple command line arguments in a single string '
'is not supported, please pass the following arguments '
'separately: {0}'.format(opt)
)
split_opts.extend(split)
cmd = ['grep'] + split_opts + [pattern, path]
try:
ret = __salt__['cmd.run_all'](cmd, python_shell=False)
except (IOError, OSError) as exc:
raise CommandExecutionError(exc.strerror)
return ret
def open_files(by_pid=False):
'''
Return a list of all physical open files on the system.
CLI Examples:
.. code-block:: bash
salt '*' file.open_files
salt '*' file.open_files by_pid=True
'''
# First we collect valid PIDs
pids = {}
procfs = os.listdir('/proc/')
for pfile in procfs:
try:
pids[int(pfile)] = []
except ValueError:
# Not a valid PID, move on
pass
# Then we look at the open files for each PID
files = {}
for pid in pids:
ppath = '/proc/{0}'.format(pid)
try:
tids = os.listdir('{0}/task'.format(ppath))
except OSError:
continue
# Collect the names of all of the file descriptors
fd_ = []
#try:
# fd_.append(os.path.realpath('{0}/task/{1}exe'.format(ppath, tid)))
#except:
# pass
for fpath in os.listdir('{0}/fd'.format(ppath)):
fd_.append('{0}/fd/{1}'.format(ppath, fpath))
for tid in tids:
try:
fd_.append(
os.path.realpath('{0}/task/{1}/exe'.format(ppath, tid))
)
except OSError:
continue
for tpath in os.listdir('{0}/task/{1}/fd'.format(ppath, tid)):
fd_.append('{0}/task/{1}/fd/{2}'.format(ppath, tid, tpath))
fd_ = sorted(set(fd_))
# Loop through file descriptors and return useful data for each file
for fdpath in fd_:
# Sometimes PIDs and TIDs disappear before we can query them
try:
name = os.path.realpath(fdpath)
# Running stat on the file cuts out all of the sockets and
# deleted files from the list
os.stat(name)
except OSError:
continue
if name not in files:
files[name] = [pid]
else:
# We still want to know which PIDs are using each file
files[name].append(pid)
files[name] = sorted(set(files[name]))
pids[pid].append(name)
pids[pid] = sorted(set(pids[pid]))
if by_pid:
return pids
return files
def pardir():
'''
Return the relative parent directory path symbol for underlying OS
.. versionadded:: 2014.7.0
This can be useful when constructing Salt Formulas.
.. code-block:: jinja
{% set pardir = salt['file.pardir']() %}
{% set final_path = salt['file.join']('subdir', pardir, 'confdir') %}
CLI Example:
.. code-block:: bash
salt '*' file.pardir
'''
return os.path.pardir
def normpath(path):
'''
    Returns a normalized path, eliminating double slashes, etc.
.. versionadded:: 2015.5.0
This can be useful at the CLI but is frequently useful when scripting.
.. code-block:: jinja
{%- from salt['file.normpath'](tpldir + '/../vars.jinja') import parent_vars %}
CLI Example:
.. code-block:: bash
salt '*' file.normpath 'a/b/c/..'
'''
return os.path.normpath(path)
def basename(path):
'''
Returns the final component of a pathname
.. versionadded:: 2015.5.0
This can be useful at the CLI but is frequently useful when scripting.
.. code-block:: jinja
{%- set filename = salt['file.basename'](source_file) %}
CLI Example:
.. code-block:: bash
salt '*' file.basename 'test/test.config'
'''
return os.path.basename(path)
def dirname(path):
'''
Returns the directory component of a pathname
.. versionadded:: 2015.5.0
This can be useful at the CLI but is frequently useful when scripting.
.. code-block:: jinja
{%- from salt['file.dirname'](tpldir) + '/vars.jinja' import parent_vars %}
CLI Example:
.. code-block:: bash
salt '*' file.dirname 'test/path/filename.config'
'''
return os.path.dirname(path)
def join(*args):
'''
Return a normalized file system path for the underlying OS
.. versionadded:: 2014.7.0
This can be useful at the CLI but is frequently useful when scripting
combining path variables:
.. code-block:: jinja
{% set www_root = '/var' %}
{% set app_dir = 'myapp' %}
myapp_config:
file:
- managed
- name: {{ salt['file.join'](www_root, app_dir, 'config.yaml') }}
CLI Example:
.. code-block:: bash
salt '*' file.join '/' 'usr' 'local' 'bin'
'''
return os.path.join(*args)
def move(src, dst):
'''
Move a file or directory
CLI Example:
.. code-block:: bash
salt '*' file.move /path/to/src /path/to/dst
'''
src = os.path.expanduser(src)
dst = os.path.expanduser(dst)
if not os.path.isabs(src):
raise SaltInvocationError('Source path must be absolute.')
if not os.path.isabs(dst):
raise SaltInvocationError('Destination path must be absolute.')
ret = {
'result': True,
'comment': "'{0}' moved to '{1}'".format(src, dst),
}
try:
shutil.move(src, dst)
except (OSError, IOError) as exc:
raise CommandExecutionError(
"Unable to move '{0}' to '{1}': {2}".format(src, dst, exc)
)
return ret
def diskusage(path):
'''
Recursively calculate disk usage of path and return it
in bytes
CLI Example:
.. code-block:: bash
salt '*' file.diskusage /path/to/check
'''
total_size = 0
seen = set()
if os.path.isfile(path):
stat_structure = os.stat(path)
ret = stat_structure.st_size
return ret
for dirpath, dirnames, filenames in os.walk(path):
for f in filenames:
fp = os.path.join(dirpath, f)
try:
stat_structure = os.stat(fp)
except OSError:
continue
if stat_structure.st_ino in seen:
continue
seen.add(stat_structure.st_ino)
total_size += stat_structure.st_size
ret = total_size
return ret
|
py | 7dfd5e11ef9e1fa98ebfffca3b9cd0174f97fd31 | #!/usr/bin/env python
# Written against python 3.3.1
# Matasano Problem 32
# Break HMAC-SHA1 with a slightly less artificial timing leak.
from prob1 import rawToHex
import threading
import webserver
import time
import socket
import os
from prob17 import setByte
# .005: Would get the first four right
# .001: Got the first one wrong
# My response: raise iteration count to 10 -- would still get first one wrong
# raise to 20: would get first one wrong
# raise to 50: Seems to be working again
DELAY = .001
def startserver(delay):
server_thread = threading.Thread(target=webserver.start_server, args=[delay])
server_thread.start();
# Using the timing leak in this application, write a program that
# discovers the valid MAC for any file.
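# Timing-attack idea: the server checks the signature byte by byte with a small
# per-byte delay, so the correct guess for a position takes measurably longer to
# reject; each guess is timed over numtrials requests (see guess_byte) to
# average out the noise that the smaller DELAY values above introduce.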
def discover_mac(message):
guess_mac = b'\x00' * 20;
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM);
sock.connect(('127.0.0.1', 9000))
for i in range(20):
nextbyte = guess_byte(sock, message, i, guess_mac);
guess_mac = setByte(guess_mac, i, nextbyte);
print (rawToHex(guess_mac));
return guess_mac;
def guess_byte(sock, message, index, guess_mac, numtrials=50):
timings = [0]*256;
# try each byte at the index
for i in range(256):
this_guess = setByte(guess_mac, index, i);
url = b'test?file=' + message + b'&signature=' + rawToHex(this_guess) + b'\n';
start = time.perf_counter()
for j in range(numtrials):
sock.send(url);
data = sock.recv(1024)
stop = time.perf_counter()
timings[i] = stop - start;
# assume the largest timing is the right one
value = timings.index(max(timings));
print("index: " + str(index) + " : value: " + hex(value));
return value;
def do32():
startserver(DELAY);
#known answer: b'6262261f054f0a17dfa68d87bf64f5416c128340'
discover_mac(b'Mary had a little lamb');
if __name__ == "__main__":
do32();
os._exit(0); |
py | 7dfd5fc8b08b508a1f0d14787dcde22e1a08be3b | r"""
Induced morphisms on homology
This module implements morphisms on homology induced by morphisms of
simplicial complexes. It requires working with field coefficients.
See :class:`InducedHomologyMorphism` for documentation.
AUTHORS:
- John H. Palmieri (2015.09)
"""
########################################################################
# Copyright (C) 2015 John H. Palmieri <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
#
# http://www.gnu.org/licenses/
########################################################################
# To do: implement morphisms of cubical complexes, with methods
# - domain
# - codomain
# - associated_chain_complex_morphism
# Once this is done, the code here ought to work without modification.
from sage.categories.graded_algebras_with_basis import GradedAlgebrasWithBasis
from sage.categories.graded_modules_with_basis import GradedModulesWithBasis
from sage.categories.morphism import Morphism
from sage.categories.homset import Hom
from sage.rings.rational_field import QQ
from sage.topology.simplicial_complex import SimplicialComplex
class InducedHomologyMorphism(Morphism):
r"""
An element of this class is a morphism of (co)homology groups
induced by a map of simplicial complexes. It requires working
with field coefficients.
INPUT:
- ``map`` -- the map of simplicial complexes
- ``base_ring`` -- a field (optional, default ``QQ``)
- ``cohomology`` -- boolean (optional, default ``False``). If
``True``, return the induced map in cohomology rather than
homology.
.. note::
This is not intended to be used directly by the user, but instead
via the method
:meth:`~sage.topology.simplicial_complex_morphism.SimplicialComplexMorphism.induced_homology_morphism`.
EXAMPLES::
sage: S1 = simplicial_complexes.Sphere(1)
sage: H = Hom(S1, S1)
sage: f = H({0:0, 1:2, 2:1}) # f switches two vertices
sage: f_star = f.induced_homology_morphism(QQ, cohomology=True)
sage: f_star
Graded algebra endomorphism of Cohomology ring of Minimal triangulation of the 1-sphere over Rational Field
Defn: induced by:
Simplicial complex endomorphism of Minimal triangulation of the 1-sphere
Defn: 0 |--> 0
1 |--> 2
2 |--> 1
sage: f_star.to_matrix(1)
[-1]
sage: f_star.to_matrix()
[ 1| 0]
[--+--]
[ 0|-1]
sage: T = simplicial_complexes.Torus()
sage: y = T.homology_with_basis(QQ).basis()[(1,1)]
sage: y.to_cycle()
(0, 5) - (0, 6) + (5, 6)
Since `(0,2) - (0,5) + (2,5)` is a cycle representing a homology
class in the torus, we can define a map `S^1 \to T` inducing an
inclusion on `H_1`::
sage: Hom(S1, T)({0:0, 1:2, 2:5})
Simplicial complex morphism:
From: Minimal triangulation of the 1-sphere
To: Minimal triangulation of the torus
Defn: 0 |--> 0
1 |--> 2
2 |--> 5
sage: g = Hom(S1, T)({0:0, 1:2, 2: 5})
sage: g_star = g.induced_homology_morphism(QQ)
sage: g_star.to_matrix(0)
[1]
sage: g_star.to_matrix(1)
[-1]
[ 0]
sage: g_star.to_matrix()
[ 1| 0]
[--+--]
[ 0|-1]
[ 0| 0]
[--+--]
[ 0| 0]
We can evaluate such a map on (co)homology classes::
sage: H = S1.homology_with_basis(QQ)
sage: a = H.basis()[(1,0)]
sage: g_star(a)
-h_{1,0}
sage: T = S1.product(S1, is_mutable=False)
sage: diag = Hom(S1,T).diagonal_morphism()
sage: b,c = list(T.cohomology_ring().basis(1))
sage: diag_c = diag.induced_homology_morphism(cohomology=True)
sage: diag_c(b)
h^{1,0}
sage: diag_c(c)
h^{1,0}
"""
def __init__(self, map, base_ring=None, cohomology=False):
"""
INPUT:
- ``map`` -- the map of simplicial complexes
- ``base_ring`` -- a field (optional, default ``QQ``)
- ``cohomology`` -- boolean (optional, default ``False``). If
``True``, return the induced map in cohomology rather than
homology.
EXAMPLES::
sage: from sage.homology.homology_morphism import InducedHomologyMorphism
sage: K = simplicial_complexes.RandomComplex(8, 3)
sage: H = Hom(K,K)
sage: id = H.identity()
sage: f = InducedHomologyMorphism(id, QQ)
sage: f.to_matrix(0) == 1 and f.to_matrix(1) == 1 and f.to_matrix(2) == 1
True
sage: f = InducedHomologyMorphism(id, ZZ)
Traceback (most recent call last):
...
ValueError: the coefficient ring must be a field
sage: S1 = simplicial_complexes.Sphere(1).barycentric_subdivision()
sage: S1.is_mutable()
True
sage: g = Hom(S1, S1).identity()
sage: h = g.induced_homology_morphism(QQ)
Traceback (most recent call last):
...
ValueError: the domain and codomain complexes must be immutable
sage: S1.set_immutable()
sage: g = Hom(S1, S1).identity()
sage: h = g.induced_homology_morphism(QQ)
"""
if (isinstance(map.domain(), SimplicialComplex)
and (map.domain().is_mutable() or map.codomain().is_mutable())):
raise ValueError('the domain and codomain complexes must be immutable')
if base_ring is None:
base_ring = QQ
if not base_ring.is_field():
raise ValueError('the coefficient ring must be a field')
self._cohomology = cohomology
self._map = map
self._base_ring = base_ring
if cohomology:
domain = map.codomain().cohomology_ring(base_ring=base_ring)
codomain = map.domain().cohomology_ring(base_ring=base_ring)
Morphism.__init__(self, Hom(domain, codomain,
category=GradedAlgebrasWithBasis(base_ring)))
else:
domain = map.domain().homology_with_basis(base_ring=base_ring, cohomology=cohomology)
codomain = map.codomain().homology_with_basis(base_ring=base_ring, cohomology=cohomology)
Morphism.__init__(self, Hom(domain, codomain,
category=GradedModulesWithBasis(base_ring)))
def base_ring(self):
"""
The base ring for this map
EXAMPLES::
sage: K = simplicial_complexes.Simplex(2)
sage: H = Hom(K,K)
sage: id = H.identity()
sage: id.induced_homology_morphism(QQ).base_ring()
Rational Field
sage: id.induced_homology_morphism(GF(13)).base_ring()
Finite Field of size 13
"""
return self._base_ring
def to_matrix(self, deg=None):
"""
The matrix for this map.
If degree ``deg`` is specified, return the matrix just in that
degree; otherwise, return the block matrix representing the
entire map.
INPUT:
- ``deg`` -- (optional, default ``None``) the degree
EXAMPLES::
sage: S1 = simplicial_complexes.Sphere(1)
sage: S1_b = S1.barycentric_subdivision()
sage: S1_b.set_immutable()
sage: d = {(0,): 0, (0,1): 1, (1,): 2, (1,2): 0, (2,): 1, (0,2): 2}
sage: f = Hom(S1_b, S1)(d)
sage: h = f.induced_homology_morphism(QQ)
sage: h.to_matrix(1)
[2]
sage: h.to_matrix()
[1|0]
[-+-]
[0|2]
"""
base_ring = self.base_ring()
# Compute homology case first.
domain = self._map.domain()
codomain = self._map.codomain()
phi_codomain, H_codomain = codomain.algebraic_topological_model(base_ring)
phi_domain, H_domain = domain.algebraic_topological_model(base_ring)
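        # Compose: include a homology class as a cycle (iota), push it through
        # the chain-level map induced by self._map, then project back onto the
        # homology basis (pi).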
mat = (phi_codomain.pi().to_matrix(deg)
* self._map.associated_chain_complex_morphism(self.base_ring()).to_matrix(deg)
* phi_domain.iota().to_matrix(deg))
if self._cohomology:
mat = mat.transpose()
H_domain, H_codomain = H_codomain, H_domain
if deg is None:
import numpy as np
betti_domain = [H_domain.free_module_rank(n)
for n in range(domain.dimension()+1)]
betti_codomain = [H_codomain.free_module_rank(n)
for n in range(codomain.dimension()+1)]
# Compute cumulative sums of Betti numbers to get subdivisions:
row_subdivs = list(np.cumsum(betti_codomain[:-1]))
col_subdivs = list(np.cumsum(betti_domain[:-1]))
mat.subdivide(row_subdivs, col_subdivs)
return mat
def __call__(self, elt):
"""
Evaluate this map on ``elt``, an element of (co)homology.
INPUT:
- ``elt`` -- informally, an element of the domain of this
map. More formally, an element of
:class:`homology_vector_space_with_basis.HomologyVectorSpaceWithBasis`.
EXAMPLES::
sage: S1 = simplicial_complexes.Sphere(1)
sage: f = {0:0, 1:2, 2:1}
sage: H = Hom(S1,S1)
sage: g = H(f)
sage: h = g.induced_homology_morphism(QQ)
sage: x = S1.homology_with_basis().basis()[(1,0)]
sage: x
h_{1,0}
sage: h(x) # indirect doctest
-h_{1,0}
"""
base_ring = self.base_ring()
if self._cohomology:
codomain = self._map.domain().homology_with_basis(base_ring, cohomology=True)
if elt.parent().complex() != self._map.codomain():
raise ValueError('element is not a cohomology class for the correct complex')
else:
codomain = self._map.codomain().homology_with_basis(base_ring)
if elt.parent().complex() != self._map.domain():
raise ValueError('element is not a homology class for the correct complex')
return codomain.from_vector(self.to_matrix() * elt.to_vector())
def __eq__(self, other):
"""
Return ``True`` if and only if this map agrees with ``other``.
INPUT:
- ``other`` -- another induced homology morphism
This automatically returns ``False`` if the morphisms have
different domains, codomains, base rings, or values for their
cohomology flags
Otherwise, determine this by computing the matrices for this
map and ``other`` using the (same) basis for the homology
vector spaces.
EXAMPLES::
sage: S1 = simplicial_complexes.Sphere(1)
sage: K = simplicial_complexes.Simplex(2)
sage: f = Hom(S1, K)({0: 0, 1:1, 2:2})
sage: g = Hom(S1, K)({0: 0, 1:0, 2:0})
sage: f.induced_homology_morphism(QQ) == g.induced_homology_morphism(QQ)
True
sage: f.induced_homology_morphism(QQ) == g.induced_homology_morphism(GF(2))
False
sage: id = Hom(K, K).identity() # different domain
sage: f.induced_homology_morphism(QQ) == id.induced_homology_morphism(QQ)
False
"""
if (self._map.domain() != other._map.domain()
or self._map.codomain() != other._map.codomain()
or self.base_ring() != other.base_ring()
or self._cohomology != other._cohomology):
return False
dim = min(self._map.domain().dimension(), self._map.codomain().dimension())
return all(self.to_matrix(d) == other.to_matrix(d) for d in range(dim+1))
def is_identity(self):
"""
True if this is the identity map on (co)homology.
EXAMPLES::
sage: S1 = simplicial_complexes.Sphere(1)
sage: H = Hom(S1, S1)
sage: flip = H({0:0, 1:2, 2:1})
sage: flip.induced_homology_morphism(QQ).is_identity()
False
sage: flip.induced_homology_morphism(GF(2)).is_identity()
True
sage: rotate = H({0:1, 1:2, 2:0})
sage: rotate.induced_homology_morphism(QQ).is_identity()
True
"""
return self.to_matrix().is_one()
def is_surjective(self):
"""
True if this map is surjective on (co)homology.
EXAMPLES::
sage: S1 = simplicial_complexes.Sphere(1)
sage: K = simplicial_complexes.Simplex(2)
sage: H = Hom(S1, K)
sage: f = H({0:0, 1:1, 2:2})
sage: f.induced_homology_morphism().is_surjective()
True
sage: f.induced_homology_morphism(cohomology=True).is_surjective()
False
"""
m = self.to_matrix()
return m.rank() == m.nrows()
def is_injective(self):
"""
True if this map is injective on (co)homology.
EXAMPLES::
sage: S1 = simplicial_complexes.Sphere(1)
sage: K = simplicial_complexes.Simplex(2)
sage: H = Hom(S1, K)
sage: f = H({0:0, 1:1, 2:2})
sage: f.induced_homology_morphism().is_injective()
False
sage: f.induced_homology_morphism(cohomology=True).is_injective()
True
sage: T = simplicial_complexes.Torus()
sage: g = Hom(S1, T)({0:0, 1:3, 2: 6})
sage: g_star = g.induced_homology_morphism(QQ)
sage: g.is_injective()
True
"""
return self.to_matrix().right_nullity() == 0
def _repr_type(self):
"""
EXAMPLES::
sage: S1 = simplicial_complexes.Sphere(1)
sage: K = simplicial_complexes.Simplex(2)
sage: f = Hom(S1, K)({0: 0, 1:1, 2:2})
sage: f.induced_homology_morphism()._repr_type()
'Graded vector space'
sage: f.induced_homology_morphism(cohomology=True)._repr_type()
'Graded algebra'
"""
return "Graded vector space" if not self._cohomology else "Graded algebra"
def _repr_defn(self):
"""
EXAMPLES::
sage: S1 = simplicial_complexes.Sphere(1)
sage: K = simplicial_complexes.Simplex(2)
sage: f = Hom(S1, K)({0: 0, 1:1, 2:2})
sage: print(f.induced_homology_morphism()._repr_defn())
induced by:
Simplicial complex morphism:
From: Minimal triangulation of the 1-sphere
To: The 2-simplex
Defn: 0 |--> 0
1 |--> 1
2 |--> 2
"""
s = "induced by:"
s += '\n {}'.format('\n '.join(self._map._repr_().split('\n')))
return s
|
py | 7dfd5fecf603d1cf61db9e25cd30de7c2394c12d | import os
from permedcoe import constraint
from permedcoe import container
from permedcoe import binary
from permedcoe import task
from permedcoe import DIRECTORY_IN
from permedcoe import FILE_IN
from permedcoe import FILE_OUT
# Import single container and assets definitions
from MaBoSS_BB.definitions import MABOSS_ASSETS_PATH
from MaBoSS_BB.definitions import MABOSS_CONTAINER
from MaBoSS_BB.definitions import MABOSS_SENSITIVITY_CONTAINER
from MaBoSS_BB.definitions import COMPUTING_UNITS
# Globals
MABOSS_BINARY = os.path.join(MABOSS_ASSETS_PATH, "MaBoSS_analysis.sh")
MABOSS_SENSITIVIY_ANALYSIS_BINARY = os.path.join(
MABOSS_ASSETS_PATH, "MaBoSS_sensitivity_analysis.sh"
)
@constraint(computing_units=COMPUTING_UNITS)
@container(engine="SINGULARITY", image=MABOSS_CONTAINER)
@binary(binary=MABOSS_BINARY)
@task(data_folder=DIRECTORY_IN, ko_file=FILE_OUT)
def MaBoSS_analysis(
model="epithelial_cell_2", data_folder=None, ko_file=None, parallel=COMPUTING_UNITS
):
"""
Performs the MaBoSS analysis.
Produces the ko file, containing the set of selected gene candidates.
    The definition is equivalent to:
./MaBoSS_analysis.sh <model> <data_folder> <ko_file> <computing_units>
"""
# Empty function since it represents a binary execution:
pass
@constraint(computing_units=COMPUTING_UNITS)
@container(engine="SINGULARITY", image=MABOSS_SENSITIVITY_CONTAINER)
@binary(binary=MABOSS_SENSITIVIY_ANALYSIS_BINARY)
@task(
model_folder=DIRECTORY_IN,
genes_druggable=FILE_IN,
genes_target=FILE_IN,
result_file=FILE_OUT,
)
def MaBoSS_sensitivity_analysis(
model_folder=None, genes_druggable=None, genes_target=None, result_file=None
):
"""
    Performs the MaBoSS sensitivity analysis.
    Produces the result file for the given druggable and target genes.
    The definition is equivalent to:
./MaBoSS_sensitivity_analysis.sh <model_folder> <genes_druggable> <genes_target> <result_file>
"""
# Empty function since it represents a binary execution:
pass
def invoke(input, output, config):
"""Common interface.
Args:
input (list): List containing the model and data folder.
output (list): list containing the output directory path.
config (dict): Configuration dictionary (not used).
Returns:
None
"""
if config and "uc2" in config.keys() and config["uc2"]:
# Process parameters
model_folder = input[0]
genes_druggable = input[1]
genes_target = input[2]
result_file = output[0]
# Building block invocation
MaBoSS_sensitivity_analysis(
model_folder=model_folder,
genes_druggable=genes_druggable,
genes_target=genes_target,
result_file=result_file,
)
else:
# Process parameters
model = input[0]
data_folder = input[1]
parallel = input[2]
ko_file = output[0]
# Building block invoCation
MaBoSS_analysis(
model=model, data_folder=data_folder, ko_file=ko_file, parallel=parallel
)
|
py | 7dfd607dbd222908e3f256bb565e9921848b30d2 | # This is an input class. Do not edit.
class BST:
def __init__(self, value, left=None, right=None):
self.value = value
self.left = left
self.right = right
def findKthLargestValueInBst(tree, k):
info = [0, 0] # info[0] = number of visited nodes, info[1] = value of the last node
findKthLargestValueInBstHelper(tree, k, info)
return info[1]
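# Helper: reverse in-order traversal (right, node, left), so the k-th node
# visited is the k-th largest; info holds [nodes visited so far, last value seen].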
def findKthLargestValueInBstHelper(tree, k, info=[0, 0]):
if tree is None or info[0] >= k:
return
findKthLargestValueInBstHelper(tree.right, k, info)
if info[0] < k:
info[0] += 1
info[1] = tree.value
findKthLargestValueInBstHelper(tree.left, k, info)
class TestProgram:
def test_case_1(self):
root = BST(15)
root.left = BST(5)
root.left.left = BST(2)
root.left.left.left = BST(1)
root.left.left.right = BST(3)
root.left.right = BST(5)
root.right = BST(20)
root.right.left = BST(17)
root.right.right = BST(22)
k = 3
expected = 17
        actual = findKthLargestValueInBst(root, k)
        assert actual == expected, "expected {} but got {}".format(expected, actual)
        print(actual)
TestProgram().test_case_1()
|
py | 7dfd6119a218ef236d73df160a2b591edb3a9560 | class Solution:
def binaryGap(self, N):
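        # Scan the binary digits and track the largest distance between
        # consecutive '1' bits; bin(N)[2:] always starts with '1', so starting
        # pre at index 0 is safe.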
pre = dist = 0
for i, c in enumerate(bin(N)[2:]):
if c == "1":
dist = max(dist, i - pre)
pre = i
return dist |
py | 7dfd6209893fc91e1163130114a57b7cc85a419d | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-08-19 13:12
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('torchbox', '0047_homepagehero_text'),
]
operations = [
migrations.AddField(
model_name='personpage',
name='is_senior',
field=models.BooleanField(default=False),
),
]
|
py | 7dfd6233a588d2d7eb43aa0eb21731e4ab65871c | import os
import numpy as np
from matplotlib import pyplot as plt
import cartopy as cart
import pyresample
class LLCMapper:
def __init__(self, ds, dx=0.25, dy=0.25):
# Extract LLC 2D coordinates
lons_1d = ds.XC.values.ravel()
lats_1d = ds.YC.values.ravel()
# Define original grid
self.orig_grid = pyresample.geometry.SwathDefinition(lons=lons_1d, lats=lats_1d)
        # Longitudes and latitudes to which we will interpolate
lon_tmp = np.arange(-180, 180, dx) + dx/2
lat_tmp = np.arange(-90, 90, dy) + dy/2
# Define the lat lon points of the two parts.
self.new_grid_lon, self.new_grid_lat = np.meshgrid(lon_tmp, lat_tmp)
self.new_grid = pyresample.geometry.GridDefinition(lons=self.new_grid_lon,
lats=self.new_grid_lat)
def __call__(self, da, ax=None, projection=cart.crs.Robinson(), lon_0=-60, **plt_kwargs):
assert set(da.dims) == set(['face', 'j', 'i']), "da must have dimensions ['face', 'j', 'i']"
if ax is None:
fig, ax = plt.subplots(figsize=(12, 6))
field = pyresample.kd_tree.resample_nearest(self.orig_grid, da.values,
self.new_grid,
radius_of_influence=100000,
fill_value=None)
vmax = plt_kwargs.pop('vmax', field.max())
vmin = plt_kwargs.pop('vmin', field.min())
m = plt.axes(projection=projection)
x,y = self.new_grid_lon, self.new_grid_lat
        # Find index where data is split for mapping
split_lon_idx = round(x.shape[1]/(360/(lon_0 if lon_0>0 else lon_0+360)))
p = m.pcolormesh(x[:,:split_lon_idx], y[:,:split_lon_idx], field[:,:split_lon_idx],
vmax=vmax, vmin=vmin, transform=cart.crs.PlateCarree(), zorder=1, **plt_kwargs)
p = m.pcolormesh(x[:,split_lon_idx:], y[:,split_lon_idx:], field[:,split_lon_idx:],
vmax=vmax, vmin=vmin, transform=cart.crs.PlateCarree(), zorder=2, **plt_kwargs)
m.add_feature(cart.feature.LAND, facecolor='0.5', zorder=3)
label = ''
if da.name is not None:
label = da.name
if 'units' in da.attrs:
label += ' [%s]' % da.attrs['units']
cb = plt.colorbar(p, shrink=0.4, label=label)
return m, ax
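# Minimal usage sketch (hypothetical names): assuming `ds` is an LLC-grid
# xarray Dataset carrying XC/YC coordinates and a per-face 2-D variable such
# as `Theta`, one could do:
#     mapper = LLCMapper(ds)
#     mapper(ds.Theta.isel(time=0, k=0), cmap='viridis')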
|
py | 7dfd624c96051217d1d22109bc709de9d2675a77 | #machine learning
import tensorflow as tf
#image input
import os
#possible image processing
import cv2
#PSNR calculation
import numpy as np
#output
import matplotlib.pyplot as plt
#load file
def load_file(path):
images = []
L = os.listdir(path)
for filename in L:
images.append(os.path.join(path,filename))
return images
#return in np format 100 array
def image_slice(slices,img,h,w,overlap=0):
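    # Read the image, convert it to grayscale, and cut the fixed crop
    # (rows 215-415, columns 123-603) into h x w tiles; each tile is appended
    # to `slices` as a flattened vector of length w*h.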
image = cv2.imread(str(img)) #grayscale
if image is None: return
img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
i = 0
for r in range(123,603,(w-overlap)):
for c in range(215,415,(h-overlap)):
#if(True):cv2.rectangle(img,(r,c), (r+w,c+h), (255,255,255),1)
#if((603-r) == (415-c)):cv2.rectangle(img,(r,c), (r+w,c+h), (255,255,255),1)
s = img[c:(c+h),r:(r+w)]
#print(s.shape)
s.astype(dtype="float32")
slices.append(s.reshape((w*h)))
i+=1
#print(len(slices))
#print(r," ",c)
return img
#20*48
def image_merge(slices,num_r=0,num_c=0):
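    # Stitch 10x10 tiles back into a single image, column by column: num_r
    # tiles are stacked vertically per column and num_c columns are
    # concatenated horizontally (the passed-in num_r/num_c are overridden).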
num_r = 20#8
num_c = 48#16
image = [[]]
for x in range(num_c):
#print("c ",x)
temp = np.reshape(slices[x*num_r],(10,10))
#cv2.imshow("s",temp)
cv2.waitKey(0)
#print(temp.shape)
for y in range(1,num_r):
#print(x*8+y)
temp = np.concatenate((temp,np.reshape(slices[x*num_r+y],(10,10))),axis = 0)
if(x == 0):image = temp
else:image = np.concatenate((image,temp),axis = 1)
#print(image.shape)
return image
#PSNR source: https://dsp.stackexchange.com/questions/38065/peak-signal-to-noise-ratio-psnr-in-python-for-an-image?rq=1
#Data
hight = 10
width = 10
image_size = hight*width
x = tf.placeholder(tf.float32, shape=[None,image_size])
t = tf.placeholder(tf.float32, shape=[None,image_size])
#Size
h1 = 50
h2 = 25
h3 = h1
#Weight
initializer = tf.variance_scaling_initializer()
w1 = tf.Variable(initializer([image_size,h1]),dtype=tf.float32)
w2 = tf.Variable(initializer([h1,h2]),dtype=tf.float32)
w3 = tf.Variable(initializer([h2,h3]),dtype=tf.float32)
w4 = tf.Variable(initializer([h3,image_size]),dtype=tf.float32)
bias = 1
w10 = tf.Variable(tf.zeros(h1)+bias,dtype=tf.float32)
w20 = tf.Variable(tf.zeros(h2)+bias,dtype=tf.float32)
w30 = tf.Variable(tf.zeros(h3)+bias,dtype=tf.float32)
w40 = tf.Variable(tf.zeros(image_size),dtype=tf.float32)
#Layer
method = tf.nn.relu#tf.nn.sigmoid#tf.nn.relu
L1 = method(tf.matmul(x,w1)+w10)
L2 = method(tf.matmul(L1,w2)+w20)
L3 = method(tf.matmul(L2,w3)+w30)
y = method(tf.matmul(L3,w4)+w40)
#Train
Lr = 0.01
loss = tf.reduce_mean(tf.square(y-t))
optimizer=tf.train.AdamOptimizer(Lr)
train=optimizer.minimize(loss)
#Validation
PIXEL_MAX = tf.constant(255,dtype=tf.float32)
mse = tf.reduce_mean(tf.square(y-t))
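# PSNR = 20 * log10(PIXEL_MAX / sqrt(MSE)), with log10 computed as log(x)/log(10)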
psnr = tf.multiply(tf.constant(20,dtype=tf.float32),tf.divide(tf.log(PIXEL_MAX/tf.math.sqrt(mse)),tf.log(tf.constant(10,dtype=tf.float32))))
#Input
pass_flag = False
train_x = load_file("./x")
size = len(train_x)
train_t = load_file("./t")
train_x_batch = []
train_t_batch = []
test_x = load_file("./tx")
tsize = len(test_x)
test_t = load_file("./tt")
test_x_batch = []
test_t_batch = []
if(len(train_t) != size or tsize != len(test_t)):
print("Error: invalid train data {} != {}".format(size,len(train_t)))
print("Error: invalid train data {} != {}".format(tsize,len(test_t)))
else:
print("Processing....")
for key in range(size):
image_slice(train_x_batch,train_x[key],hight,width,2)
image_slice(train_t_batch,train_t[key],hight,width,2)
        # slice the held-out test images from the test folders (the original reused the training files here)
        if key < tsize:
            image_slice(test_x_batch,test_x[key],hight,width)
            image_slice(test_t_batch,test_t[key],hight,width)
if(len(train_x_batch) == len(train_t_batch) and len(train_x_batch) != 0 ):
pass_flag = True
else:
print(train_x_batch)
    train_x_batch = np.array(train_x_batch)  # np.array returns a new array; assign it back
    train_t_batch = np.array(train_t_batch)
#Train 15 image, test 3 image, 1000 time
#print(train_t_batch[0])
#pass_flag = False
batch_flag = 0 #0: all together, 1: each slice, 2: each image
epoch = 1000
batch_size = size
#Result
sample = []
Loss = []
Loss_Batch = [[0]*len(train_x_batch) for _ in range(epoch)]  # independent rows per epoch (the *epoch form aliased one row)
#Loss_image = [[0]*(len(train_x_batch)/114)]*epoch
MSE = []
if pass_flag:
init=tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
if(batch_flag == 0):
for ti in range(epoch):
sess.run(train,feed_dict={x:train_x_batch,t:train_t_batch})
train_loss=loss.eval(feed_dict={x:train_x_batch,t:train_t_batch})
Loss.append(train_loss)
print("epoch {}: {}".format(ti,train_loss))
if(batch_flag == 1):
for ti in range(epoch):
for batch in range(len(train_x_batch)):
                    sess.run(train,feed_dict={x:np.reshape(train_x_batch[batch],(1,image_size)),t:np.reshape(train_t_batch[batch],(1,image_size))})
                    train_loss=loss.eval(feed_dict={x:np.reshape(train_x_batch[batch],(1,image_size)),t:np.reshape(train_t_batch[batch],(1,image_size))})
                    Loss_Batch[ti][batch] = train_loss
#test
for b in range(len(test_x_batch)):
output_sample = y.eval(feed_dict={x:np.reshape(test_x_batch[b],(1,image_size))})
expect_sample = sess.run(y,feed_dict={x:np.reshape(test_x_batch[b],(1,image_size))})
sample.append(expect_sample)
result_loss=loss.eval(feed_dict={x:np.reshape(test_x_batch[b],(1,image_size)),t:np.reshape(test_t_batch[b],(1,image_size))})
MSE.append(result_loss)
print(MSE)
test_result = sample[:960]
ans = image_merge(test_result)
plt.imshow(ans,cmap='gray')
plt.savefig('ans.png')
plt.close()
#for i in range(1):
#plt.imshow(np.reshape(train_t_batch[i],(25,30)),cmap='gray')
#plt.imshow(sample[i],cmap='gray')
#plt.close()
#ploting
plt.plot(list(range(epoch)),Loss)
plt.xlabel('epoch')
plt.ylabel('loss')
plt.savefig('loss.png')
plt.close()
plt.bar(list(range(len(MSE))),MSE)
plt.xlabel('Slice')
plt.ylabel('loss')
plt.savefig('test_result.png')
#output_sample = y.eval(feed_dict={x:train_x_batch})
#expect_sample = t.eval(feed_dict={t:train_t_batch})
#sample_mse = mse.eval(feed_dict={x:train_x_batch,t:train_t_batch})
#sample_psnr = psnr.eval(feed_dict={x:train_x_batch,t:train_t_batch})
#Testing
#Output |
py | 7dfd6267f571c77db0d019e38e4aa3d1d8119c66 |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import interface_ipv6_metric
class interface_ipv6(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-interface - based on the path /interface/ethernet/interface-eth-isis-conf/intf-isis/interface-isis/interface-ipv6. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__interface_ipv6_metric',)
_yang_name = 'interface-ipv6'
_rest_name = 'ipv6'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__interface_ipv6_metric = YANGDynClass(base=YANGListType("interface_ipv6_metric_level",interface_ipv6_metric.interface_ipv6_metric, yang_name="interface-ipv6-metric", rest_name="metric", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-ipv6-metric-level', extensions={u'tailf-common': {u'info': u'Interface ipv6 Metric for isis', u'cli-suppress-mode': None, u'callpoint': u'IsisInterfaceIpv6Metric', u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'alt-name': u'metric'}}), is_container='list', yang_name="interface-ipv6-metric", rest_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Interface ipv6 Metric for isis', u'cli-suppress-mode': None, u'callpoint': u'IsisInterfaceIpv6Metric', u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'alt-name': u'metric'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='list', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'interface', u'ethernet', u'interface-eth-isis-conf', u'intf-isis', u'interface-isis', u'interface-ipv6']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'interface', u'Ethernet', u'isis', u'ipv6']
def _get_interface_ipv6_metric(self):
"""
Getter method for interface_ipv6_metric, mapped from YANG variable /interface/ethernet/interface_eth_isis_conf/intf_isis/interface_isis/interface_ipv6/interface_ipv6_metric (list)
"""
return self.__interface_ipv6_metric
def _set_interface_ipv6_metric(self, v, load=False):
"""
Setter method for interface_ipv6_metric, mapped from YANG variable /interface/ethernet/interface_eth_isis_conf/intf_isis/interface_isis/interface_ipv6/interface_ipv6_metric (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface_ipv6_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface_ipv6_metric() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("interface_ipv6_metric_level",interface_ipv6_metric.interface_ipv6_metric, yang_name="interface-ipv6-metric", rest_name="metric", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-ipv6-metric-level', extensions={u'tailf-common': {u'info': u'Interface ipv6 Metric for isis', u'cli-suppress-mode': None, u'callpoint': u'IsisInterfaceIpv6Metric', u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'alt-name': u'metric'}}), is_container='list', yang_name="interface-ipv6-metric", rest_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Interface ipv6 Metric for isis', u'cli-suppress-mode': None, u'callpoint': u'IsisInterfaceIpv6Metric', u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'alt-name': u'metric'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """interface_ipv6_metric must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("interface_ipv6_metric_level",interface_ipv6_metric.interface_ipv6_metric, yang_name="interface-ipv6-metric", rest_name="metric", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-ipv6-metric-level', extensions={u'tailf-common': {u'info': u'Interface ipv6 Metric for isis', u'cli-suppress-mode': None, u'callpoint': u'IsisInterfaceIpv6Metric', u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'alt-name': u'metric'}}), is_container='list', yang_name="interface-ipv6-metric", rest_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Interface ipv6 Metric for isis', u'cli-suppress-mode': None, u'callpoint': u'IsisInterfaceIpv6Metric', u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'alt-name': u'metric'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='list', is_config=True)""",
})
self.__interface_ipv6_metric = t
if hasattr(self, '_set'):
self._set()
def _unset_interface_ipv6_metric(self):
self.__interface_ipv6_metric = YANGDynClass(base=YANGListType("interface_ipv6_metric_level",interface_ipv6_metric.interface_ipv6_metric, yang_name="interface-ipv6-metric", rest_name="metric", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-ipv6-metric-level', extensions={u'tailf-common': {u'info': u'Interface ipv6 Metric for isis', u'cli-suppress-mode': None, u'callpoint': u'IsisInterfaceIpv6Metric', u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'alt-name': u'metric'}}), is_container='list', yang_name="interface-ipv6-metric", rest_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Interface ipv6 Metric for isis', u'cli-suppress-mode': None, u'callpoint': u'IsisInterfaceIpv6Metric', u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'alt-name': u'metric'}}, namespace='urn:brocade.com:mgmt:brocade-isis', defining_module='brocade-isis', yang_type='list', is_config=True)
interface_ipv6_metric = __builtin__.property(_get_interface_ipv6_metric, _set_interface_ipv6_metric)
_pyangbind_elements = {'interface_ipv6_metric': interface_ipv6_metric, }
|
py | 7dfd62dd294f3579cbf83fe7d9cee84c72a42983 | #!/usr/bin/env python
import json
import os
import strict_rfc3339
import argparse
import sys
import jsonschema as js
parser = argparse.ArgumentParser(description='This is a simple validator for JSON schema.')
parser.add_argument('-d', '--data', help='JSON data file', required=True)
parser.add_argument('-s', '--schema', help='JSON schema file', required=True)
args = parser.parse_args()
data_file_name = args.data
with open(data_file_name) as data_file:
data = json.load(data_file)
data_file.closed
schema_file_name = args.schema
with open(schema_file_name) as schema_file:
schema = json.load(schema_file)
print(schema)
schema_file.closed
# Defining a resolver for relative paths and schema issues, see https://github.com/Julian/jsonschema/issues/313
# and https://github.com/Julian/jsonschema/issues/274
sSchemaDir = os.path.dirname(os.path.abspath(schema_file_name))
oResolver = js.RefResolver(base_uri = 'file://' + sSchemaDir + '/', referrer = schema)
print(oResolver)
print("schemaDir: " + sSchemaDir)
print("schema_file_name: " + schema_file_name)
try:
js.validate(data, schema, format_checker=js.FormatChecker(), resolver=oResolver)
print("'%s' successfully validated against '%s'" % (data_file_name, schema_file_name))
except js.ValidationError as e:
print(e.message)
print(e)
raise SystemExit("Error in validation.")
except js.SchemaError as e:
print(e.message)
print(e)
raise SystemExit("Error in schema validation.")
|
py | 7dfd633bcb4d2200bddbe81e4866cf234ee88f68 | from mlxtend.frequent_patterns import apriori
import numpy as np
import itertools
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import f1_score, accuracy_score
def cover(X, itemset): # could be called "rows_covered_by_itemset"
"""
Returns the rows in X which satisfy the itemset
An itemset is satisfied when all elements of the itemset are evaluated to 1 (True)
Input:
X: pandas DataFrame (all one-hot encoded)
itemset: an iterable of column names representing an itemset
Returns:
X (subset): pandas DataFrame whose rows satisfy all itemset elements (have value 1)
"""
return X[(X[itemset]==1).all(axis=1)]
def overlap(X, itemset1, itemset2): # could be called "rows_covered_by_intersection_of_itemsets"
"""
Returns the rows in X which satisfy BOTH itemset1 and itemset2 (their intersection)
An itemset is satisfied when all elements of the itemset are evaluated to 1 (True)
Input:
X: pandas DataFrame (all one-hot encoded)
itemset1: an iterable of column names representing an itemset
itemset2: an iterable of column names representing an itemset
Returns:
X (subset): pandas DataFrame whose rows satisfy all itemset elements (have value 1)
"""
cover1 = cover(X, itemset1)
cover2 = cover(X, itemset2)
overlap_idx = cover1.index.intersection(cover2.index)
return X.loc[overlap_idx]
def correct_cover(X, y, rule):
"""
Returns elements of X (and y) which satisfy the rule (item, class)
Input:
X: pandas DataFrame (all one-hot encoded) of input data
y: pandas Series of categorical target variable (ground truth)
rule: element of pandas DataFrame with `item` (representing a rule)
and `class` (representing a prediction)
Returns:
X_cover, y_cover: pandas DataFrame and Series that are correctly covered by the rule
"""
item = rule['item']
pred = rule['class']
X_positive = X.loc[y == pred] # rows where y == class
X_cover = cover(X_positive, item) # rows covered by the rule
y_cover = y[X_cover.index]
return X_cover, y_cover
def incorrect_cover(X, y, rule):
"""
Returns the incorrect cover, defined as the set difference of cover(r) \ correct_cover(r)
Where `r` is the rule
Input:
X: pandas DataFrame (all one-hot encoded) of input data
y: pandas Series of categorical target variable (ground truth)
rule: element of pandas DataFrame with `item` (representing a rule)
and `class` (representing a prediction)
Returns:
X_incorrect, y_incorrect: pandas DataFrame and Series that are incorrectly covered by rule
"""
item = rule['item']
pred = rule['class']
    X_negative = X.loc[y != pred] # rows where y != pred
X_cover = cover(X_negative, item) # rows covered by the rule
y_cover = y[X_cover.index]
return X_cover, y_cover
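# Hedged illustration (not part of the original module; the column names below
# are invented): on a tiny one-hot frame the helpers above behave as follows.
#
#   _X = pd.DataFrame({'age_young': [1, 1, 0], 'fare_high': [1, 0, 1]})
#   cover(_X, ['age_young'])                    -> rows 0 and 1
#   overlap(_X, ['age_young'], ['fare_high'])   -> row 0 only
#   with _y = pd.Series([1, 0, 1]):
#   correct_cover(_X, _y, {'item': ['age_young'], 'class': 1})   -> row 0
#   incorrect_cover(_X, _y, {'item': ['age_young'], 'class': 1}) -> row 1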
class DecisionSet():
"""
Implementation of Decision Sets
See here: https://www-cs-faculty.stanford.edu/people/jure/pubs/interpretable-kdd16.pdf
"""
def __init__(self, min_support = 0.01, lambdas = [186, 732, 160, 950, 800, 752, 281]):
"""
Initializes a DecisionSet
Input:
min_support: float in (0, 1) denoting minimum proportion of rows an itemset can cover
lambdas: len=7 iterable; values to use for weighting the loss function (lambdas in paper)
default [186, 732, 160, 950, 800, 752, 281], found from random search on Titanic Dataset
"""
self.min_support = min_support
self.lambdas = lambdas
assert len(self.lambdas) == 7, 'lambdas must be exactly length 7!'
# `d` (aka delta) values are set by the algorithm in paper
# optimality of submodular approach is guaranteed for these values
# assume that we do not want to change them, so they are hard-coded
self.d1 = 1/3
self.d2 = 1/3
self.d3 = -1
self.cache_exists = False
def fit(self, X, y, max_est_iter=10):
"""
Fits a DecisionSet to a dataset X (one-hot encoded) with labels y (categorical)
Input:
X: pandas DataFrame containing one-hot encoded (True/False or 1/0) values.
X should have shape (n, p) where n is number of points and p is number of features
y: pandas Series containing class labels
max_est_iter: maximum iterations for estimating w
"""
if not self.cache_exists:
self.cache(X, y)
self.L_max = np.max(self.itemsets.apply(lambda x: len(x)))
self.classes = y.unique()
self.default_pred = y.value_counts().idxmax()
self.max_est_iter = max_est_iter
# two runs each with different d' (d2 vs d3)
dset1, obj_val1, dset2, obj_val2 = self.smooth_local_search(X, y, max_est_iter=max_est_iter)
# take best of both runs
if obj_val1 > obj_val2:
self.decision_set = dset1
else:
self.decision_set = dset2
# sort by accuracy score for tie-breaking (and general presentation)
print('Calculating individual rule scores')
acc_list = []
for i, r in self.decision_set.iterrows():
xcov = cover(X, r['item'])
ycov = y.loc[xcov.index]
pred = r['class'] * np.ones_like(ycov.values)
if len(xcov) > 0:
acc = accuracy_score(ycov, pred)
acc_list.append(acc)
self.decision_set['acc'] = acc_list
self.decision_set = self.decision_set.sort_values(by='acc', ascending=False) # prioritize accuracy
self._remove_duplicate_itemsets() # postprocess for visual appeal (does not affect accuracy or model performance)
return self
def predict(self, X):
"""
make predictions for every element of X (pandas DataFrame)
uses highest-accuracy rule that applies, otherwise uses default
"""
try:
ds = self.decision_set
        except AttributeError:
raise RuntimeError('Must fit decision set before predicting!')
items = list(self.decision_set['item'])
classes = list(self.decision_set['class'])
preds = []
for i, x in X.iterrows():
added = False
for r, c in zip(items, classes): # sorted by some notion of how good the rule is (breaks ties)
rule_value = x.loc[r]
if (rule_value==1).all():
preds.append(c)
added = True
break
if not added:
preds.append(self.default_pred) # if no match, use default
return np.array(preds)
def smooth_local_search(self, X, y, max_est_iter=10):
"""
Performs smooth local search to optimize decision set
"""
# initialize an empty decision set
A = pd.DataFrame([], columns=['item', 'class'])
# sample a decision set with no bias
samp = self._sample_decision_set(self.domain, self.domain, 0) # line 4 of SLS algorithm
# estimate the objective with the sample
opt = self._objective(X, y, samp)
self.error_bound = opt / (len(self.domain) ** 2)
count = 0
while True:
print('Estimating w...')
w = self._estimate_w(A, X, y, maxiter=max_est_iter)
A, add_done = self._add_elements(self.domain, A, w)
if not add_done:
count += 1
continue # go back to top of loop
else:
A, remove_done = self._remove_elements(self.domain, A, w)
if not remove_done:
count += 1
continue
else:
break
# we run with d' = 1/3 and d' = -1 according to the paper (Section 4.2)
dset1 = self._sample_decision_set(self.domain, A, self.d2)
obj1 = self._objective(X, y, dset1)
dset2 = self._sample_decision_set(self.domain, A, self.d3)
obj2 = self._objective(X, y, dset2)
return dset1, obj1, dset2, obj2
def cache(self, X, y):
"""
Caches various quantities to improve speed of main algorithm
"""
print('Computing cache...')
# cache itemsets & domain
self.itemsets = self.mine_itemsets(X)
self.domain = self._get_domain(self.itemsets, y)
print('Rules in domain: {}'.format(len(self.domain)))
# cache the covers (just cache the length)
self.cover_cache = {}
for i, r in self.itemsets.iteritems():
self.cover_cache[r] = len(cover(X, r))
print('Done caching covers')
# cache the overlaps (just cache the length)
self.overlap_cache = {}
for i, ri in self.itemsets.iteritems():
for j, rj in self.itemsets.loc[i:].iteritems():
ov = len(overlap(X, ri, rj))
self.overlap_cache[(ri,rj)] = ov
self.overlap_cache[(rj,ri)] = ov # save both orders for ease of use
print('Done caching overlaps')
# cache correct cover indices as python set
self.correct_cover_cache = {}
for i, r in self.domain.iterrows():
xcov, ycov = correct_cover(X, y, r)
self.correct_cover_cache[(r['item'], r['class'])] = set(xcov.index)
print('Done caching correct cover')
# cache incorrect cover (just cache the length)
self.incorrect_cover_cache = {}
for i, r in self.domain.iterrows():
self.incorrect_cover_cache[(r['item'], r['class'])] = len(incorrect_cover(X, y, r)[0])
print('Done caching incorrect cover')
self.cache_exists = True
def get_cache(self):
""" helper to get a cache that has been fit """
if self.cache_exists:
return {
'itemsets': self.itemsets,
'domain': self.domain,
'cover': self.cover_cache,
'overlap': self.overlap_cache,
'correct_cover': self.correct_cover_cache,
'incorrect_cover': self.incorrect_cover_cache,
}
else:
raise RuntimeError("cache_exists False. Must set or compute cache first.")
def set_cache(self, cache_dict):
""" helper to set the cache according to a cache dict """
self.itemsets = cache_dict['itemsets']
self.domain = cache_dict['domain']
self.cover_cache = cache_dict['cover']
self.overlap_cache = cache_dict['overlap']
self.correct_cover_cache = cache_dict['correct_cover']
self.incorrect_cover_cache = cache_dict['incorrect_cover']
self.cache_exists = True
def _add_elements(self, domain, A, w):
"""
implements lines 9-12 of algorithm 1
"""
pre_size = len(A)
added = 0
for i in range(len(domain)):
r = domain.iloc[i]
in_A = (A == r).all(axis=1).any()
if not in_A and w[i] > 2 * self.error_bound:
A = A.append(r)
added += 1
print('Executed add elements, size of A: {} -> {}'.format(pre_size, len(A)))
if added == 0:
return A, True
else:
return A, False
def _remove_elements(self, domain, A, w):
"""
implements lines 13-15 of algorithm 1
"""
pre_size = len(A)
removed = 0
for i in range(len(domain)):
r = domain.iloc[i]
in_A = (A == r).all(axis=1).any()
if in_A and w[i] < -2 * self.error_bound:
to_drop = A.loc[(A == r).all(axis=1)]
A = A.drop(to_drop.index)
removed += 1
print('Executed remove elements, size of A: {} -> {}'.format(pre_size, len(A)))
if removed == 0:
return A, True
else:
return A, False
def _sample_decision_set(self, domain, decision_set, delta):
"""
performs sampling of decision set according to algorithm in paper (Definition 8)
samples with bias (if already in decision_set) from domain
"""
# bias towards within decision_set / domain, depending on delta
p_in = (1 + delta) / 2
p_out = (1 - delta) / 2
# sample from decision set w.p. p_in
R = decision_set.shape[0]
in_mask = np.random.random(size=R) < p_in
in_samp = decision_set[in_mask]
# sample from domain \ decision_set (set difference) w.p. p_out
out_domain = domain.loc[domain.index.difference(decision_set.index)] # domain \ decision_set
S = out_domain.shape[0]
out_mask = np.random.random(size=S) < p_out
out_samp = out_domain[out_mask]
sample = pd.concat([in_samp, out_samp])
return sample
def _estimate_w(self, A, X, y, maxiter=10):
"""
performs the w estimation from line 5 of algorithm
"""
w = []
for i, r in self.domain.iterrows():
std_error = float('inf')
diffs = []
j = 0
while std_error > self.error_bound and j < maxiter:
d_samp = self._sample_decision_set(self.domain, A, self.d1)
# remove r from sample
d_samp_drop = d_samp.append(r).drop_duplicates(keep=False)
# add it when we know it won't be duplicate
d_samp_with = d_samp_drop.append(r)
obj_with = self._objective(X, y, d_samp_with)
obj_drop = self._objective(X, y, d_samp_drop)
diffs.append(obj_with - obj_drop)
if len(diffs) >= 2:
std_error = np.std(diffs)
j += 1
w.append(np.mean(diffs))
return w
def _objective(self, X, y, decision_set):
"""
Computes the objective outlined in the Decision Set Paper
"""
N = len(X)
R = len(decision_set)
S = len(self.itemsets)
# interpretability: less rules (possible for R > S ?)
f1 = S - R
# interpretability: short rules
f2 = self.L_max * S - sum([len(r) for r in list(decision_set['item'])])
# non-overlap: discourage overlap among rules
f3, f4 = self._non_overlap_objective(X, decision_set)
# coverage: at least one rule for each class
f5 = self._coverage_objective(decision_set)
# accuracy: reduce incorrect-cover (precision)
f6 = self._precision_objective(X, y, decision_set)
# accuracy: cover data points with at least one rule (recall)
f7 = self._recall_objective(X, y, decision_set)
objectives = [f1, f2, f3, f4, f5, f6, f7]
# final objective: weighted sum of individuals
return sum([o * l for o, l in zip(objectives, self.lambdas)])
def _non_overlap_objective(self, X, decision_set):
"""
perform nested for-loop summation for calculating the non-overlap objective
"""
N = len(X)
R = len(decision_set)
S = len(self.itemsets)
# calculate overlap for same and different classes separately
same_class_overlap, diff_class_overlap = 0, 0
items = list(decision_set['item'])
classes = list(decision_set['class'])
for i, ri in enumerate(items):
for j, rj in enumerate(items[i:]):
num_overlap = self.overlap_cache[(ri, rj)]
if classes[i] == classes[j]:
same_class_overlap += num_overlap
else:
diff_class_overlap += num_overlap
# same-class objective ('f3' in paper)
f3 = N * (S ** 2) - same_class_overlap
# diff-class objective ('f4' in paper)
f4 = N * (S ** 2) - diff_class_overlap
return f3, f4
def _coverage_objective(self, decision_set):
"""
calculates coverage objective (f5 in paper)
"""
obj = 0
for cls in self.classes:
have_rule = (decision_set['class']==cls).any(axis=0)
obj += 1 if have_rule else 0
return obj
def _precision_objective(self, X, y, decision_set):
"""
calculates precision objective (f6 in paper)
"""
N = len(X)
S = len(self.itemsets)
R = len(decision_set)
items = list(decision_set['item'])
classes = list(decision_set['class'])
penalty = 0
for r, c in zip(items, classes):
penalty += self.incorrect_cover_cache[(r, c)]
return N * (S ** 2) - penalty
def _recall_objective(self, X, y, decision_set):
"""
calculates recall objective (f7 in paper)
"""
items = list(decision_set['item'])
classes = list(decision_set['class'])
        if not items:  # an empty (sampled) decision set correctly covers nothing
            return 0
        idx = self.correct_cover_cache[(items[0], classes[0])]
        idx = idx.union(*[self.correct_cover_cache[(r, c)] for r, c in zip(items[1:], classes[1:])])
return len(idx)
def _remove_duplicate_itemsets(self):
"""
removes any duplicate itemsets (i.e. itemsets which predict different classes)
keeps the first entry, which is assumed to have already been sorted by some notion of score
i.e. keeps the "best"
"""
self.decision_set = self.decision_set.loc[self.decision_set['item'].drop_duplicates(keep='first').index]
def mine_itemsets(self, X):
"""
Performs frequent itemset mining using Apriori Algorithm
See here: http://rasbt.github.io/mlxtend/user_guide/frequent_patterns/apriori/
Input:
X: pandas DataFrame, one-hot encoded
Output:
itemsets: pandas DataFrame with support and itemsets columns
"""
itemsets = apriori(X, min_support=self.min_support, use_colnames=True)
return itemsets.itemsets # apriori returns the support for each item. just take itemsets
def _get_domain(self, itemsets, y):
"""
calculate cartesian product of itemset and classes, returns a dataframe
"""
item_cross_class = itertools.product(itemsets, y.unique())
domain = pd.DataFrame(item_cross_class, columns=['item', 'class'])
return domain
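# Hedged usage sketch (not from the original file): fit on a tiny synthetic
# one-hot frame. Feature names, sizes and the label rule are invented for
# illustration, and the demo assumes the (older) pandas version this module was
# written against -- one that still provides DataFrame.append and accepts the
# frozenset column indexers used throughout the class.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = pd.DataFrame(rng.randint(0, 2, size=(60, 4)), columns=['f1', 'f2', 'f3', 'f4'])
    y_demo = pd.Series((X_demo['f1'] & X_demo['f2']).astype(int))
    ds = DecisionSet(min_support=0.05)
    ds.fit(X_demo, y_demo, max_est_iter=2)   # small iteration budget for the demo
    print(ds.decision_set[['item', 'class', 'acc']])
    print('train accuracy:', accuracy_score(y_demo, ds.predict(X_demo)))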
|
py | 7dfd64a87da1312bc4c181c7dd13b89146ea1063 | #!/usr/bin/env python2.6
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
import logging
from StatusCheck import StatusCheck
import AmbariConfig
from StackVersionsFileHandler import StackVersionsFileHandler
from ActualConfigHandler import ActualConfigHandler
logger = logging.getLogger()
class LiveStatus:
SERVICES = [
"HDFS", "MAPREDUCE", "GANGLIA", "HBASE",
"NAGIOS", "ZOOKEEPER", "OOZIE", "HCATALOG",
"KERBEROS", "TEMPLETON", "HIVE", "WEBHCAT",
"YARN", "MAPREDUCE2", "FLUME", "SPARK", "SHARK"
]
COMPONENTS = [
{"serviceName" : "HDFS",
"componentName" : "DATANODE"},
{"serviceName" : "HDFS",
"componentName" : "NAMENODE"},
{"serviceName" : "HDFS",
"componentName" : "SECONDARY_NAMENODE"},
{"serviceName" : "MAPREDUCE",
"componentName" : "JOBTRACKER"},
{"serviceName" : "MAPREDUCE",
"componentName" : "TASKTRACKER"},
{"serviceName" : "GANGLIA",
"componentName" : "GANGLIA_SERVER"},
{"serviceName" : "GANGLIA",
"componentName" : "GANGLIA_MONITOR"},
{"serviceName" : "HBASE",
"componentName" : "HBASE_MASTER"},
{"serviceName" : "HBASE",
"componentName" : "HBASE_REGIONSERVER"},
{"serviceName" : "NAGIOS",
"componentName" : "NAGIOS_SERVER"},
{"serviceName" : "FLUME",
"componentName" : "FLUME_SERVER"},
{"serviceName" : "SPARK",
"componentName" : "SPARK_SERVER"},
{"serviceName" : "SPARK",
"componentName" : "SPARK_WORKER"},
{"serviceName" : "SHARK",
"componentName" : "SHARK_SERVER"},
{"serviceName" : "ZOOKEEPER",
"componentName" : "ZOOKEEPER_SERVER"},
{"serviceName" : "OOZIE",
"componentName" : "OOZIE_SERVER"},
{"serviceName" : "HCATALOG",
"componentName" : "HCATALOG_SERVER"},
{"serviceName" : "KERBEROS",
"componentName" : "KERBEROS_SERVER"},
{"serviceName" : "HIVE",
"componentName" : "HIVE_SERVER"},
{"serviceName" : "HIVE",
"componentName" : "HIVE_METASTORE"},
{"serviceName" : "HIVE",
"componentName" : "MYSQL_SERVER"},
{"serviceName" : "WEBHCAT",
"componentName" : "WEBHCAT_SERVER"},
{"serviceName" : "YARN",
"componentName" : "RESOURCEMANAGER"},
{"serviceName" : "YARN",
"componentName" : "NODEMANAGER"},
{"serviceName" : "MAPREDUCE2",
"componentName" : "HISTORYSERVER"},
]
LIVE_STATUS = "STARTED"
DEAD_STATUS = "INSTALLED"
def __init__(self, cluster, service, component, globalConfig, config):
self.cluster = cluster
self.service = service
self.component = component
self.globalConfig = globalConfig
versionsFileDir = config.get('agent', 'prefix')
self.versionsHandler = StackVersionsFileHandler(versionsFileDir)
self.actualConfigHandler = ActualConfigHandler(config)
def belongsToService(self, component):
#TODO: Should also check belonging of server to cluster
return component['serviceName'] == self.service
# Live status was stripped from heartbeat after revision e1718dd
def build(self):
statusCheck = StatusCheck(AmbariConfig.servicesToPidNames,
AmbariConfig.pidPathesVars, self.globalConfig,
AmbariConfig.servicesToLinuxUser)
livestatus = None
for component in self.COMPONENTS:
if component["serviceName"] == self.service and component["componentName"] == self.component:
serviceStatus = statusCheck.getStatus(component["componentName"])
if serviceStatus is None:
logger.warn("There is no service to pid mapping for " + component["componentName"])
status = self.LIVE_STATUS if serviceStatus else self.DEAD_STATUS
livestatus ={"componentName" : component["componentName"],
"msg" : "",
"status" : status,
"clusterName" : self.cluster,
"serviceName" : self.service,
"stackVersion": self.versionsHandler.
read_stack_version(component["componentName"])
}
active_config = self.actualConfigHandler.read_actual_component(component['componentName'])
if not active_config is None:
livestatus['configurationTags'] = active_config
break
logger.debug("The live status for component " + str(self.component) +\
" of service " + str(self.service) + " is " + str(livestatus))
return livestatus
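# Illustrative only (the values below are made up, not read from a real cluster):
# a successful check for an HDFS DataNode yields a structure along these lines:
#   {"componentName": "DATANODE", "msg": "", "status": "STARTED",
#    "clusterName": "c1", "serviceName": "HDFS", "stackVersion": "1.3.0",
#    "configurationTags": {"global": {"tag": "version1"}}}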
def main(argv=None):
  for service in LiveStatus.SERVICES:
livestatus = LiveStatus('', service)
print json.dumps(livestatus.build())
if __name__ == '__main__':
main()
|
py | 7dfd65ba4b5f020a73f303874d9f9577c52a82f4 | from __future__ import print_function
import pickle
from settings import *
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
import json
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/spreadsheets']
# The ID and range of a sample spreadsheet.
SAMPLE_SPREADSHEET_ID = '1SN2rcS9cSwuMT-MjIRHfR9yZPKi2bShp9-WEBv5Ri3s'
CRED_FILE = os.path.join(GAME_FOLDER,'credentials.json')
TOK_FILE = os.path.join(GAME_FOLDER,'token.pickle')
def winner_up(name,level,score):
"""Shows basic usage of the Sheets API.
Prints values from a sample spreadsheet.
"""
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists(TOK_FILE):
with open(TOK_FILE, 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
CRED_FILE, SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open(TOK_FILE, 'wb') as token:
pickle.dump(creds, token)
service = build('sheets', 'v4', credentials=creds)
# Call the Sheets API
sheet = service.spreadsheets()
listofstuff = sheet.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,range='A1:D1').execute()['values'][0]
listofstuff[1] = int(listofstuff[1])
listofstuff[2] = int(listofstuff[2])
if level + score > listofstuff[1] + int(float(listofstuff[2]) / 3.0):
values = [[name,str(level),str(score)]]
body = {'values': values}
result = sheet.values().update(spreadsheetId=SAMPLE_SPREADSHEET_ID,valueInputOption = 'USER_ENTERED', range='A1:D1', body=body).execute()
retval = [name,str(level),str(score)]
else:
retval = [listofstuff[0],str(listofstuff[1]),str(listofstuff[2])]
return retval
def winner():
"""Shows basic usage of the Sheets API.
Prints values from a sample spreadsheet.
"""
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists(TOK_FILE):
with open(TOK_FILE, 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
CRED_FILE, SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open(TOK_FILE, 'wb') as token:
pickle.dump(creds, token)
service = build('sheets', 'v4', credentials=creds)
# Call the Sheets API
sheet = service.spreadsheets()
listofstuff = sheet.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,range='A1:D1').execute()['values'][0]
listofstuff[1] = int(listofstuff[1])
listofstuff[2] = int(listofstuff[2])
retval = [listofstuff[0],str(listofstuff[1]),str(listofstuff[2])]
return retval
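# Hedged usage sketch (the player name and scores below are made up; running it
# requires the credentials.json/token.pickle files the functions above expect):
if __name__ == '__main__':
    print('current best:', winner())
    # submit a candidate result; the sheet keeps whichever entry scores higher
    print('after submission:', winner_up('player1', 3, 42))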
|
py | 7dfd65c1c29e1c04e2c5d6efe19c257b69cd2ac4 | import sys
sys.setrecursionlimit(1000000)
def sol():
sys.stdin = open("./1520/input.txt")
input = sys.stdin.readline
M, N = map(int, input().split())
heights = [list(map(int, input().split())) for _ in range(M)]
dp = [[-1] * N for _ in range(M)]
dr = [0, 0, 1, -1]
dc = [1, -1, 0, 0]
print(dfs(M, N, heights, dp, dr, dc, 0, 0))
def dfs(M, N, heights, dp, dr, dc, r, c):
if r == M - 1 and c == N - 1:
return 1
if dp[r][c] != -1:
return dp[r][c]
dp[r][c] = 0
for i in range(4):
nr = r + dr[i]
nc = c + dc[i]
if 0 <= nr < M and 0 <= nc < N:
if heights[nr][nc] < heights[r][c]:
dp[r][c] += dfs(M, N, heights, dp, dr, dc, nr, nc)
return dp[r][c]
if __name__ == "__main__":
sol()
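# Illustrative trace (a made-up 2x3 grid, not the judge's input): for
#   M, N = 2, 3
#   heights = [[5, 4, 3],
#              [4, 3, 1]]
# the memoised dfs fills dp[1][1] = dp[1][0] = dp[0][2] = 1, dp[0][1] = 2 and
# finally dp[0][0] = 3, i.e. three strictly decreasing routes from the
# top-left cell to the bottom-right cell.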
|