| id (stringlengths 1–265) | text (stringlengths 6–5.19M) | dataset_id (stringclasses: 7 values) |
---|---|---|
/JPype1-1.4.1-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl/jpype/_jobject.py | import _jpype
__all__ = ['JObject']
class JObject(_jpype._JObject, internal=True):
""" Base class for all object instances.
It can be used to test if an object is a Java object instance with
``isinstance(obj, JObject)``.
Calling ``JObject`` as a function can be used to convert or cast to a
specific Java type. It will box primitive types and supports an
optional type to box to.
This wrapper functions in three ways.
- If no type is given, the object is automatically
cast to the type that best matches the value. This can be used
to create a boxed primitive. ``JObject(JInt(i))``
- If the type is a primitive, the object will be the boxed type of that
primitive. ``JObject(1, JInt)``
- If the type is a Java class and the value is a Java object, the
object will be cast to the Java class and will be an exact match to
the class for the purposes of matching arguments. If the object
is not compatible, an exception will be raised.
Args:
value: The value to be cast into a Java object.
type(Optional, type): The type to cast into.
Raises:
TypeError: If the object cannot be cast to the specified type, or
the requested type is not a Java class or primitive.
"""
def __new__(cls, *args, **kwargs):
if len(args) == 0:
return _jpype._java_lang_Object()
return _JObjectFactory(*args, **kwargs)
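# Illustrative sketch of the three casting modes (assumes a JVM has been
# started via jpype.startJVM(); java.lang.Object is a standard JDK class):
#
#   from jpype import JObject, JInt
#   boxed = JObject(JInt(1))                     # automatic boxing of a primitive
#   boxed2 = JObject(1, JInt)                    # explicit boxing to a primitive type
#   upcast = JObject(boxed, 'java.lang.Object')  # cast for exact argument matching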
def _getDefaultJavaObject(obj):
""" Determine the type of the object based the type of a value.
Python primitives - lookup the type in the table
Java primitive - lookup boxed type in the table
Java objects - just use their type directly
"""
tp = type(obj)
# handle Python types and Java primitives
try:
return _jpype._object_classes[tp]
except KeyError:
pass
# handle Class wrappers
if isinstance(tp, _jpype._JClass):
return tp
# handle JProxy instances
try:
return obj.__javaclass__
except AttributeError:
pass
raise TypeError(
"Unable to determine the default type of `{0}`".format(tp.__name__))
def _JObjectFactory(v=None, tp=None):
""" Creates a Java object.
If not specified type is determined based on the object.
If type type is specified then then it tried to box it.
"""
if tp is None:
# Automatically determine based on the value
tp = _getDefaultJavaObject(v)
elif isinstance(tp, str):
tp = _jpype.JClass(tp)
if tp in _jpype._object_classes:
if not isinstance(tp, _jpype.JClass):
import warnings
warnings.warn("Using JObject with a Python type is deprecated.",
category=DeprecationWarning, stacklevel=3)
tp = _jpype._object_classes[tp]
# Given a Java class
if isinstance(tp, _jpype._JClass):
return tp._cast(v)
raise TypeError("Invalid type conversion to %s requested." % tp)
# Hook up module resources
_jpype.JObject = JObject | PypiClean |
/ClueDojo-1.4.3-1.tar.gz/ClueDojo-1.4.3-1/src/cluedojo/static/dojo/dnd/move.js | if(!dojo._hasResource["dojo.dnd.move"]){
dojo._hasResource["dojo.dnd.move"]=true;
dojo.provide("dojo.dnd.move");
dojo.require("dojo.dnd.Mover");
dojo.require("dojo.dnd.Moveable");
dojo.declare("dojo.dnd.move.constrainedMoveable",dojo.dnd.Moveable,{constraints:function(){
},within:false,markupFactory:function(_1,_2){
return new dojo.dnd.move.constrainedMoveable(_2,_1);
},constructor:function(_3,_4){
if(!_4){
_4={};
}
this.constraints=_4.constraints;
this.within=_4.within;
},onFirstMove:function(_5){
var c=this.constraintBox=this.constraints.call(this,_5);
c.r=c.l+c.w;
c.b=c.t+c.h;
if(this.within){
var mb=dojo.marginBox(_5.node);
c.r-=mb.w;
c.b-=mb.h;
}
},onMove:function(_6,_7){
var c=this.constraintBox,s=_6.node.style;
s.left=(_7.l<c.l?c.l:c.r<_7.l?c.r:_7.l)+"px";
s.top=(_7.t<c.t?c.t:c.b<_7.t?c.b:_7.t)+"px";
}});
dojo.declare("dojo.dnd.move.boxConstrainedMoveable",dojo.dnd.move.constrainedMoveable,{box:{},markupFactory:function(_8,_9){
return new dojo.dnd.move.boxConstrainedMoveable(_9,_8);
},constructor:function(_a,_b){
var _c=_b&&_b.box;
this.constraints=function(){
return _c;
};
}});
dojo.declare("dojo.dnd.move.parentConstrainedMoveable",dojo.dnd.move.constrainedMoveable,{area:"content",markupFactory:function(_d,_e){
return new dojo.dnd.move.parentConstrainedMoveable(_e,_d);
},constructor:function(_f,_10){
var _11=_10&&_10.area;
this.constraints=function(){
var n=this.node.parentNode,s=dojo.getComputedStyle(n),mb=dojo._getMarginBox(n,s);
if(_11=="margin"){
return mb;
}
var t=dojo._getMarginExtents(n,s);
mb.l+=t.l,mb.t+=t.t,mb.w-=t.w,mb.h-=t.h;
if(_11=="border"){
return mb;
}
t=dojo._getBorderExtents(n,s);
mb.l+=t.l,mb.t+=t.t,mb.w-=t.w,mb.h-=t.h;
if(_11=="padding"){
return mb;
}
t=dojo._getPadExtents(n,s);
mb.l+=t.l,mb.t+=t.t,mb.w-=t.w,mb.h-=t.h;
return mb;
};
}});
dojo.dnd.move.constrainedMover=function(fun,_12){
dojo.deprecated("dojo.dnd.move.constrainedMover, use dojo.dnd.move.constrainedMoveable instead");
var _13=function(_14,e,_15){
dojo.dnd.Mover.call(this,_14,e,_15);
};
dojo.extend(_13,dojo.dnd.Mover.prototype);
dojo.extend(_13,{onMouseMove:function(e){
dojo.dnd.autoScroll(e);
var m=this.marginBox,c=this.constraintBox,l=m.l+e.pageX,t=m.t+e.pageY;
l=l<c.l?c.l:c.r<l?c.r:l;
t=t<c.t?c.t:c.b<t?c.b:t;
this.host.onMove(this,{l:l,t:t});
},onFirstMove:function(){
dojo.dnd.Mover.prototype.onFirstMove.call(this);
var c=this.constraintBox=fun.call(this);
c.r=c.l+c.w;
c.b=c.t+c.h;
if(_12){
var mb=dojo.marginBox(this.node);
c.r-=mb.w;
c.b-=mb.h;
}
}});
return _13;
};
dojo.dnd.move.boxConstrainedMover=function(box,_16){
dojo.deprecated("dojo.dnd.move.boxConstrainedMover, use dojo.dnd.move.boxConstrainedMoveable instead");
return dojo.dnd.move.constrainedMover(function(){
return box;
},_16);
};
dojo.dnd.move.parentConstrainedMover=function(_17,_18){
dojo.deprecated("dojo.dnd.move.parentConstrainedMover, use dojo.dnd.move.parentConstrainedMoveable instead");
var fun=function(){
var n=this.node.parentNode,s=dojo.getComputedStyle(n),mb=dojo._getMarginBox(n,s);
if(_17=="margin"){
return mb;
}
var t=dojo._getMarginExtents(n,s);
mb.l+=t.l,mb.t+=t.t,mb.w-=t.w,mb.h-=t.h;
if(_17=="border"){
return mb;
}
t=dojo._getBorderExtents(n,s);
mb.l+=t.l,mb.t+=t.t,mb.w-=t.w,mb.h-=t.h;
if(_17=="padding"){
return mb;
}
t=dojo._getPadExtents(n,s);
mb.l+=t.l,mb.t+=t.t,mb.w-=t.w,mb.h-=t.h;
return mb;
};
return dojo.dnd.move.constrainedMover(fun,_18);
};
dojo.dnd.constrainedMover=dojo.dnd.move.constrainedMover;
dojo.dnd.boxConstrainedMover=dojo.dnd.move.boxConstrainedMover;
dojo.dnd.parentConstrainedMover=dojo.dnd.move.parentConstrainedMover;
} | PypiClean |
/Ageas-0.0.1a6.tar.gz/Ageas-0.0.1a6/ageas/classifier/cnn_1d.py | import os
import torch
import itertools
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as func
import ageas.classifier as classifier
class Limited(nn.Module):
"""
Define a CNN model that treats input as 1D data,
built with the given hyperparameters.
The number of layer sets is limited to a maximum of 3.
"""
def __init__(self, id, param, n_class = 2):
# Initialization
super().__init__()
self.id = id
self.model_type = 'CNN_1D_Limited'
self.num_layers = param['num_layers']
self.loss_func = nn.CrossEntropyLoss()
# Layer set 1
self.pool = nn.MaxPool1d(param['maxpool_kernel_size'])
self.conv = nn.Conv1d(
1,
param['conv_kernel_num'],
param['conv_kernel_size']
)
# Layer set 2
self.pool1 = nn.MaxPool1d(param['maxpool_kernel_size'])
self.conv1 = nn.Conv1d(
param['conv_kernel_num'],
param['conv_kernel_num'],
param['conv_kernel_size']
)
# Layer set 3
self.pool2 = nn.MaxPool1d(param['maxpool_kernel_size'])
self.conv2 = nn.Conv1d(
param['conv_kernel_num'],
param['conv_kernel_num'],
param['conv_kernel_size']
)
### Trying to avoid Lazy module since it's under development ###
### But so far it's working just fine, so still using lazy module ###
# flattenLength = int(featureNum / pow(maxpool_kernel_size, num_layers))
# self.dense = nn.Linear(flattenLength, densed_size)
### -------------------------------------------------------- ###
self.dense = nn.LazyLinear(param['densed_size'])
self.decision = nn.Linear(param['densed_size'], n_class)
self.optimizer = optim.SGD(self.parameters(), param['learning_rate'])
# Overwrite the forward function in nn.Module
def forward(self, input):
input = self.pool(func.relu(self.conv(input)))
if self.num_layers > 1:
input = self.pool1(func.relu(self.conv1(input)))
if self.num_layers > 2:
input = self.pool2(func.relu(self.conv2(input)))
if self.num_layers > 3:
raise classifier.Error('CNN Model with more than 3 layer sets')
input = torch.flatten(input, start_dim = 1)
input = func.relu(self.dense(input))
input = func.softmax(self.decision(input), dim = 1)
return input
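# Illustrative sketch (hypothetical hyperparameter values; the input shape is
# (batch, 1, n_features), as expected by the first Conv1d layer):
#
#   param = {'num_layers': 2, 'conv_kernel_num': 32, 'conv_kernel_size': 3,
#            'maxpool_kernel_size': 2, 'densed_size': 64, 'learning_rate': 0.01}
#   model = Limited('demo', param)
#   probs = model(torch.randn(4, 1, 128))   # class probabilities, shape (4, 2)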
class Unlimited(nn.Module):
"""
Define a CNN model that treats input as 1D data,
built with the given hyperparameters; the number of layer sets is not limited.
"""
def __init__(self, id, param, n_class = 2):
# Initialization
super().__init__()
self.id = id
self.model_type = 'CNN_1D_Unlimited'
self.num_layers = param['num_layers']
self.loss_func = nn.CrossEntropyLoss()
self.conv = nn.Conv1d(
1,
param['conv_kernel_num'],
param['conv_kernel_size']
)
self.convMore = nn.Conv1d(
param['conv_kernel_num'],
param['conv_kernel_num'],
param['conv_kernel_size']
)
self.pool = nn.MaxPool1d(param['maxpool_kernel_size'])
### Trying to avoid Lazy module since it's under development ###
### But so far it's working just fine, so still using lazy module ###
# flattenLength = int(featureNum / pow(maxpool_kernel_size, num_layers))
# self.dense = nn.Linear(flattenLength, densed_size)
### -------------------------------------------------------- ###
self.dense = nn.LazyLinear(param['densed_size'])
self.decision = nn.Linear(param['densed_size'], n_class)
self.optimizer = optim.SGD(self.parameters(), param['learning_rate'])
# Overwrite the forward function in nn.Module
def forward(self, input):
input = self.pool(func.relu(self.conv(input)))
for i in range(self.num_layers - 1):
input = self.pool(func.relu(self.convMore(input)))
input = torch.flatten(input, start_dim = 1)
input = func.relu(self.dense(input))
input = func.softmax(self.decision(input), dim = 1)
return input
class Make(classifier.Make_Template):
"""
Analyze the performance of CNN-based approaches
with different hyperparameters and
find the top settings to build the CNN.
"""
# Perform classifier training process for given times
def train(self, dataSets, test_split_set):
testData = classifier.reshape_tensor(dataSets.dataTest)
testLabel = dataSets.labelTest
num_features = len(dataSets.dataTest[0])
for id in self.configs:
if self.configs[id]['config']['num_layers'] < 3:
model = Limited(id, self.configs[id]['config'])
else:
model = Unlimited(id, self.configs[id]['config'])
epoch = self.configs[id]['epoch']
batch_size = self.configs[id]['batch_size']
self._train_torch(epoch, batch_size, model, dataSets)
accuracy = self._evaluate_torch(
model,
testData,
testLabel,
test_split_set
)
self.models.append([model, accuracy]) | PypiClean |
/FotoKilof-4.3.3-py3-none-any.whl/fotokilof/ini_save.py | import configparser
import common
import log
def save(ini_data):
""" Save values into an INI file.
ini_data is a sequence of 13 items: the INI file path followed by one
dict per section (main, resize, text, rotate, crop, border, color,
normalize, contrast, mirror, vignette, logo), each carrying a 'section'
key plus its option values.
"""
# extract data
file_ini = ini_data[0]
main = ini_data[1]
resize = ini_data[2]
text = ini_data[3]
rotate = ini_data[4]
crop = ini_data[5]
border = ini_data[6]
color = ini_data[7]
normalize = ini_data[8]
contrast = ini_data[9]
mirror = ini_data[10]
vignette = ini_data[11]
logo = ini_data[12]
# prepare the INI content
config = configparser.ConfigParser()
# main
config.add_section(main['section'])
config.set(main['section'], 'path', main['path'])
config.set(main['section'], 'work_dir', main['work_dir'])
config.set(main['section'], 'file_dir', str(main['file_dir']))
config.set(main['section'], 'exif', str(main['exif']))
config.set(main['section'], 'histograms', str(main['histograms']))
config.set(main['section'], 'preview_orig', main['preview_orig'])
config.set(main['section'], 'preview_new', main['preview_new'])
config.set(main['section'], 'log', main['log'])
# resize
config.add_section(resize['section'])
config.set(resize['section'], 'on', str(resize['on']))
config.set(resize['section'], 'resize', str(resize['resize']))
config.set(resize['section'], 'size_pixel_x', resize['size_pixel_x'])
config.set(resize['section'], 'size_pixel_y', resize['size_pixel_y'])
config.set(resize['section'], 'size_percent', resize['size_percent'])
# text
config.add_section(text['section'])
config.set(text['section'], 'on', str(text['on']))
config.set(text['section'], 'inout', str(text['inout']))
config.set(text['section'], 'text', text['text'])
config.set(text['section'], 'gravity', text['gravity'])
config.set(text['section'], 'gravity_onoff', str(text['gravity_onoff']))
config.set(text['section'], 'font', text['font'])
config.set(text['section'], 'size', text['size'])
config.set(text['section'], 'color', text['color'])
config.set(text['section'], 'box', str(text['box']))
config.set(text['section'], 'box_color', text['box_color'])
config.set(text['section'], 'x', text['x'])
config.set(text['section'], 'y', text['y'])
config.set(text['section'], 'text_rotate', str(text['text_rotate']))
config.set(text['section'], 'text_rotate_own', text['text_rotate_own'])
# rotate
config.add_section(rotate['section'])
config.set(rotate['section'], 'on', str(rotate['on']))
config.set(rotate['section'], 'rotate', str(rotate['rotate']))
config.set(rotate['section'], 'own', rotate['own'])
config.set(rotate['section'], 'color', rotate['color'])
# crop
config.add_section(crop['section'])
config.set(crop['section'], 'on', str(crop['on']))
config.set(crop['section'], 'crop', str(crop['crop']))
config.set(crop['section'], '1_x1', crop['1_x1'])
config.set(crop['section'], '1_y1', crop['1_y1'])
config.set(crop['section'], '1_x2', crop['1_x2'])
config.set(crop['section'], '1_y2', crop['1_y2'])
config.set(crop['section'], '2_x1', crop['2_x1'])
config.set(crop['section'], '2_y1', crop['2_y1'])
config.set(crop['section'], '2_width', crop['2_width'])
config.set(crop['section'], '2_height', crop['2_height'])
config.set(crop['section'], '3_dx', crop['3_dx'])
config.set(crop['section'], '3_dy', crop['3_dy'])
config.set(crop['section'], '3_width', crop['3_width'])
config.set(crop['section'], '3_height', crop['3_height'])
config.set(crop['section'], 'gravity', crop['gravity'])
# border
config.add_section(border['section'])
config.set(border['section'], 'on', str(border['on']))
config.set(border['section'], 'color', border['color'])
config.set(border['section'], 'size_x', border['size_x'])
config.set(border['section'], 'size_y', border['size_y'])
# color
config.add_section(color['section'])
config.set(color['section'], 'on', str(color['on']))
config.set(color['section'], 'black-white', str(color['black-white']))
config.set(color['section'], 'sepia', color['sepia'])
# normalize
config.add_section(normalize['section'])
config.set(normalize['section'], 'on', str(normalize['on']))
config.set(normalize['section'], 'normalize', str(normalize['normalize']))
config.set(normalize['section'], 'channel', normalize['channel'])
# contrast
config.add_section(contrast['section'])
config.set(contrast['section'], 'on', str(contrast['on']))
config.set(contrast['section'], 'contrast', str(contrast['contrast']))
config.set(contrast['section'], 'selection', contrast['selection'])
config.set(contrast['section'], 'contrast_stretch_1', contrast['contrast_stretch_1'])
config.set(contrast['section'], 'contrast_stretch_2', contrast['contrast_stretch_2'])
# mirror
config.add_section(mirror['section'])
config.set(mirror['section'], 'on', str(mirror['on']))
config.set(mirror['section'], 'flip', str(mirror['flip']))
config.set(mirror['section'], 'flop', str(mirror['flop']))
# vignette
config.add_section(vignette['section'])
config.set(vignette['section'], 'on', str(vignette['on']))
config.set(vignette['section'], 'dx', str(common.empty(vignette['dx'])))
config.set(vignette['section'], 'dy', str(common.empty(vignette['dy'])))
config.set(vignette['section'], 'radius', vignette['radius'])
config.set(vignette['section'], 'sigma', vignette['sigma'])
config.set(vignette['section'], 'color', vignette['color'])
# logo
config.add_section(logo['section'])
config.set(logo['section'], 'on', str(logo['on']))
config.set(logo['section'], 'logo', logo['logo'])
config.set(logo['section'], 'gravity', logo['gravity'])
config.set(logo['section'], 'width', logo['width'])
config.set(logo['section'], 'height', logo['height'])
config.set(logo['section'], 'dx', logo['dx'])
config.set(logo['section'], 'dy', logo['dy'])
# save to a file
try:
with open(file_ini, 'w', encoding='utf-8', buffering=1) as configfile:
config.write(configfile)
except OSError:
log.write_log("ini_save: cannot save config file: " + file_ini, "E") | PypiClean |
/Flask_AdminLTE3-1.0.9-py3-none-any.whl/flask_adminlte3/static/plugins/summernote/lang/summernote-uz-UZ.min.js | !function(e,t){if("object"==typeof exports&&"object"==typeof module)module.exports=t();else if("function"==typeof define&&define.amd)define([],t);else{var r=t();for(var o in r)("object"==typeof exports?exports:e)[o]=r[o]}}(self,(function(){return(e=jQuery).extend(e.summernote.lang,{"uz-UZ":{font:{bold:"қалин",italic:"Курсив",underline:"Белгиланган",clear:"Ҳарф турларини олиб ташлаш",height:"Чизиқ баландлиги",name:"Ҳарф",strikethrough:"Ўчирилган",subscript:"Пастки индекс",superscript:"Юқори индекс",size:"ҳарф ҳажми"},image:{image:"Расм",insert:"расмни қўйиш",resizeFull:"Ҳажмни тиклаш",resizeHalf:"50% гача кичрайтириш",resizeQuarter:"25% гача кичрайтириш",floatLeft:"Чапда жойлаштириш",floatRight:"Ўнгда жойлаштириш",floatNone:"Стандарт бўйича жойлашув",shapeRounded:"Шакли: Юмалоқ",shapeCircle:"Шакли: Доира",shapeThumbnail:"Шакли: Миниатюра",shapeNone:"Шакли: Йўқ",dragImageHere:"Суратни кўчириб ўтинг",dropImage:"Суратни кўчириб ўтинг",selectFromFiles:"Файллардан бирини танлаш",url:"суратлар URL и",remove:"Суратни ўчириш"},video:{video:"Видео",videoLink:"Видеога ҳавола",insert:"Видео",url:"URL видео",providers:"(YouTube, Vimeo, Vine, Instagram, DailyMotion или Youku)"},link:{link:"Ҳавола",insert:"Ҳаволани қўйиш",unlink:"Ҳаволани олиб ташлаш",edit:"Таҳрир қилиш",textToDisplay:"Кўринадиган матн",url:"URL ўтиш учун",openInNewWindow:"Янги дарчада очиш"},table:{table:"Жадвал"},hr:{insert:"Горизонтал чизиқни қўйиш"},style:{style:"Услуб",p:"Яхши",blockquote:"Жумла",pre:"Код",h1:"Сарлавҳа 1",h2:"Сарлавҳа 2",h3:"Сарлавҳа 3",h4:"Сарлавҳа 4",h5:"Сарлавҳа 5",h6:"Сарлавҳа 6"},lists:{unordered:"Белгиланган рўйҳат",ordered:"Рақамланган рўйҳат"},options:{help:"Ёрдам",fullscreen:"Бутун экран бўйича",codeview:"Бошланғич код"},paragraph:{paragraph:"Параграф",outdent:"Орқага қайтишни камайтириш",indent:"Орқага қайтишни кўпайтириш",left:"Чап қирғоққа тўғрилаш",center:"Марказга тўғрилаш",right:"Ўнг қирғоққа тўғрилаш",justify:"Эни бўйлаб чўзиш"},color:{recent:"Охирги ранг",more:"Яна ранглар",background:"Фон ранги",foreground:"Ҳарф ранги",transparent:"Шаффоф",setTransparent:"Шаффофдай қилиш",reset:"Бекор қилиш",resetToDefault:"Стандартга оид тиклаш"},shortcut:{shortcuts:"Клавишларнинг ҳамохҳанглиги",close:"Ёпиқ",textFormatting:"Матнни ",action:"Ҳаркат",paragraphFormatting:"Параграфни форматлаш",documentStyle:"Ҳужжатнинг тури",extraKeys:"Қўшимча имкониятлар"},history:{undo:"Бекор қилиш",redo:"Қайтариш"}}}),{};var e})); | PypiClean |
/Django-4.2.4.tar.gz/Django-4.2.4/django/db/backends/ddl_references.py | from copy import deepcopy
class Reference:
"""Base class that defines the reference interface."""
def references_table(self, table):
"""
Return whether or not this instance references the specified table.
"""
return False
def references_column(self, table, column):
"""
Return whether or not this instance references the specified column.
"""
return False
def rename_table_references(self, old_table, new_table):
"""
Rename all references to the old_name to the new_table.
"""
pass
def rename_column_references(self, table, old_column, new_column):
"""
Rename all references to the old_column to the new_column.
"""
pass
def __repr__(self):
return "<%s %r>" % (self.__class__.__name__, str(self))
def __str__(self):
raise NotImplementedError(
"Subclasses must define how they should be converted to string."
)
class Table(Reference):
"""Hold a reference to a table."""
def __init__(self, table, quote_name):
self.table = table
self.quote_name = quote_name
def references_table(self, table):
return self.table == table
def rename_table_references(self, old_table, new_table):
if self.table == old_table:
self.table = new_table
def __str__(self):
return self.quote_name(self.table)
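# Illustrative sketch (quote_name would normally come from a backend, e.g.
# connection.ops.quote_name; the table names below are hypothetical):
#
#   quote = lambda name: '"%s"' % name
#   ref = Table('auth_user', quote)
#   ref.references_table('auth_user')                    # True
#   ref.rename_table_references('auth_user', 'accounts_user')
#   str(ref)                                             # '"accounts_user"'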
class TableColumns(Table):
"""Base class for references to multiple columns of a table."""
def __init__(self, table, columns):
self.table = table
self.columns = columns
def references_column(self, table, column):
return self.table == table and column in self.columns
def rename_column_references(self, table, old_column, new_column):
if self.table == table:
for index, column in enumerate(self.columns):
if column == old_column:
self.columns[index] = new_column
class Columns(TableColumns):
"""Hold a reference to one or many columns."""
def __init__(self, table, columns, quote_name, col_suffixes=()):
self.quote_name = quote_name
self.col_suffixes = col_suffixes
super().__init__(table, columns)
def __str__(self):
def col_str(column, idx):
col = self.quote_name(column)
try:
suffix = self.col_suffixes[idx]
if suffix:
col = "{} {}".format(col, suffix)
except IndexError:
pass
return col
return ", ".join(
col_str(column, idx) for idx, column in enumerate(self.columns)
)
class IndexName(TableColumns):
"""Hold a reference to an index name."""
def __init__(self, table, columns, suffix, create_index_name):
self.suffix = suffix
self.create_index_name = create_index_name
super().__init__(table, columns)
def __str__(self):
return self.create_index_name(self.table, self.columns, self.suffix)
class IndexColumns(Columns):
def __init__(self, table, columns, quote_name, col_suffixes=(), opclasses=()):
self.opclasses = opclasses
super().__init__(table, columns, quote_name, col_suffixes)
def __str__(self):
def col_str(column, idx):
# Index.__init__() guarantees that self.opclasses is the same
# length as self.columns.
col = "{} {}".format(self.quote_name(column), self.opclasses[idx])
try:
suffix = self.col_suffixes[idx]
if suffix:
col = "{} {}".format(col, suffix)
except IndexError:
pass
return col
return ", ".join(
col_str(column, idx) for idx, column in enumerate(self.columns)
)
class ForeignKeyName(TableColumns):
"""Hold a reference to a foreign key name."""
def __init__(
self,
from_table,
from_columns,
to_table,
to_columns,
suffix_template,
create_fk_name,
):
self.to_reference = TableColumns(to_table, to_columns)
self.suffix_template = suffix_template
self.create_fk_name = create_fk_name
super().__init__(
from_table,
from_columns,
)
def references_table(self, table):
return super().references_table(table) or self.to_reference.references_table(
table
)
def references_column(self, table, column):
return super().references_column(
table, column
) or self.to_reference.references_column(table, column)
def rename_table_references(self, old_table, new_table):
super().rename_table_references(old_table, new_table)
self.to_reference.rename_table_references(old_table, new_table)
def rename_column_references(self, table, old_column, new_column):
super().rename_column_references(table, old_column, new_column)
self.to_reference.rename_column_references(table, old_column, new_column)
def __str__(self):
suffix = self.suffix_template % {
"to_table": self.to_reference.table,
"to_column": self.to_reference.columns[0],
}
return self.create_fk_name(self.table, self.columns, suffix)
class Statement(Reference):
"""
Statement template and formatting parameters container.
Allows keeping a reference to a statement without interpolating identifiers
that might have to be adjusted if they're referencing a table or column
that is removed or renamed.
"""
def __init__(self, template, **parts):
self.template = template
self.parts = parts
def references_table(self, table):
return any(
hasattr(part, "references_table") and part.references_table(table)
for part in self.parts.values()
)
def references_column(self, table, column):
return any(
hasattr(part, "references_column") and part.references_column(table, column)
for part in self.parts.values()
)
def rename_table_references(self, old_table, new_table):
for part in self.parts.values():
if hasattr(part, "rename_table_references"):
part.rename_table_references(old_table, new_table)
def rename_column_references(self, table, old_column, new_column):
for part in self.parts.values():
if hasattr(part, "rename_column_references"):
part.rename_column_references(table, old_column, new_column)
def __str__(self):
return self.template % self.parts
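# Illustrative sketch: a Statement keeps its parts un-interpolated, so later
# renames propagate into the final SQL (names below are hypothetical):
#
#   quote = lambda name: '"%s"' % name
#   stmt = Statement(
#       'ALTER TABLE %(table)s ADD CONSTRAINT %(name)s UNIQUE (%(columns)s)',
#       table=Table('app_model', quote),
#       name='app_model_field_uniq',
#       columns=Columns('app_model', ['field'], quote),
#   )
#   stmt.rename_table_references('app_model', 'app_renamed')
#   str(stmt)   # interpolates the parts, reflecting the rename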
class Expressions(TableColumns):
def __init__(self, table, expressions, compiler, quote_value):
self.compiler = compiler
self.expressions = expressions
self.quote_value = quote_value
columns = [
col.target.column
for col in self.compiler.query._gen_cols([self.expressions])
]
super().__init__(table, columns)
def rename_table_references(self, old_table, new_table):
if self.table != old_table:
return
self.expressions = self.expressions.relabeled_clone({old_table: new_table})
super().rename_table_references(old_table, new_table)
def rename_column_references(self, table, old_column, new_column):
if self.table != table:
return
expressions = deepcopy(self.expressions)
self.columns = []
for col in self.compiler.query._gen_cols([expressions]):
if col.target.column == old_column:
col.target.column = new_column
self.columns.append(col.target.column)
self.expressions = expressions
def __str__(self):
sql, params = self.compiler.compile(self.expressions)
params = map(self.quote_value, params)
return sql % tuple(params) | PypiClean |
/Bis-Miner-3.11.1.tar.gz/Bis-Miner-3.11.0/Orange/widgets/utils/__init__.py | import inspect
import sys
from AnyQt.QtCore import QObject
from Orange.data.variable import TimeVariable
from Orange.util import deepgetattr
def vartype(var):
"""Return a numeric code for the variable's type:
1 = discrete, 2 = continuous, 3 = string, 4 = time, 0 = unknown."""
if var.is_discrete:
return 1
elif var.is_continuous:
if isinstance(var, TimeVariable):
return 4
return 2
elif var.is_string:
return 3
else:
return 0
def progress_bar_milestones(count, iterations=100):
"""Return the set of item indices at which a progress bar should advance."""
return {int(i * count / float(iterations)) for i in range(iterations)}
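# Illustrative sketch: with 10 items and the default 100 iterations the
# milestones collapse to one tick per item, while larger counts yield 100
# evenly spaced milestones:
#
#   progress_bar_milestones(10)     # -> {0, 1, 2, ..., 9}
#   progress_bar_milestones(1000)   # -> {0, 10, 20, ..., 990}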
def getdeepattr(obj, attr, *arg, **kwarg):
if isinstance(obj, dict):
return obj.get(attr)
return deepgetattr(obj, attr, *arg, **kwarg)
def to_html(str):
return str.replace("<=", "≤").replace(">=", "≥").\
replace("<", "<").replace(">", ">").replace("=\\=", "≠")
getHtmlCompatibleString = to_html
def get_variable_values_sorted(variable):
"""
Return the values of the given attribute, sorted numerically if they can
all be cast to int, and in their original order otherwise.
"""
if variable.is_continuous:
return []
try:
return sorted(variable.values, key=int)
except ValueError:
return variable.values
def dumpObjectTree(obj, _indent=0):
"""
Dumps Qt QObject tree. Aids in debugging internals.
See also: QObject.dumpObjectTree()
"""
assert isinstance(obj, QObject)
print('{indent}{type} "{name}"'.format(indent=' ' * (_indent * 4),
type=type(obj).__name__,
name=obj.objectName()),
file=sys.stderr)
for child in obj.children():
dumpObjectTree(child, _indent + 1)
def getmembers(obj, predicate=None):
"""Return all the members of an object in a list of (name, value) pairs sorted by name.
Behaves like inspect.getmembers. If a type object is passed as a predicate,
only members of that type are returned.
"""
if isinstance(predicate, type):
def mypredicate(x):
return isinstance(x, predicate)
else:
mypredicate = predicate
return inspect.getmembers(obj, mypredicate) | PypiClean |
/Genshi-0.7.7-py3-none-any.whl/genshi/filters/html.py | import re
import six
from genshi.core import Attrs, QName, stripentities
from genshi.core import END, START, TEXT, COMMENT
__all__ = ['HTMLFormFiller', 'HTMLSanitizer']
__docformat__ = 'restructuredtext en'
class HTMLFormFiller(object):
"""A stream filter that can populate HTML forms from a dictionary of values.
>>> from genshi.input import HTML
>>> html = HTML('''<form>
... <p><input type="text" name="foo" /></p>
... </form>''', encoding='utf-8')
>>> filler = HTMLFormFiller(data={'foo': 'bar'})
>>> print(html | filler)
<form>
<p><input type="text" name="foo" value="bar"/></p>
</form>
"""
# TODO: only select the first radio button, and the first select option
# (if not in a multiple-select)
# TODO: only apply to elements in the XHTML namespace (or no namespace)?
def __init__(self, name=None, id=None, data=None, passwords=False):
"""Create the filter.
:param name: The name of the form that should be populated. If this
parameter is given, only forms where the ``name`` attribute
value matches the parameter are processed.
:param id: The ID of the form that should be populated. If this
parameter is given, only forms where the ``id`` attribute
value matches the parameter are processed.
:param data: The dictionary of form values, where the keys are the names
of the form fields, and the values are the values to fill
in.
:param passwords: Whether password input fields should be populated.
This is off by default for security reasons (for
example, a password may end up in the browser cache)
:note: Changed in 0.5.2: added the `passwords` option
"""
self.name = name
self.id = id
if data is None:
data = {}
self.data = data
self.passwords = passwords
def __call__(self, stream):
"""Apply the filter to the given stream.
:param stream: the markup event stream to filter
"""
in_form = in_select = in_option = in_textarea = False
select_value = option_value = textarea_value = None
option_start = None
option_text = []
no_option_value = False
for kind, data, pos in stream:
if kind is START:
tag, attrs = data
tagname = tag.localname
if tagname == 'form' and (
self.name and attrs.get('name') == self.name or
self.id and attrs.get('id') == self.id or
not (self.id or self.name)):
in_form = True
elif in_form:
if tagname == 'input':
type = attrs.get('type', '').lower()
if type in ('checkbox', 'radio'):
name = attrs.get('name')
if name and name in self.data:
value = self.data[name]
declval = attrs.get('value')
checked = False
if isinstance(value, (list, tuple)):
if declval is not None:
u_vals = [six.text_type(v) for v in value]
checked = declval in u_vals
else:
checked = any(value)
else:
if declval is not None:
checked = declval == six.text_type(value)
elif type == 'checkbox':
checked = bool(value)
if checked:
attrs |= [(QName('checked'), 'checked')]
elif 'checked' in attrs:
attrs -= 'checked'
elif type in ('', 'hidden', 'text') \
or type == 'password' and self.passwords:
name = attrs.get('name')
if name and name in self.data:
value = self.data[name]
if isinstance(value, (list, tuple)):
value = value[0]
if value is not None:
attrs |= [
(QName('value'), six.text_type(value))
]
elif tagname == 'select':
name = attrs.get('name')
if name in self.data:
select_value = self.data[name]
in_select = True
elif tagname == 'textarea':
name = attrs.get('name')
if name in self.data:
textarea_value = self.data.get(name)
if isinstance(textarea_value, (list, tuple)):
textarea_value = textarea_value[0]
in_textarea = True
elif in_select and tagname == 'option':
option_start = kind, data, pos
option_value = attrs.get('value')
if option_value is None:
no_option_value = True
option_value = ''
in_option = True
continue
yield kind, (tag, attrs), pos
elif in_form and kind is TEXT:
if in_select and in_option:
if no_option_value:
option_value += data
option_text.append((kind, data, pos))
continue
elif in_textarea:
continue
yield kind, data, pos
elif in_form and kind is END:
tagname = data.localname
if tagname == 'form':
in_form = False
elif tagname == 'select':
in_select = False
select_value = None
elif in_select and tagname == 'option':
if isinstance(select_value, (tuple, list)):
selected = option_value in [six.text_type(v) for v
in select_value]
else:
selected = option_value == six.text_type(select_value)
okind, (tag, attrs), opos = option_start
if selected:
attrs |= [(QName('selected'), 'selected')]
elif 'selected' in attrs:
attrs -= 'selected'
yield okind, (tag, attrs), opos
if option_text:
for event in option_text:
yield event
in_option = False
no_option_value = False
option_start = option_value = None
option_text = []
elif in_textarea and tagname == 'textarea':
if textarea_value:
yield TEXT, six.text_type(textarea_value), pos
textarea_value = None
in_textarea = False
yield kind, data, pos
else:
yield kind, data, pos
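# Illustrative sketch: select elements are filled by marking the matching
# option as selected (the markup below is hypothetical):
#
#   from genshi.input import HTML
#   html = HTML('<form><select name="color">'
#               '<option value="red">Red</option>'
#               '<option value="blue">Blue</option>'
#               '</select></form>', encoding='utf-8')
#   print(html | HTMLFormFiller(data={'color': 'blue'}))
#   # the value="blue" option gains selected="selected"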
class HTMLSanitizer(object):
"""A filter that removes potentially dangerous HTML tags and attributes
from the stream.
>>> from genshi import HTML
>>> html = HTML('<div><script>alert(document.cookie)</script></div>', encoding='utf-8')
>>> print(html | HTMLSanitizer())
<div/>
The default set of safe tags and attributes can be modified when the filter
is instantiated. For example, to allow inline ``style`` attributes, the
following instantation would work:
>>> html = HTML('<div style="background: #000"></div>', encoding='utf-8')
>>> sanitizer = HTMLSanitizer(safe_attrs=HTMLSanitizer.SAFE_ATTRS | set(['style']))
>>> print(html | sanitizer)
<div style="background: #000"/>
Note that even in this case, the filter *does* attempt to remove dangerous
constructs from style attributes:
>>> html = HTML('<div style="background: url(javascript:void); color: #000"></div>', encoding='utf-8')
>>> print(html | sanitizer)
<div style="color: #000"/>
This handles HTML entities, unicode escapes in CSS and Javascript text, as
well as a lot of other things. However, the style tag is still excluded by
default because it is very hard for such sanitizing to be completely safe,
especially considering how much error recovery current web browsers perform.
It also does some basic filtering of CSS properties that may be used for
typical phishing attacks. For more sophisticated filtering, this class
provides a couple of hooks that can be overridden in sub-classes.
:warn: Note that this special processing of CSS is currently only applied to
style attributes, **not** style elements.
"""
SAFE_TAGS = frozenset(['a', 'abbr', 'acronym', 'address', 'area', 'b',
'big', 'blockquote', 'br', 'button', 'caption', 'center', 'cite',
'code', 'col', 'colgroup', 'dd', 'del', 'dfn', 'dir', 'div', 'dl', 'dt',
'em', 'fieldset', 'font', 'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'hr', 'i', 'img', 'input', 'ins', 'kbd', 'label', 'legend', 'li', 'map',
'menu', 'ol', 'optgroup', 'option', 'p', 'pre', 'q', 's', 'samp',
'select', 'small', 'span', 'strike', 'strong', 'sub', 'sup', 'table',
'tbody', 'td', 'textarea', 'tfoot', 'th', 'thead', 'tr', 'tt', 'u',
'ul', 'var'])
SAFE_ATTRS = frozenset(['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'axis', 'bgcolor', 'border', 'cellpadding',
'cellspacing', 'char', 'charoff', 'charset', 'checked', 'cite', 'class',
'clear', 'cols', 'colspan', 'color', 'compact', 'coords', 'datetime',
'dir', 'disabled', 'enctype', 'for', 'frame', 'headers', 'height',
'href', 'hreflang', 'hspace', 'id', 'ismap', 'label', 'lang',
'longdesc', 'maxlength', 'media', 'method', 'multiple', 'name',
'nohref', 'noshade', 'nowrap', 'prompt', 'readonly', 'rel', 'rev',
'rows', 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size',
'span', 'src', 'start', 'summary', 'tabindex', 'target', 'title',
'type', 'usemap', 'valign', 'value', 'vspace', 'width'])
SAFE_CSS = frozenset([
# CSS 3 properties <http://www.w3.org/TR/CSS/#properties>
'background', 'background-attachment', 'background-color',
'background-image', 'background-position', 'background-repeat',
'border', 'border-bottom', 'border-bottom-color',
'border-bottom-style', 'border-bottom-width', 'border-collapse',
'border-color', 'border-left', 'border-left-color',
'border-left-style', 'border-left-width', 'border-right',
'border-right-color', 'border-right-style', 'border-right-width',
'border-spacing', 'border-style', 'border-top', 'border-top-color',
'border-top-style', 'border-top-width', 'border-width', 'bottom',
'caption-side', 'clear', 'clip', 'color', 'content',
'counter-increment', 'counter-reset', 'cursor', 'direction', 'display',
'empty-cells', 'float', 'font', 'font-family', 'font-size',
'font-style', 'font-variant', 'font-weight', 'height', 'left',
'letter-spacing', 'line-height', 'list-style', 'list-style-image',
'list-style-position', 'list-style-type', 'margin', 'margin-bottom',
'margin-left', 'margin-right', 'margin-top', 'max-height', 'max-width',
'min-height', 'min-width', 'opacity', 'orphans', 'outline',
'outline-color', 'outline-style', 'outline-width', 'overflow',
'padding', 'padding-bottom', 'padding-left', 'padding-right',
'padding-top', 'page-break-after', 'page-break-before',
'page-break-inside', 'quotes', 'right', 'table-layout',
'text-align', 'text-decoration', 'text-indent', 'text-transform',
'top', 'unicode-bidi', 'vertical-align', 'visibility', 'white-space',
'widows', 'width', 'word-spacing', 'z-index',
])
SAFE_SCHEMES = frozenset(['file', 'ftp', 'http', 'https', 'mailto', None])
URI_ATTRS = frozenset(['action', 'background', 'dynsrc', 'href', 'lowsrc',
'src'])
def __init__(self, safe_tags=SAFE_TAGS, safe_attrs=SAFE_ATTRS,
safe_schemes=SAFE_SCHEMES, uri_attrs=URI_ATTRS,
safe_css=SAFE_CSS):
"""Create the sanitizer.
The exact set of allowed elements and attributes can be configured.
:param safe_tags: a set of tag names that are considered safe
:param safe_attrs: a set of attribute names that are considered safe
:param safe_schemes: a set of URI schemes that are considered safe
:param uri_attrs: a set of names of attributes that contain URIs
"""
self.safe_tags = safe_tags
# The set of tag names that are considered safe.
self.safe_attrs = safe_attrs
# The set of attribute names that are considered safe.
self.safe_css = safe_css
# The set of CSS properties that are considered safe.
self.uri_attrs = uri_attrs
# The set of names of attributes that may contain URIs.
self.safe_schemes = safe_schemes
# The set of URI schemes that are considered safe.
# IE6 <http://heideri.ch/jso/#80>
_EXPRESSION_SEARCH = re.compile(u"""
[eE
\uFF25 # FULLWIDTH LATIN CAPITAL LETTER E
\uFF45 # FULLWIDTH LATIN SMALL LETTER E
]
[xX
\uFF38 # FULLWIDTH LATIN CAPITAL LETTER X
\uFF58 # FULLWIDTH LATIN SMALL LETTER X
]
[pP
\uFF30 # FULLWIDTH LATIN CAPITAL LETTER P
\uFF50 # FULLWIDTH LATIN SMALL LETTER P
]
[rR
\u0280 # LATIN LETTER SMALL CAPITAL R
\uFF32 # FULLWIDTH LATIN CAPITAL LETTER R
\uFF52 # FULLWIDTH LATIN SMALL LETTER R
]
[eE
\uFF25 # FULLWIDTH LATIN CAPITAL LETTER E
\uFF45 # FULLWIDTH LATIN SMALL LETTER E
]
[sS
\uFF33 # FULLWIDTH LATIN CAPITAL LETTER S
\uFF53 # FULLWIDTH LATIN SMALL LETTER S
]{2}
[iI
\u026A # LATIN LETTER SMALL CAPITAL I
\uFF29 # FULLWIDTH LATIN CAPITAL LETTER I
\uFF49 # FULLWIDTH LATIN SMALL LETTER I
]
[oO
\uFF2F # FULLWIDTH LATIN CAPITAL LETTER O
\uFF4F # FULLWIDTH LATIN SMALL LETTER O
]
[nN
\u0274 # LATIN LETTER SMALL CAPITAL N
\uFF2E # FULLWIDTH LATIN CAPITAL LETTER N
\uFF4E # FULLWIDTH LATIN SMALL LETTER N
]
""", re.VERBOSE).search
# IE6 <http://openmya.hacker.jp/hasegawa/security/expression.txt>
# 7) Particular bit of Unicode characters
_URL_FINDITER = re.compile(
u'[Uu][Rr\u0280][Ll\u029F]%s*\(([^)]+)' % (r'\s')).finditer
def __call__(self, stream):
"""Apply the filter to the given stream.
:param stream: the markup event stream to filter
"""
waiting_for = None
for kind, data, pos in stream:
if kind is START:
if waiting_for:
continue
tag, attrs = data
if not self.is_safe_elem(tag, attrs):
waiting_for = tag
continue
new_attrs = []
for attr, value in attrs:
value = stripentities(value)
if attr not in self.safe_attrs:
continue
elif attr in self.uri_attrs:
# Don't allow URI schemes such as "javascript:"
if not self.is_safe_uri(value):
continue
elif attr == 'style':
# Remove dangerous CSS declarations from inline styles
decls = self.sanitize_css(value)
if not decls:
continue
value = '; '.join(decls)
new_attrs.append((attr, value))
yield kind, (tag, Attrs(new_attrs)), pos
elif kind is END:
tag = data
if waiting_for:
if waiting_for == tag:
waiting_for = None
else:
yield kind, data, pos
elif kind is not COMMENT:
if not waiting_for:
yield kind, data, pos
def is_safe_css(self, propname, value):
"""Determine whether the given css property declaration is to be
considered safe for inclusion in the output.
:param propname: the CSS property name
:param value: the value of the property
:return: whether the property value should be considered safe
:rtype: bool
:since: version 0.6
"""
if propname not in self.safe_css:
return False
if propname.startswith('margin') and '-' in value:
# Negative margins can be used for phishing
return False
return True
def is_safe_elem(self, tag, attrs):
"""Determine whether the given element should be considered safe for
inclusion in the output.
:param tag: the tag name of the element
:type tag: QName
:param attrs: the element attributes
:type attrs: Attrs
:return: whether the element should be considered safe
:rtype: bool
:since: version 0.6
"""
if tag not in self.safe_tags:
return False
if tag.localname == 'input':
input_type = attrs.get('type', '').lower()
if input_type == 'password':
return False
return True
def is_safe_uri(self, uri):
"""Determine whether the given URI is to be considered safe for
inclusion in the output.
The default implementation checks whether the scheme of the URI is in
the set of allowed URIs (`safe_schemes`).
>>> sanitizer = HTMLSanitizer()
>>> sanitizer.is_safe_uri('http://example.org/')
True
>>> sanitizer.is_safe_uri('javascript:alert(document.cookie)')
False
:param uri: the URI to check
:return: `True` if the URI can be considered safe, `False` otherwise
:rtype: `bool`
:since: version 0.4.3
"""
if '#' in uri:
uri = uri.split('#', 1)[0] # Strip out the fragment identifier
if ':' not in uri:
return True # This is a relative URI
chars = [char for char in uri.split(':', 1)[0] if char.isalnum()]
return ''.join(chars).lower() in self.safe_schemes
def sanitize_css(self, text):
"""Remove potentially dangerous property declarations from CSS code.
In particular, properties using the CSS ``url()`` function with a scheme
that is not considered safe are removed:
>>> sanitizer = HTMLSanitizer()
>>> sanitizer.sanitize_css(u'''
... background: url(javascript:alert("foo"));
... color: #000;
... ''')
['color: #000']
Also, the proprietary Internet Explorer function ``expression()`` is
always stripped:
>>> sanitizer.sanitize_css(u'''
... background: #fff;
... color: #000;
... width: e/**/xpression(alert("foo"));
... ''')
['background: #fff', 'color: #000']
:param text: the CSS text; this is expected to be `unicode` and to not
contain any character or numeric references
:return: a list of declarations that are considered safe
:rtype: `list`
:since: version 0.4.3
"""
decls = []
text = self._strip_css_comments(self._replace_unicode_escapes(text))
for decl in text.split(';'):
decl = decl.strip()
if not decl:
continue
try:
propname, value = decl.split(':', 1)
except ValueError:
continue
if not self.is_safe_css(propname.strip().lower(), value.strip()):
continue
is_evil = False
if self._EXPRESSION_SEARCH(value):
is_evil = True
for match in self._URL_FINDITER(value):
if not self.is_safe_uri(match.group(1)):
is_evil = True
break
if not is_evil:
decls.append(decl.strip())
return decls
_NORMALIZE_NEWLINES = re.compile(r'\r\n').sub
_UNICODE_ESCAPE = re.compile(
r"""\\([0-9a-fA-F]{1,6})\s?|\\([^\r\n\f0-9a-fA-F'"{};:()#*])""",
re.UNICODE).sub
def _replace_unicode_escapes(self, text):
def _repl(match):
t = match.group(1)
if t:
return six.unichr(int(t, 16))
t = match.group(2)
if t == '\\':
return r'\\'
else:
return t
return self._UNICODE_ESCAPE(_repl, self._NORMALIZE_NEWLINES('\n', text))
_CSS_COMMENTS = re.compile(r'/\*.*?\*/').sub
def _strip_css_comments(self, text):
return self._CSS_COMMENTS('', text) | PypiClean |
/KingSnake-2.0.0.tar.gz/KingSnake-2.0.0/king_snake/player.py |
from king_snake.errors import (FieldMustBeCastledError,
FieldOccupiedError,
IllegalMoveError,
PawnMustCaptureError,
TurnError)
from king_snake.figures import Pawn, Rook, Knight, Bishop, Queen, King
class Player(object):
"""A chess player."""
def __repr__(self):
return "Player()"
def __str__(self):
if self.chessboard:
return_string = ("{color} Player on "
"{chessboard}\n"
"Figures: "
"{figures}".format(color=self.color,
chessboard=self.chessboard,
figures=self.figures))
else:
return_string = self.__repr__()
return return_string
def __init__(self):
self.chessboard = None
self.figures = None
self.king = None
self.color = None
@property
def opponent(self):
"""Return other player in chess game"""
if self.color == "white":
return self.chessboard.players["black"]
else:
return self.chessboard.players["white"]
def set_up_board(self, chessboard):
"""Set up pieces on given chessboard and find other player."""
self.chessboard = chessboard
if self == self.chessboard.players["white"]:
self.color = "white"
else:
self.color = "black"
self.figures = [Pawn(self) for _ in range(8)]
for doubled_piece in (Rook, Knight, Bishop) * 2:
self.figures.append(doubled_piece(self))
self.figures.append(Queen(self))
self.king = King(self)
self.figures.append(self.king)
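# Illustrative sketch (assumes a chessboard object that registers both
# players in its `players` dict and tracks turns; names are hypothetical):
#
#   white = chessboard.players["white"]
#   white.move("E2", "E4")   # raises TurnError when called out of turn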
def move(self, start, goal):
"""
Move a piece to a new field.
First verify if self is the chessboard's current player. Then check if
a moveable figure is located at the start field. If the piece can be
moved, move to the goal field, capturing a figure at the goal field if
necessary. Finally, check if the move would put the own king in check.
If yes, roll back the move. Otherwise, record the current turn on all
moved pieces and end the turn.
@param start - String used to look up the start field object (e.g. "E2")
@param goal - Like start, for the goal field
"""
if self != self.chessboard.current_player:
raise TurnError("Move attempted out of turn.")
start_field = self.chessboard.fields[start]
goal_field = self.chessboard.fields[goal]
figure = start_field.figure
if figure not in self.figures:
raise IllegalMoveError("Player does not own a piece at given "
"position.")
try:
figure.move(goal_field)
captured_piece = None
except (FieldOccupiedError, PawnMustCaptureError):
captured_piece = figure.capture(goal_field)
except FieldMustBeCastledError:
captured_piece = figure.castle(goal_field)
if self.king.in_check:
self.chessboard.rollback()
raise IllegalMoveError("Move would put player's king in check.")
figure.already_moved = True
figure.last_moved = self.chessboard.current_move
if captured_piece:
captured_piece.last_moved = self.chessboard.current_move
self.chessboard.end_turn(start, goal) | PypiClean |
/GeoNDT-0.4.0.tar.gz/GeoNDT-0.4.0/docs/.build/html/readme.html | <!DOCTYPE html>
<html class="writer-html4" lang="en" >
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Overview — GeoNDT Python library "0.1.0" documentation</title>
<link rel="stylesheet" href="_static/css/theme.css" type="text/css" />
<link rel="stylesheet" href="_static/pygments.css" type="text/css" />
<!--[if lt IE 9]>
<script src="_static/js/html5shiv.min.js"></script>
<![endif]-->
<script type="text/javascript" id="documentation_options" data-url_root="./" src="_static/documentation_options.js"></script>
<script type="text/javascript" src="_static/jquery.js"></script>
<script type="text/javascript" src="_static/underscore.js"></script>
<script type="text/javascript" src="_static/doctools.js"></script>
<script type="text/javascript" src="_static/language_data.js"></script>
<script type="text/javascript" src="_static/js/theme.js"></script>
<link rel="index" title="Index" href="genindex.html" />
<link rel="search" title="Search" href="search.html" />
<link rel="next" title="Installation" href="installation.html" />
<link rel="prev" title="Contents" href="index.html" />
</head>
<body class="wy-body-for-nav">
<div class="wy-grid-for-nav">
<nav data-toggle="wy-nav-shift" class="wy-nav-side">
<div class="wy-side-scroll">
<div class="wy-side-nav-search" >
<a href="index.html" class="icon icon-home"> GeoNDT Python library
</a>
<div class="version">
"0.1.0"
</div>
<div role="search">
<form id="rtd-search-form" class="wy-form" action="search.html" method="get">
<input type="text" name="q" placeholder="Search docs" />
<input type="hidden" name="check_keywords" value="yes" />
<input type="hidden" name="area" value="default" />
</form>
</div>
</div>
<div class="wy-menu wy-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
<ul class="current">
<li class="toctree-l1 current"><a class="current reference internal" href="#">Overview</a><ul>
<li class="toctree-l2"><a class="reference internal" href="#quick-start">Quick start</a><ul>
<li class="toctree-l3"><a class="reference internal" href="#usage">Usage</a></li>
</ul>
</li>
<li class="toctree-l2"><a class="reference internal" href="#troubleshooting">Troubleshooting</a></li>
<li class="toctree-l2"><a class="reference internal" href="#references">References</a></li>
</ul>
</li>
<li class="toctree-l1"><a class="reference internal" href="installation.html">Installation</a></li>
<li class="toctree-l1"><a class="reference internal" href="usage.html">Usage examples</a></li>
<li class="toctree-l1"><a class="reference internal" href="reference.html">References</a></li>
<li class="toctree-l1"><a class="reference internal" href="authors.html">Authors</a></li>
</ul>
</div>
</div>
</nav>
<section data-toggle="wy-nav-shift" class="wy-nav-content-wrap">
<nav class="wy-nav-top" aria-label="top navigation">
<i data-toggle="wy-nav-top" class="fa fa-bars"></i>
<a href="index.html">GeoNDT Python library</a>
</nav>
<div class="wy-nav-content">
<div class="rst-content">
<div role="navigation" aria-label="breadcrumbs navigation">
<ul class="wy-breadcrumbs">
<li><a href="index.html" class="icon icon-home"></a> »</li>
<li>Overview</li>
<li class="wy-breadcrumbs-aside">
<a href="_sources/readme.rst.txt" rel="nofollow"> View page source</a>
</li>
</ul>
<hr/>
</div>
<div role="main" class="document" itemscope="itemscope" itemtype="http://schema.org/Article">
<div itemprop="articleBody">
<div class="section" id="overview">
<h1>Overview<a class="headerlink" href="#overview" title="Permalink to this headline">¶</a></h1>
<p>GeoNDT is a fast general-purpose computational tool for geotechnical non-destructive testing applications.
GeoNDT is flexible, general-purpose, and can be used seamlessly for advanced signal interpretation in geophysical
laboratory testing including the bender element (BE) and ultrasonic pulse velocity (UPV) tests, characterization of
complex multiphase geomaterials, in-situ shallow seismic geophysics including the falling weight deflectometer (FWD)
and multichannel analysis of surface waves (MASW) tests. The advanced physics-based signal interpretation feature of
GeoNDT allows the quantitative characterization of geophysical and geomechanical properties of geomaterials and multilayered
geosystems independently without making any simplified assumptions as common in the current practice.</p>
<div class="section" id="quick-start">
<h2>Quick start<a class="headerlink" href="#quick-start" title="Permalink to this headline">¶</a></h2>
<p>Install (only for Linux):</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">pip</span> <span class="n">install</span> <span class="n">geondt</span>
</pre></div>
</div>
<p>To install development version, clone this repo and install in Linux:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">git</span> <span class="n">clone</span> <span class="n">https</span><span class="p">:</span><span class="o">//</span><span class="n">github</span><span class="o">.</span><span class="n">com</span><span class="o">/</span><span class="n">siglab</span><span class="o">/</span><span class="n">geondt</span>
<span class="n">cd</span> <span class="n">geondt</span>
<span class="n">pip</span> <span class="n">install</span> <span class="o">-</span><span class="n">e</span> <span class="o">.</span>
</pre></div>
</div>
<p>To install development version, clone this repo and install in Windows:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">git</span> <span class="n">clone</span> <span class="n">https</span><span class="p">:</span><span class="o">//</span><span class="n">github</span><span class="o">.</span><span class="n">com</span><span class="o">/</span><span class="n">siglab</span><span class="o">/</span><span class="n">geondt</span>
<span class="n">cd</span> <span class="n">geondt</span>
<span class="n">python</span> <span class="n">setup</span><span class="o">.</span><span class="n">py</span> <span class="n">build</span> <span class="o">--</span><span class="n">compiler</span><span class="o">=</span><span class="n">mingw32</span>
<span class="n">python</span> <span class="n">setup</span><span class="o">.</span><span class="n">py</span> <span class="n">install</span>
</pre></div>
</div>
<div class="section" id="usage">
<h3>Usage<a class="headerlink" href="#usage" title="Permalink to this headline">¶</a></h3>
<blockquote>
<div><p>The GeoNDT can efficiently study the three-dimensional wave propagation within soil specimens in the BE test. Sample code is given as follows:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="gp">>>> </span><span class="kn">import</span> <span class="nn">numpy</span> <span class="k">as</span> <span class="nn">np</span>
<span class="gp">>>> </span><span class="kn">from</span> <span class="nn">geondt</span> <span class="k">import</span> <span class="n">one_phase_dynamic</span>
<span class="gp">>>> </span><span class="kn">import</span> <span class="nn">json</span>
<span class="gp">>>> </span><span class="k">with</span> <span class="nb">open</span><span class="p">(</span><span class="s1">'BE_dry.json'</span><span class="p">,</span> <span class="s2">"r"</span><span class="p">)</span> <span class="k">as</span> <span class="n">f</span><span class="p">:</span>
<span class="go"> data = json.load(f)</span>
<span class="gp">>>> </span><span class="n">BE</span> <span class="o">=</span> <span class="n">one_phase_dynamic</span><span class="p">(</span><span class="o">**</span><span class="n">data</span><span class="p">[</span><span class="s2">"input"</span><span class="p">])</span>
<span class="gp">>>> </span><span class="n">signal</span> <span class="o">=</span> <span class="n">BE</span><span class="o">.</span><span class="n">run_f</span><span class="p">()</span>
</pre></div>
</div>
</div></blockquote>
</div>
</div>
<div class="section" id="troubleshooting">
<h2>Troubleshooting<a class="headerlink" href="#troubleshooting" title="Permalink to this headline">¶</a></h2>
<p>The installation procedure assumes that the Fortran compiler such as Gfortran and Lapack library are installed on your system.
To install Gfortran and Lapack in Linux:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">sudo</span> <span class="n">apt</span> <span class="n">install</span> <span class="n">gfortran</span>
<span class="n">sudo</span> <span class="n">apt</span><span class="o">-</span><span class="n">get</span> <span class="n">install</span> <span class="n">liblapacke</span><span class="o">-</span><span class="n">dev</span> <span class="n">checkinstall</span>
<span class="n">export</span> <span class="n">gfortran</span><span class="o">=</span><span class="s2">"/home/kay/gcc-4.8.5/bin/gfortran"</span>
</pre></div>
</div>
<p>To install Gfortran and Lapack in Windows:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="o">*</span> <span class="n">Use</span> <span class="n">MinGW</span> <span class="o"><</span><span class="n">https</span><span class="p">:</span><span class="o">//</span><span class="n">sourceforge</span><span class="o">.</span><span class="n">net</span><span class="o">/</span><span class="n">projects</span><span class="o">/</span><span class="n">mingw</span><span class="o">-</span><span class="n">w64</span><span class="o">/></span> <span class="n">to</span> <span class="n">get</span> <span class="n">Gfortran</span><span class="o">.</span> <span class="n">Make</span> <span class="n">sure</span> <span class="n">the</span> <span class="n">Mingw</span> <span class="ow">is</span> <span class="n">added</span> <span class="n">to</span> <span class="n">the</span> <span class="n">system</span> <span class="n">path</span><span class="o">.</span>
<span class="o">*</span> <span class="n">Then</span> <span class="n">add</span> <span class="n">the</span> <span class="n">liblapack</span><span class="o">.</span><span class="n">a</span> <span class="n">file</span> <span class="p">(</span><span class="n">can</span> <span class="n">be</span> <span class="n">found</span> <span class="n">under</span> <span class="n">lib</span> <span class="n">folder</span> <span class="ow">in</span> <span class="n">this</span> <span class="n">respiratory</span> <span class="p">)</span> <span class="ow">in</span> <span class="n">the</span> <span class="n">MinGW</span> <span class="n">folder</span> <span class="p">(</span><span class="n">C</span><span class="p">:</span>\<span class="n">mingw64</span>\<span class="n">x86_64</span><span class="o">-</span><span class="n">w64</span><span class="o">-</span><span class="n">mingw32</span>\<span class="n">lib</span><span class="p">)</span><span class="o">.</span>
</pre></div>
</div>
</div>
<div class="section" id="references">
<h2>References<a class="headerlink" href="#references" title="Permalink to this headline">¶</a></h2>
<table class="docutils footnote" frame="void" id="id1" rules="none">
<colgroup><col class="label" /><col /></colgroup>
<tbody valign="top">
<tr><td class="label">[1]</td><td>Liu H, Maghoul P, Mantelet, G, Shalaby A
GeoNDT: a fast general-purpose computational tool for geotechnical non-destructive testing applications. Computers and Geotechnics.</td></tr>
</tbody>
</table>
<table class="docutils footnote" frame="void" id="id2" rules="none">
<colgroup><col class="label" /><col /></colgroup>
<tbody valign="top">
<tr><td class="label">[2]</td><td>Liu H, Maghoul P, Shalaby A, Bahari A, Moradi F.
Integrated approach for the MASW dispersion analysis using the spectral element technique and trust region reflective method.
Computers and Geotechnics. 2020 Sep 1;125:103689.</td></tr>
</tbody>
</table>
</div>
</div>
</div>
</div>
<footer>
<div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
<a href="installation.html" class="btn btn-neutral float-right" title="Installation" accesskey="n" rel="next">Next <span class="fa fa-arrow-circle-right" aria-hidden="true"></span></a>
<a href="index.html" class="btn btn-neutral float-left" title="Contents" accesskey="p" rel="prev"><span class="fa fa-arrow-circle-left" aria-hidden="true"></span> Previous</a>
</div>
<hr/>
<div role="contentinfo">
<p>
© Copyright 2021, Hongwei Liu, Pooneh Maghoul, Guillaume Mantelet, Ahmed Shalaby.
<span class="lastupdated">
Last updated on Jun 11, 2021.
</span>
</p>
</div>
Built with <a href="https://www.sphinx-doc.org/">Sphinx</a> using a
<a href="https://github.com/readthedocs/sphinx_rtd_theme">theme</a>
provided by <a href="https://readthedocs.org">Read the Docs</a>.
</footer>
</div>
</div>
</section>
</div>
<script type="text/javascript">
jQuery(function () {
SphinxRtdTheme.Navigation.enable(true);
});
</script>
</body>
</html>
/AyiinXd-0.0.8-cp311-cp311-macosx_10_9_universal2.whl/fipper/node_modules/domexception/node_modules/webidl-conversions/README.md

# Web IDL Type Conversions on JavaScript Values
This package implements, in JavaScript, the algorithms to convert a given JavaScript value according to a given [Web IDL](http://heycam.github.io/webidl/) [type](http://heycam.github.io/webidl/#idl-types).
The goal is that you should be able to write code like
```js
"use strict";
const conversions = require("webidl-conversions");
function doStuff(x, y) {
x = conversions["boolean"](x);
y = conversions["unsigned long"](y);
// actual algorithm code here
}
```
and your function `doStuff` will behave the same as a Web IDL operation declared as
```webidl
void doStuff(boolean x, unsigned long y);
```
## API
This package's main module's default export is an object with a variety of methods, each corresponding to a different Web IDL type. Each method, when invoked on a JavaScript value, will give back the new JavaScript value that results after passing through the Web IDL conversion rules. (See below for more details on what that means.) Alternately, the method could throw an error, if the Web IDL algorithm is specified to do so: for example `conversions["float"](NaN)` [will throw a `TypeError`](http://heycam.github.io/webidl/#es-float).
Each method also accepts a second, optional, parameter for miscellaneous options. For conversion methods that throw errors, a string option `{ context }` may be provided to provide more information in the error message. (For example, `conversions["float"](NaN, { context: "Argument 1 of Interface's operation" })` will throw an error with message `"Argument 1 of Interface's operation is not a finite floating-point value."`) Specific conversions may also accept other options, the details of which can be found below.
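
For instance, combining a conversion with its options might look like this (a short sketch; `conversions` is the package's default export, as above):

```js
"use strict";
const conversions = require("webidl-conversions");

// clamp keeps out-of-range values at the type's boundary instead of wrapping:
const clamped = conversions["octet"](300, { clamp: true }); // 255

// context makes a failing conversion's TypeError message more descriptive:
conversions["float"](NaN, { context: "Argument 1 of doStuff" });
// TypeError: Argument 1 of doStuff is not a finite floating-point value.
```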
## Conversions implemented
Conversions for all of the basic types from the Web IDL specification are implemented:
- [`any`](https://heycam.github.io/webidl/#es-any)
- [`void`](https://heycam.github.io/webidl/#es-void)
- [`boolean`](https://heycam.github.io/webidl/#es-boolean)
- [Integer types](https://heycam.github.io/webidl/#es-integer-types), which can additionally be provided the boolean options `{ clamp, enforceRange }` as a second parameter
- [`float`](https://heycam.github.io/webidl/#es-float), [`unrestricted float`](https://heycam.github.io/webidl/#es-unrestricted-float)
- [`double`](https://heycam.github.io/webidl/#es-double), [`unrestricted double`](https://heycam.github.io/webidl/#es-unrestricted-double)
- [`DOMString`](https://heycam.github.io/webidl/#es-DOMString), which can additionally be provided the boolean option `{ treatNullAsEmptyString }` as a second parameter
- [`ByteString`](https://heycam.github.io/webidl/#es-ByteString), [`USVString`](https://heycam.github.io/webidl/#es-USVString)
- [`object`](https://heycam.github.io/webidl/#es-object)
- [`Error`](https://heycam.github.io/webidl/#es-Error)
- [Buffer source types](https://heycam.github.io/webidl/#es-buffer-source-types)
Additionally, for convenience, the following derived type definitions are implemented:
- [`ArrayBufferView`](https://heycam.github.io/webidl/#ArrayBufferView)
- [`BufferSource`](https://heycam.github.io/webidl/#BufferSource)
- [`DOMTimeStamp`](https://heycam.github.io/webidl/#DOMTimeStamp)
- [`Function`](https://heycam.github.io/webidl/#Function)
- [`VoidFunction`](https://heycam.github.io/webidl/#VoidFunction) (although it will not censor the return type)
Derived types, such as nullable types, promise types, sequences, records, etc. are not handled by this library. You may wish to investigate the [webidl2js](https://github.com/jsdom/webidl2js) project.
### A note on the `long long` types
The `long long` and `unsigned long long` Web IDL types can hold values that cannot be stored in JavaScript numbers, so the conversion is imperfect. For example, converting the JavaScript number `18446744073709552000` to a Web IDL `long long` is supposed to produce the Web IDL value `-18446744073709551232`. Since we are representing our Web IDL values in JavaScript, we can't represent `-18446744073709551232`, so instead the best we can do is produce `-18446744073709552000` as the output.
This library actually doesn't even get that far. Producing those results would require doing accurate modular arithmetic on 64-bit intermediate values, but JavaScript does not make this easy. We could pull in a big-integer library as a dependency, but in lieu of that, we for now have decided to just produce inaccurate results if you pass in numbers that are not strictly between `Number.MIN_SAFE_INTEGER` and `Number.MAX_SAFE_INTEGER`.
## Background
What's actually going on here, conceptually, is pretty weird. Let's try to explain.
Web IDL, as part of its madness-inducing design, has its own type system. When people write algorithms in web platform specs, they usually operate on Web IDL values, i.e. instances of Web IDL types. For example, if they were specifying the algorithm for our `doStuff` operation above, they would treat `x` as a Web IDL value of [Web IDL type `boolean`](http://heycam.github.io/webidl/#idl-boolean). Crucially, they would _not_ treat `x` as a JavaScript variable whose value is either the JavaScript `true` or `false`. They're instead working in a different type system altogether, with its own rules.
Separately from its type system, Web IDL defines a ["binding"](http://heycam.github.io/webidl/#ecmascript-binding) of the type system into JavaScript. This contains rules like: when you pass a JavaScript value to the JavaScript method that manifests a given Web IDL operation, how does that get converted into a Web IDL value? For example, a JavaScript `true` passed in the position of a Web IDL `boolean` argument becomes a Web IDL `true`. But, a JavaScript `true` passed in the position of a [Web IDL `unsigned long`](http://heycam.github.io/webidl/#idl-unsigned-long) becomes a Web IDL `1`. And so on.
Finally, we have the actual implementation code. This is usually C++, although these days [some smart people are using Rust](https://github.com/servo/servo). The implementation, of course, has its own type system. So when they implement the Web IDL algorithms, they don't actually use Web IDL values, since those aren't "real" outside of specs. Instead, implementations apply the Web IDL binding rules in such a way as to convert incoming JavaScript values into C++ values. For example, if code in the browser called `doStuff(true, true)`, then the implementation code would eventually receive a C++ `bool` containing `true` and a C++ `uint32_t` containing `1`.
The upside of all this is that implementations can abstract all the conversion logic away, letting Web IDL handle it, and focus on implementing the relevant methods in C++ with values of the correct type already provided. That is the payoff of Web IDL, in a nutshell.
And getting to that payoff is the goal of _this_ project—but for JavaScript implementations, instead of C++ ones. That is, this library is designed to make it easier for JavaScript developers to write functions that behave like a given Web IDL operation. So conceptually, the conversion pipeline, which in its general form is JavaScript values ↦ Web IDL values ↦ implementation-language values, in this case becomes JavaScript values ↦ Web IDL values ↦ JavaScript values. And that intermediate step is where all the logic is performed: a JavaScript `true` becomes a Web IDL `1` in an unsigned long context, which then becomes a JavaScript `1`.
## Don't use this
Seriously, why would you ever use this? You really shouldn't. Web IDL is … strange, and you shouldn't be emulating its semantics. If you're looking for a generic argument-processing library, you should find one with better rules than those from Web IDL. In general, your JavaScript should not be trying to become more like Web IDL; if anything, we should fix Web IDL to make it more like JavaScript.
The _only_ people who should use this are those trying to create faithful implementations (or polyfills) of web platform interfaces defined in Web IDL. Its main consumer is the [jsdom](https://github.com/tmpvar/jsdom) project.
/BRAILS-3.0.1.tar.gz/BRAILS-3.0.1/brails/modules/GarageDetector/lib/efficientdet/model.py

import torch.nn as nn
import torch
from torchvision.ops.boxes import nms as nms_torch
from lib.efficientnet import EfficientNet as EffNet
from lib.efficientnet.utils import MemoryEfficientSwish, Swish
from lib.efficientnet.utils_extra import Conv2dStaticSamePadding, MaxPool2dStaticSamePadding
def nms(dets, thresh):
return nms_torch(dets[:, :4], dets[:, 4], thresh)
class SeparableConvBlock(nn.Module):
"""
created by Zylo117
"""
def __init__(self, in_channels, out_channels=None, norm=True, activation=False, onnx_export=False):
super(SeparableConvBlock, self).__init__()
if out_channels is None:
out_channels = in_channels
        # Q: whether to use a separate conv,
        #    share the bias between depthwise_conv and pointwise_conv,
        #    or have just pointwise_conv apply a bias.
        # A: Confirmed, just pointwise_conv applies a bias; depthwise_conv has no bias.
self.depthwise_conv = Conv2dStaticSamePadding(in_channels, in_channels,
kernel_size=3, stride=1, groups=in_channels, bias=False)
self.pointwise_conv = Conv2dStaticSamePadding(in_channels, out_channels, kernel_size=1, stride=1)
self.norm = norm
if self.norm:
# Warning: pytorch momentum is different from tensorflow's, momentum_pytorch = 1 - momentum_tensorflow
self.bn = nn.BatchNorm2d(num_features=out_channels, momentum=0.01, eps=1e-3)
self.activation = activation
if self.activation:
self.swish = MemoryEfficientSwish() if not onnx_export else Swish()
def forward(self, x):
x = self.depthwise_conv(x)
x = self.pointwise_conv(x)
if self.norm:
x = self.bn(x)
if self.activation:
x = self.swish(x)
return x
class BiFPN(nn.Module):
"""
modified by Zylo117
"""
def __init__(self, num_channels, conv_channels, first_time=False, epsilon=1e-4, onnx_export=False, attention=True):
"""
Args:
num_channels:
conv_channels:
first_time: whether the input comes directly from the efficientnet,
if True, downchannel it first, and downsample P5 to generate P6 then P7
epsilon: epsilon of fast weighted attention sum of BiFPN, not the BN's epsilon
onnx_export: if True, use Swish instead of MemoryEfficientSwish
"""
super(BiFPN, self).__init__()
self.epsilon = epsilon
# Conv layers
self.conv6_up = SeparableConvBlock(num_channels, onnx_export=onnx_export)
self.conv5_up = SeparableConvBlock(num_channels, onnx_export=onnx_export)
self.conv4_up = SeparableConvBlock(num_channels, onnx_export=onnx_export)
self.conv3_up = SeparableConvBlock(num_channels, onnx_export=onnx_export)
self.conv4_down = SeparableConvBlock(num_channels, onnx_export=onnx_export)
self.conv5_down = SeparableConvBlock(num_channels, onnx_export=onnx_export)
self.conv6_down = SeparableConvBlock(num_channels, onnx_export=onnx_export)
self.conv7_down = SeparableConvBlock(num_channels, onnx_export=onnx_export)
# Feature scaling layers
self.p6_upsample = nn.Upsample(scale_factor=2, mode='nearest')
self.p5_upsample = nn.Upsample(scale_factor=2, mode='nearest')
self.p4_upsample = nn.Upsample(scale_factor=2, mode='nearest')
self.p3_upsample = nn.Upsample(scale_factor=2, mode='nearest')
self.p4_downsample = MaxPool2dStaticSamePadding(3, 2)
self.p5_downsample = MaxPool2dStaticSamePadding(3, 2)
self.p6_downsample = MaxPool2dStaticSamePadding(3, 2)
self.p7_downsample = MaxPool2dStaticSamePadding(3, 2)
self.swish = MemoryEfficientSwish() if not onnx_export else Swish()
self.first_time = first_time
if self.first_time:
self.p5_down_channel = nn.Sequential(
Conv2dStaticSamePadding(conv_channels[2], num_channels, 1),
nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
)
self.p4_down_channel = nn.Sequential(
Conv2dStaticSamePadding(conv_channels[1], num_channels, 1),
nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
)
self.p3_down_channel = nn.Sequential(
Conv2dStaticSamePadding(conv_channels[0], num_channels, 1),
nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
)
self.p5_to_p6 = nn.Sequential(
Conv2dStaticSamePadding(conv_channels[2], num_channels, 1),
nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
MaxPool2dStaticSamePadding(3, 2)
)
self.p6_to_p7 = nn.Sequential(
MaxPool2dStaticSamePadding(3, 2)
)
self.p4_down_channel_2 = nn.Sequential(
Conv2dStaticSamePadding(conv_channels[1], num_channels, 1),
nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
)
self.p5_down_channel_2 = nn.Sequential(
Conv2dStaticSamePadding(conv_channels[2], num_channels, 1),
nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
)
# Weight
self.p6_w1 = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)
self.p6_w1_relu = nn.ReLU()
self.p5_w1 = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)
self.p5_w1_relu = nn.ReLU()
self.p4_w1 = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)
self.p4_w1_relu = nn.ReLU()
self.p3_w1 = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)
self.p3_w1_relu = nn.ReLU()
self.p4_w2 = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)
self.p4_w2_relu = nn.ReLU()
self.p5_w2 = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)
self.p5_w2_relu = nn.ReLU()
self.p6_w2 = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)
self.p6_w2_relu = nn.ReLU()
self.p7_w2 = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)
self.p7_w2_relu = nn.ReLU()
self.attention = attention
def forward(self, inputs):
"""
illustration of a minimal bifpn unit
P7_0 -------------------------> P7_2 -------->
|-------------| ↑
↓ |
P6_0 ---------> P6_1 ---------> P6_2 -------->
|-------------|--------------↑ ↑
↓ |
P5_0 ---------> P5_1 ---------> P5_2 -------->
|-------------|--------------↑ ↑
↓ |
P4_0 ---------> P4_1 ---------> P4_2 -------->
|-------------|--------------↑ ↑
|--------------↓ |
P3_0 -------------------------> P3_2 -------->
"""
        # downsample channels using same-padding conv2d to the target phase's if they differ
        # judge: is it the same phase as the target?
        #   if same, pass;
        #   elif an earlier phase, downsample to the target phase's by pooling
        #   elif a later phase, upsample to the target phase's by nearest interpolation
if self.attention:
p3_out, p4_out, p5_out, p6_out, p7_out = self._forward_fast_attention(inputs)
else:
p3_out, p4_out, p5_out, p6_out, p7_out = self._forward(inputs)
return p3_out, p4_out, p5_out, p6_out, p7_out
def _forward_fast_attention(self, inputs):
if self.first_time:
p3, p4, p5 = inputs
p6_in = self.p5_to_p6(p5)
p7_in = self.p6_to_p7(p6_in)
p3_in = self.p3_down_channel(p3)
p4_in = self.p4_down_channel(p4)
p5_in = self.p5_down_channel(p5)
else:
# P3_0, P4_0, P5_0, P6_0 and P7_0
p3_in, p4_in, p5_in, p6_in, p7_in = inputs
# P7_0 to P7_2
# Weights for P6_0 and P7_0 to P6_1
p6_w1 = self.p6_w1_relu(self.p6_w1)
weight = p6_w1 / (torch.sum(p6_w1, dim=0) + self.epsilon)
# Connections for P6_0 and P7_0 to P6_1 respectively
p6_up = self.conv6_up(self.swish(weight[0] * p6_in + weight[1] * self.p6_upsample(p7_in)))
# Weights for P5_0 and P6_1 to P5_1
p5_w1 = self.p5_w1_relu(self.p5_w1)
weight = p5_w1 / (torch.sum(p5_w1, dim=0) + self.epsilon)
# Connections for P5_0 and P6_1 to P5_1 respectively
p5_up = self.conv5_up(self.swish(weight[0] * p5_in + weight[1] * self.p5_upsample(p6_up)))
# Weights for P4_0 and P5_1 to P4_1
p4_w1 = self.p4_w1_relu(self.p4_w1)
weight = p4_w1 / (torch.sum(p4_w1, dim=0) + self.epsilon)
# Connections for P4_0 and P5_1 to P4_1 respectively
p4_up = self.conv4_up(self.swish(weight[0] * p4_in + weight[1] * self.p4_upsample(p5_up)))
# Weights for P3_0 and P4_1 to P3_2
p3_w1 = self.p3_w1_relu(self.p3_w1)
weight = p3_w1 / (torch.sum(p3_w1, dim=0) + self.epsilon)
# Connections for P3_0 and P4_1 to P3_2 respectively
p3_out = self.conv3_up(self.swish(weight[0] * p3_in + weight[1] * self.p3_upsample(p4_up)))
if self.first_time:
p4_in = self.p4_down_channel_2(p4)
p5_in = self.p5_down_channel_2(p5)
# Weights for P4_0, P4_1 and P3_2 to P4_2
p4_w2 = self.p4_w2_relu(self.p4_w2)
weight = p4_w2 / (torch.sum(p4_w2, dim=0) + self.epsilon)
# Connections for P4_0, P4_1 and P3_2 to P4_2 respectively
p4_out = self.conv4_down(
self.swish(weight[0] * p4_in + weight[1] * p4_up + weight[2] * self.p4_downsample(p3_out)))
# Weights for P5_0, P5_1 and P4_2 to P5_2
p5_w2 = self.p5_w2_relu(self.p5_w2)
weight = p5_w2 / (torch.sum(p5_w2, dim=0) + self.epsilon)
# Connections for P5_0, P5_1 and P4_2 to P5_2 respectively
p5_out = self.conv5_down(
self.swish(weight[0] * p5_in + weight[1] * p5_up + weight[2] * self.p5_downsample(p4_out)))
# Weights for P6_0, P6_1 and P5_2 to P6_2
p6_w2 = self.p6_w2_relu(self.p6_w2)
weight = p6_w2 / (torch.sum(p6_w2, dim=0) + self.epsilon)
# Connections for P6_0, P6_1 and P5_2 to P6_2 respectively
p6_out = self.conv6_down(
self.swish(weight[0] * p6_in + weight[1] * p6_up + weight[2] * self.p6_downsample(p5_out)))
# Weights for P7_0 and P6_2 to P7_2
p7_w2 = self.p7_w2_relu(self.p7_w2)
weight = p7_w2 / (torch.sum(p7_w2, dim=0) + self.epsilon)
# Connections for P7_0 and P6_2 to P7_2
p7_out = self.conv7_down(self.swish(weight[0] * p7_in + weight[1] * self.p7_downsample(p6_out)))
return p3_out, p4_out, p5_out, p6_out, p7_out
def _forward(self, inputs):
if self.first_time:
p3, p4, p5 = inputs
p6_in = self.p5_to_p6(p5)
p7_in = self.p6_to_p7(p6_in)
p3_in = self.p3_down_channel(p3)
p4_in = self.p4_down_channel(p4)
p5_in = self.p5_down_channel(p5)
else:
# P3_0, P4_0, P5_0, P6_0 and P7_0
p3_in, p4_in, p5_in, p6_in, p7_in = inputs
# P7_0 to P7_2
# Connections for P6_0 and P7_0 to P6_1 respectively
p6_up = self.conv6_up(self.swish(p6_in + self.p6_upsample(p7_in)))
# Connections for P5_0 and P6_1 to P5_1 respectively
p5_up = self.conv5_up(self.swish(p5_in + self.p5_upsample(p6_up)))
# Connections for P4_0 and P5_1 to P4_1 respectively
p4_up = self.conv4_up(self.swish(p4_in + self.p4_upsample(p5_up)))
# Connections for P3_0 and P4_1 to P3_2 respectively
p3_out = self.conv3_up(self.swish(p3_in + self.p3_upsample(p4_up)))
if self.first_time:
p4_in = self.p4_down_channel_2(p4)
p5_in = self.p5_down_channel_2(p5)
# Connections for P4_0, P4_1 and P3_2 to P4_2 respectively
p4_out = self.conv4_down(
self.swish(p4_in + p4_up + self.p4_downsample(p3_out)))
# Connections for P5_0, P5_1 and P4_2 to P5_2 respectively
p5_out = self.conv5_down(
self.swish(p5_in + p5_up + self.p5_downsample(p4_out)))
# Connections for P6_0, P6_1 and P5_2 to P6_2 respectively
p6_out = self.conv6_down(
self.swish(p6_in + p6_up + self.p6_downsample(p5_out)))
# Connections for P7_0 and P6_2 to P7_2
p7_out = self.conv7_down(self.swish(p7_in + self.p7_downsample(p6_out)))
return p3_out, p4_out, p5_out, p6_out, p7_out
class Regressor(nn.Module):
"""
modified by Zylo117
"""
def __init__(self, in_channels, num_anchors, num_layers, onnx_export=False):
super(Regressor, self).__init__()
        self.num_layers = num_layers
self.conv_list = nn.ModuleList(
[SeparableConvBlock(in_channels, in_channels, norm=False, activation=False) for i in range(num_layers)])
self.bn_list = nn.ModuleList(
[nn.ModuleList([nn.BatchNorm2d(in_channels, momentum=0.01, eps=1e-3) for i in range(num_layers)]) for j in
range(5)])
self.header = SeparableConvBlock(in_channels, num_anchors * 4, norm=False, activation=False)
self.swish = MemoryEfficientSwish() if not onnx_export else Swish()
def forward(self, inputs):
feats = []
for feat, bn_list in zip(inputs, self.bn_list):
for i, bn, conv in zip(range(self.num_layers), bn_list, self.conv_list):
feat = conv(feat)
feat = bn(feat)
feat = self.swish(feat)
feat = self.header(feat)
feat = feat.permute(0, 2, 3, 1)
feat = feat.contiguous().view(feat.shape[0], -1, 4)
feats.append(feat)
feats = torch.cat(feats, dim=1)
return feats
class Classifier(nn.Module):
"""
modified by Zylo117
"""
def __init__(self, in_channels, num_anchors, num_classes, num_layers, onnx_export=False):
super(Classifier, self).__init__()
self.num_anchors = num_anchors
self.num_classes = num_classes
self.num_layers = num_layers
self.conv_list = nn.ModuleList(
[SeparableConvBlock(in_channels, in_channels, norm=False, activation=False) for i in range(num_layers)])
self.bn_list = nn.ModuleList(
[nn.ModuleList([nn.BatchNorm2d(in_channels, momentum=0.01, eps=1e-3) for i in range(num_layers)]) for j in
range(5)])
self.header = SeparableConvBlock(in_channels, num_anchors * num_classes, norm=False, activation=False)
self.swish = MemoryEfficientSwish() if not onnx_export else Swish()
def forward(self, inputs):
feats = []
for feat, bn_list in zip(inputs, self.bn_list):
for i, bn, conv in zip(range(self.num_layers), bn_list, self.conv_list):
feat = conv(feat)
feat = bn(feat)
feat = self.swish(feat)
feat = self.header(feat)
feat = feat.permute(0, 2, 3, 1)
feat = feat.contiguous().view(feat.shape[0], feat.shape[1], feat.shape[2], self.num_anchors,
self.num_classes)
feat = feat.contiguous().view(feat.shape[0], -1, self.num_classes)
feats.append(feat)
feats = torch.cat(feats, dim=1)
feats = feats.sigmoid()
return feats
class EfficientNet(nn.Module):
"""
modified by Zylo117
"""
def __init__(self, compound_coef, load_weights=False):
super(EfficientNet, self).__init__()
model = EffNet.from_pretrained(f'efficientnet-b{compound_coef}', load_weights)
del model._conv_head
del model._bn1
del model._avg_pooling
del model._dropout
del model._fc
self.model = model
def forward(self, x):
x = self.model._conv_stem(x)
x = self.model._bn0(x)
x = self.model._swish(x)
feature_maps = []
# TODO: temporarily storing extra tensor last_x and del it later might not be a good idea,
# try recording stride changing when creating efficientnet,
# and then apply it here.
last_x = None
for idx, block in enumerate(self.model._blocks):
drop_connect_rate = self.model._global_params.drop_connect_rate
if drop_connect_rate:
drop_connect_rate *= float(idx) / len(self.model._blocks)
x = block(x, drop_connect_rate=drop_connect_rate)
if block._depthwise_conv.stride == [2, 2]:
feature_maps.append(last_x)
elif idx == len(self.model._blocks) - 1:
feature_maps.append(x)
last_x = x
del last_x
return feature_maps[1:]
if __name__ == '__main__':
from tensorboardX import SummaryWriter
def count_parameters(model):
        return sum(p.numel() for p in model.parameters() if p.requires_grad)
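
    # Illustrative smoke test (a documentation sketch, not from the original
    # script). The conv_channels below are assumed EfficientNet-B0 widths for
    # the C3/C4/C5 feature maps; adjust them if your lib.efficientnet differs.
    backbone = EfficientNet(compound_coef=0, load_weights=False)
    fpn = BiFPN(num_channels=64, conv_channels=[40, 112, 320], first_time=True)
    # Drop the finest backbone map and feed the remaining three to the BiFPN.
    _, p3, p4, p5 = backbone(torch.randn(1, 3, 512, 512))
    outputs = fpn([p3, p4, p5])
    print('BiFPN levels:', [tuple(o.shape) for o in outputs])
    print('BiFPN parameters:', count_parameters(fpn))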
/Flask-LwAdmin-0.6.3.tar.gz/Flask-LwAdmin-0.6.3/HISTORY.rst

.. :changelog:
History
-------
0.6.3 (2014-03-18)
++++++++++++++++++
- Bugfixes in macros (create / list / update)
0.6.2 (2013-12-31)
++++++++++++++++++
- Adding type bool display element
0.6.1 (2013-12-30)
++++++++++++++++++
- Active td in pager table for checkbox
- Bugfix in pager macro
0.6.0 (2013-10-24)
++++++++++++++++++
- API change in create, update and list macro
- API change for filters
- API change for batch_actions
0.5.1 (2013-10-16)
++++++++++++++++++
- Icon can be set for display element in list table head
- Deprecation info in list macro
- More configuration options in create and update macros and deprecation info in this macros
- HTML type action can be created
- Action can be disabled
0.5.0 (2013-10-11)
++++++++++++++++++
- API change for list configuration and list templates (object urls now generated in object class using call method)
- Changes in Jinja2 macros - breaking some macros API
0.4.2 (2013-10-08)
++++++++++++++++++
- Adding call and visible functionality for object actions in pager
0.4.1 (2013-09-30)
++++++++++++++++++
- Macros for create and update pages
0.4.0 (2013-09-29)
++++++++++++++++++
- Generators for list type page using Pager - first version for configuration API (example in Lightningwolf-SMP Project)
- API change for Pager
- API change for methods in Navbar class
0.3.2 (2013-09-15)
++++++++++++++++++
- Pager
0.3.1 (2013-09-15)
++++++++++++++++++
- New macros: filters, batch_actions, actions
0.3.0 (2013-09-15)
++++++++++++++++++
- New navbar methods and class location
- New macros location
- Start of Flask-Babel integration
- Start of Flask-Bootstrap integration
0.2.4 (2013-09-11)
++++++++++++++++++
- Navbar with dropdown, btn group, caret
0.2.3 (2013-09-03)
++++++++++++++++++
- Flask-Principal can be used to restrict visibility of navbar section
0.2.2 (2013-09-02)
++++++++++++++++++
- API change for Navbar creator
0.1.0 (2013-08-29)
++++++++++++++++++
- Initial version
/Babel-2.12.1.tar.gz/Babel-2.12.1/docs/api/core.rst

Core Functionality
==================
.. module:: babel.core
The core API provides the basic core functionality. Primarily it provides
the :class:`Locale` object and ways to create it. This object
encapsulates a locale and exposes all the data it contains.
All the core functionality is also directly importable from the `babel`
module for convenience.
Basic Interface
---------------
.. autoclass:: Locale
:members:
.. autofunction:: default_locale
.. autofunction:: negotiate_locale
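
For example, parsing a locale and negotiating against a user's preferred
locales (a short sketch; the exact display string depends on the CLDR data
shipped with your Babel version)::

    >>> from babel import Locale, negotiate_locale
    >>> Locale.parse('de_DE').display_name
    'Deutsch (Deutschland)'
    >>> negotiate_locale(['de_DE', 'en_US'], ['en', 'de'])
    'de'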
Exceptions
----------
.. autoexception:: UnknownLocaleError
:members:
Utility Functions
-----------------
.. autofunction:: get_global
.. autofunction:: parse_locale
.. autofunction:: get_locale_identifier
/Benker-0.5.4.tar.gz/Benker-0.5.4/benker/parsers/cals/__init__.py

import re
from lxml import etree
from benker.box import Box
from benker.common.lxml_iterwalk import iterwalk
from benker.common.lxml_qname import QName
from benker.parsers.base_parser import BaseParser
from benker.parsers.cals.frame_styles import BORDER_NONE
from benker.parsers.cals.frame_styles import BORDER_SOLID
from benker.parsers.cals.frame_styles import get_frame_styles
from benker.table import Table
from benker.units import convert_value
from benker.units import parse_width
# noinspection PyProtectedMember
#: Element Type
ElementType = etree._Element
class CalsParser(BaseParser):
"""
CALS tables parser
"""
def __init__(self, builder, cals_ns=None, width_unit="mm", **options):
"""
Construct a parser
:type builder: benker.builders.base_builder.BaseBuilder
:param builder:
Builder used by this parser to instantiate :class:`~benker.table.Table` objects.
:param str cals_ns:
Namespace to use for CALS elements and attributes.
Set ``None`` (or "") if you don't use namespace in your XML.
:param str width_unit:
Unit to use for table/column widths.
Possible values are: 'cm', 'dm', 'ft', 'in', 'm', 'mm', 'pc', 'pt', 'px'.
:keyword str options: Extra conversion options.
See :meth:`~benker.converters.base_converter.BaseConverter.convert_file`
to have a list of all possible options.
        .. versionchanged:: 0.5.1
           Add the option *width_unit*.
"""
self.cals_ns = cals_ns
self.width_unit = width_unit
super(CalsParser, self).__init__(builder, **options)
def get_cals_qname(self, name):
return QName(self.cals_ns, name)
def transform_tables(self, tree):
if self.cals_ns:
nodes = tree.xpath("//cals:table", namespaces={"cals": self.cals_ns})
else:
nodes = tree.xpath("//table")
for node in nodes:
table = self.parse_table(node)
table_elem = self.builder.generate_table_tree(table)
parent = node.getparent()
index = parent.index(node)
parent.insert(index, table_elem)
table_elem.tail = node.tail
parent.remove(node)
# noinspection PyPep8Naming
def parse_table(self, cals_table):
"""
Convert a ``<table>`` CALS element into table object.
:type cals_table: ElementType
:param cals_table: CALS element.
:rtype: benker.table.Table
:return: Table.
"""
state = self._state
state.reset()
# -- CALS elements
cals = self.get_cals_qname
table = cals("table").text
# titles = cals("titles").text # not supported
tgroup = cals("tgroup").text
colspec = cals("colspec").text
thead = cals("thead").text
tfoot = cals("tfoot").text
tbody = cals("tbody").text
row = cals("row").text
# entrytbl = cals("entrytbl").text # not supported
entry = cals("entry").text
elements = {table, tgroup, colspec, thead, tfoot, tbody, row, entry}
context = iterwalk(cals_table, events=("start", "end"), tag=elements)
depth = 0
for action, elem in context:
elem_tag = elem.tag
if elem_tag == table:
if action == "start":
depth += 1
else:
depth -= 1
if depth > 1:
# .. note:: context.skip_subtree() is not available for all version of lxml
# This <TBL> element is inside the table.
# It will be handled separately in another call to transform_tables()
continue
if action == "start":
# tags sorted by frequency:
if elem_tag == entry:
state.next_col()
self.parse_cals_entry(elem)
elif elem_tag == row:
state.next_row()
self.parse_cals_row(elem)
elif elem_tag in {tbody, tfoot, thead}:
# everything is done in parse_fmx_row()
pass
elif elem_tag == colspec:
state.next_col()
self.parse_cals_colspec(elem)
elif elem_tag == tgroup:
self.parse_cals_tgroup(elem)
elif elem_tag == table:
self.parse_cals_table(elem)
else:
raise NotImplementedError(elem_tag)
else:
if elem_tag in {row}:
bounding_box = Box(1, state.row_pos, len(state.table.cols), state.row_pos)
state.table.fill_missing(bounding_box, None, nature=state.row.nature)
elif elem_tag == table:
state.table.fill_missing(state.table.bounding_box, None)
return state.table
def setup_table(self, styles=None, nature=None):
table = Table(styles=styles, nature=nature)
self._state.table = table
return self._state
def parse_cals_table(self, cals_table):
"""
Parse a CALS ``table`` element.
:type cals_table: ElementType
:param cals_table: CALS table Element.
:return: State of the parser (for debug purpose).
.. versionchanged:: 0.5.1
Add support for the ``@cals:width`` attribute (table width).
"""
cals = self.get_cals_qname
styles = {}
nature = None
# -- attribute @cals:frame
frame = cals_table.attrib.get(cals("frame"))
styles.update(get_frame_styles(frame))
# -- attribute @cals:colsep
colsep = cals_table.attrib.get(cals("colsep"))
colsep_map = {"0": BORDER_NONE, "1": BORDER_SOLID}
if colsep in colsep_map:
styles["x-cell-border-right"] = colsep_map[colsep]
# -- attribute @cals:rowsep
rowsep = cals_table.attrib.get(cals("rowsep"))
rowsep_map = {"0": BORDER_NONE, "1": BORDER_SOLID}
if rowsep in rowsep_map:
styles["x-cell-border-bottom"] = rowsep_map[rowsep]
# -- attribute @cals:orient
orient = cals_table.attrib.get(cals("orient"))
orient_map = {"land": "landscape", "port": "portrait"}
if orient in orient_map:
styles["x-sect-orient"] = orient_map[orient]
# -- attribute @cals:pgwide
pgwide = cals_table.attrib.get(cals("pgwide"))
pgwide_map = {"0": "2", "1": "1"}
if pgwide in pgwide_map:
styles["x-sect-cols"] = pgwide_map[pgwide]
# -- attribute @cals:bgcolor
bgcolor = cals_table.attrib.get(cals("bgcolor"))
if bgcolor:
styles["background-color"] = bgcolor
# -- attribute @cals:tabstyle
tabstyle = cals_table.attrib.get(cals("tabstyle"))
if tabstyle:
nature = tabstyle
        # -- attribute @cals:width
width = cals_table.attrib.get(cals("width"))
if width:
width, unit = parse_width(width)
value = convert_value(width, unit, self.width_unit)
styles["width"] = u"{value:0.2f}{unit}".format(value=value, unit=self.width_unit)
return self.setup_table(styles, nature)
def parse_cals_tgroup(self, cals_tgroup):
cals = self.get_cals_qname
styles = {}
nature = self._state.table.nature
# -- attribute @cals:cols => ignored (*table.cols*)
# -- attribute @cals:colsep
colsep = cals_tgroup.attrib.get(cals("colsep"))
colsep_map = {"0": BORDER_NONE, "1": BORDER_SOLID}
if colsep in colsep_map:
styles["x-cell-border-right"] = colsep_map[colsep]
# -- attribute @cals:rowsep
rowsep = cals_tgroup.attrib.get(cals("rowsep"))
rowsep_map = {"0": BORDER_NONE, "1": BORDER_SOLID}
if rowsep in rowsep_map:
styles["x-cell-border-bottom"] = rowsep_map[rowsep]
# -- attribute @cals:tgroupstyle
tgroupstyle = cals_tgroup.attrib.get(cals("tgroupstyle"))
if tgroupstyle:
nature = self._state.table.nature
if nature:
parts = nature.split(" ")
nature = " ".join(parts[:-1] + [tgroupstyle])
else:
nature = tgroupstyle
# -- Override the table defaults
state = self._state
table = state.table
table.styles.update(styles)
table.nature = nature
return state # mainly for unit test
def parse_cals_row(self, cals_row):
"""
Parse a ``row`` element which contains ``entry`` elements.
        This element may be inside a ``BLK`` element.
:type cals_row: ElementType
:param cals_row: table row
.. versionchanged:: 0.5.1
The "vertical-align" style is built from the ``@cals:valign`` attribute.
"""
cals = self.get_cals_qname
styles = {}
nature = None # overridden below if parent's element exists
cals_parent = cals_row.getparent() # type: ElementType
if cals_parent is not None:
# -- nature of the row
tag_map = {"thead": "header", "tfoot": "footer", "tbody": "body"}
localname = QName(cals_parent.tag).localname
nature = tag_map[localname]
# -- attribute @cals:valign
valign = cals_parent.attrib.get(cals("valign"))
valign_map = {'top': 'top', 'middle': 'middle', 'bottom': 'bottom'}
if valign in valign_map:
styles["vertical-align"] = valign_map[valign]
# -- attribute @cals:valign
valign = cals_row.attrib.get(cals("valign"))
valign_map = {'top': 'top', 'middle': 'middle', 'bottom': 'bottom'}
if valign in valign_map:
# overrides parent's value
styles["vertical-align"] = valign_map[valign]
# -- attribute @cals:rowsep
rowsep = cals_row.attrib.get(cals("rowsep"))
rowsep_map = {"0": BORDER_NONE, "1": BORDER_SOLID}
if rowsep in rowsep_map:
styles["border-bottom"] = rowsep_map[rowsep]
# -- attribute @cals:bgcolor
bgcolor = cals_row.attrib.get(cals("bgcolor"))
if bgcolor:
styles["background-color"] = bgcolor
# -- attribute @cals:rowstyle (extension)
rowstyle = cals_row.attrib.get(cals("rowstyle"))
if rowstyle:
# overrides the previously calculated @cals:rowstyle attribute
styles["rowstyle"] = rowstyle
# -- Create a row
state = self._state
state.row = state.table.rows[state.row_pos]
state.row.nature = nature
state.row.styles = styles
return state # mainly for unit test
def parse_cals_entry(self, cals_entry):
"""
Parse a ``entry`` element.
:type cals_entry: ElementType
:param cals_entry: table entry
.. versionchanged:: 0.5.1
The "vertical-align" style is built from the ``@cals:valign`` attribute.
.. versionchanged:: 0.5.2
Add support for the ``@cals:cellstyle`` attribute (extension).
This attribute is required for two-way conversion of Formex tables to CALS and vice versa.
If the ``CELL/@TYPE`` and the ``ROW/@TYPE`` are different, we add a specific "cellstyle" style.
This style will keep the ``CELL/@TYPE`` value.
.. versionchanged:: 0.5.3
Improved empty cells detection for Formex4 conversion (``<IE/>`` tag management).
"""
cals = self.get_cals_qname
styles = {}
# -- attribute @cals:colsep
colsep = cals_entry.attrib.get(cals("colsep"))
colsep_map = {"0": BORDER_NONE, "1": BORDER_SOLID}
if colsep in colsep_map:
styles["border-right"] = colsep_map[colsep]
# -- attribute @cals:rowsep
rowsep = cals_entry.attrib.get(cals("rowsep"))
rowsep_map = {"0": BORDER_NONE, "1": BORDER_SOLID}
if rowsep in rowsep_map:
styles["border-bottom"] = rowsep_map[rowsep]
# -- attribute @cals:bgcolor
bgcolor = cals_entry.attrib.get(cals("bgcolor"))
if bgcolor:
styles["background-color"] = bgcolor
# -- attributes @cals:namest and @cals:nameend
name_start = cals_entry.attrib.get(cals("namest"), str(self._state.col_pos))
name_end = cals_entry.attrib.get(cals("nameend"), str(self._state.col_pos))
col_start = int(re.findall(r"\d+", name_start)[0])
col_end = int(re.findall(r"\d+", name_end)[0])
width = col_end - col_start + 1
# -- attribute @cals:morerows
morerows = cals_entry.attrib.get(cals("morerows"), "0")
height = int(morerows) + 1
# -- attribute @cals:valign
valign = cals_entry.attrib.get(cals("valign"))
valign_map = {'top': 'top', 'middle': 'middle', 'bottom': 'bottom'}
if valign in valign_map:
# overrides parent's value
styles["vertical-align"] = valign_map[valign]
        # -- attribute @cals:align
align = cals_entry.attrib.get(cals("align"))
align_map = {"left": "left", "right": "right", "center": "center", "justify": "justify", "char": "left"}
if align in align_map:
styles["align"] = align_map[align]
        # -- attribute @cals:cellstyle (extension)
cellstyle = cals_entry.attrib.get(cals("cellstyle"))
if cellstyle:
styles["cellstyle"] = cellstyle
# todo: calculate the ``@rotate`` attribute.
# -- Create a entry
text = [cals_entry.text] if cals_entry.text else []
content = text + cals_entry.getchildren()
# The detection of empty cells is used when converting
# to the Formex4 format in order to insert an empty tag ``<IE/>``.
# see: https://github.com/laurent-laporte-pro/benker/issues/13
#
# When parsing a CALS table, we will consider a cell to be empty when it contains no text
# or child tag. As we do not know the elements contained in an ``<entry>`` element,
# we cannot know precisely if it is an empty content or not (eg. empty paragraph or BR).
if not content:
styles["x-cell-empty"] = "true"
nature = self._state.row.nature
self._state.row.insert_cell(content, width=width, height=height, styles=styles, nature=nature)
return self._state # mainly for unit test
def parse_cals_colspec(self, cals_colspec):
"""
Parse a CALS-like ``colspec`` element.
For instance:
.. code-block:: xml
<colspec
colname="c1"
colnum="1"
colsep="1"
rowsep="1"
colwidth="30mm"
align="center"/>
:type cals_colspec: ElementType
:param cals_colspec: CALS-like ``colspec`` element.
"""
cals_parent = cals_colspec.getparent()
if cals_parent is not None: # pragma: no cover
localname = QName(cals_parent).localname
if localname not in {"table", "tgroup"}:
raise NotImplementedError("colspec in {} not supported".format(localname))
cals = self.get_cals_qname
styles = {}
# -- attribute @cals:colname is ignored
# -- attribute @cals:char is ignored
# -- attribute @cals:charoff is ignored
# -- attribute @cals:colnum
colnum = cals_colspec.attrib.get(cals("colnum"))
colnum = int(colnum) if colnum else self._state.col_pos
# -- attribute @cals:colsep
colsep = cals_colspec.attrib.get(cals("colsep"))
colsep_map = {"0": BORDER_NONE, "1": BORDER_SOLID}
if colsep in colsep_map:
styles["border-right"] = colsep_map[colsep]
# -- attribute @cals:rowsep
rowsep = cals_colspec.attrib.get(cals("rowsep"))
rowsep_map = {"0": BORDER_NONE, "1": BORDER_SOLID}
if rowsep in rowsep_map:
styles["border-bottom"] = rowsep_map[rowsep]
        # -- attribute @cals:colwidth
colwidth = cals_colspec.attrib.get(cals("colwidth"))
if colwidth:
styles["width"] = colwidth
        # -- attribute @cals:align
align = cals_colspec.attrib.get(cals("align"))
align_map = {"left": "left", "right": "right", "center": "center", "justify": "justify", "char": "left"}
if align in align_map:
styles["align"] = align_map[align]
state = self._state
state.col = state.table.cols[colnum]
state.col.styles.update(styles)
        return state
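

# ---------------------------------------------------------------------------
# Illustrative usage sketch (``my_builder`` is hypothetical; substitute the
# concrete benker builder you convert to):
#
#   from lxml import etree
#
#   parser = CalsParser(builder=my_builder, cals_ns=None, width_unit="mm")
#   tree = etree.parse("tables_with_cals_markup.xml")
#   parser.transform_tables(tree)  # replaces each <table> with built markup
#   tree.write("tables_converted.xml")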
/CDS-1.0.1.tar.gz/CDS-1.0.1/cds/modules/deposit/static/js/cds_deposit/avc/avc.module.js

function cdsDepositsConfig(
$locationProvider,
depositExtractedMetadataProvider,
depositStatesProvider,
depositStatusesProvider,
depositActions,
inheritedPropertiesProvider,
taskRepresentationsProvider,
urlBuilderProvider,
typeReducerProvider,
localStorageServiceProvider,
sfErrorMessageProvider,
jwtProvider,
$httpProvider,
$compileProvider
) {
$locationProvider.html5Mode({
enabled: true,
requireBase: false,
rewriteLinks: false,
});
sfErrorMessageProvider.setDefaultMessage(0, 'This field is required.');
// Local storage configuration
localStorageServiceProvider.setPrefix('cdsDeposit');
var mainStatuses = [
'file_upload',
'file_download',
'file_video_metadata_extraction',
'file_video_extract_frames',
'file_transcode',
];
// Initialize the states
depositStatesProvider.setValues(mainStatuses);
// Initialize statuses provider
depositStatusesProvider.setValues({
PENDING: 'DEPOSIT_STATE/PENDING',
STARTED: 'DEPOSIT_STATE/STARTED',
FAILURE: 'DEPOSIT_STATE/FAILURE',
SUCCESS: 'DEPOSIT_STATE/SUCCESS',
REVOKED: 'DEPOSIT_STATE/REVOKED',
});
// Initialize extracted metadata pre-fill
depositExtractedMetadataProvider.setValues({
values:{
'title': function(deposit, metadata){
if('title' in metadata){
deposit.title = {title: metadata.title};
return metadata.title;
}
},
'description': function(deposit, metadata){
if('description' in metadata){
deposit.description = metadata.description;
return metadata.description;
}
},
'keywords': function(deposit, metadata){
if('keywords' in metadata){
deposit.keywords = metadata.keywords.map(function(keyword){
return {name: keyword, value: {name: keyword}};
});
return metadata.keywords.join(', ');
}
},
'date': function(deposit, metadata){
if('creation_time' in metadata){
deposit.date = new Date(metadata.creation_time).toISOString().slice(0,10);
return deposit.date;
}
},
}
});
// Deposit actions' information
depositActions.setValues(['project', 'video'])
inheritedPropertiesProvider.setValues([
'contributors',
'date',
'description',
'keywords',
'license',
'title.title',
'translations',
]);
taskRepresentationsProvider.setValues({
file_upload: 'Video file upload',
file_download: 'Video file download',
file_transcode: 'Video transcoding',
file_video_extract_frames: 'Video frame extraction',
file_video_metadata_extraction: 'Video metadata extraction'
});
// Initialize url builder
urlBuilderProvider.setBlueprints({
iiif: '/api/iiif/v2/<%=deposit%>:<%=key%>:<%=version%>/full/<%=res%>/0/default.png',
categories: '/api/categories',
video: '/deposit/<%=deposit%>/preview/video/<%=key%>',
eventInfo: '/hooks/receivers/avc/events/<%=eventId%>',
restartEvent: '/hooks/receivers/avc/events/<%=eventId%>/tasks/<%=taskId%>',
taskFeedback: '/hooks/receivers/avc/events/<%=eventId%>/feedback',
selfVideo: '/api/deposits/video/<%=deposit%>',
bucketVideo: '/api/files/<%=bucket%>',
actionVideo: '/api/deposits/video/<%=deposit%>/actions/<%=action%>',
record: '/record/<%=recid%>',
});
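  // Illustrative note (an assumption about the urlBuilder service defined
  // elsewhere): blueprints are lodash-style templates, so a call such as
  //   urlBuilder.iiif({deposit: 'd1', key: 'f.mp4', version: 'v1', res: '150,100'})
  // is expected to yield '/api/iiif/v2/d1:f.mp4:v1/full/150,100/0/default.png'.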
// Initialize type reducer
typeReducerProvider.setBlueprints({
SUCCESS: function(type, data) {
if (type === 'update_deposit') {
this.updateDeposit(data.meta.payload.deposit);
}
},
});
// JWT Token
  // Search the DOM for the authorized token, if it exists
var authorized_token = document.getElementsByName('authorized_token');
if (authorized_token.length > 0) {
    var token = authorized_token[0].value;
// No cache on API requests
var headers = {
'Authorization': 'Bearer ' + token,
'Content-Type': 'application/json',
};
// Add no cache on all ``GET`` requests
var _get = _.merge(
headers,
{
'Cache-Control': 'no-cache, no-store, must-revalidate',
'Pragma': 'no-cache',
'Expires': 0
}
);
jwtProvider.setHeader(_get);
$httpProvider.defaults.headers['delete'] = headers;
$httpProvider.defaults.headers['post'] = headers;
$httpProvider.defaults.headers['put'] = headers;
}
  // Optimize Angular in production
// $compileProvider.debugInfoEnabled(false);
}
// Inject the necessary angular services
cdsDepositsConfig.$inject = [
'$locationProvider',
'depositExtractedMetadataProvider',
'depositStatesProvider',
'depositStatusesProvider',
'depositActionsProvider',
'inheritedPropertiesProvider',
'taskRepresentationsProvider',
'urlBuilderProvider',
'typeReducerProvider',
'localStorageServiceProvider',
'sfErrorMessageProvider',
'jwtProvider',
'$httpProvider',
'$compileProvider',
];
// Register modules
angular.module('cdsDeposit.filters', []);
angular.module('cdsDeposit.providers', []);
angular.module('cdsDeposit.components', []);
angular.module('cdsDeposit.factories', []);
// Register all cdsDeposit module into one
angular.module('cdsDeposit.modules', [
'cdsDeposit.filters',
'cdsDeposit.providers',
'cdsDeposit.factories',
'cdsDeposit.components',
'LocalStorageModule',
'schemaForm',
]).config(cdsDepositsConfig);
angular
.module('cdsDeposit.filters')
.filter('taskRepr', function(taskRepresentations) {
return function(input) {
return taskRepresentations[input] || input;
};
});
angular.module('schemaForm')
.controller('invenioDynamicSelectController', ['$scope', '$controller', '$select', '$http',
function ($scope, $controller, $select, $http) {
$controller('dynamicSelectController', {$scope: $scope});
// Formats tags tokens in ``uiselectmultiple``
$scope.form.formatTokenTags = function(item) {
return {
name: item,
value: {
name: item
}
}
}
// Use this only in multiple select
if ($scope.form.type === 'uiselectmultiple') {
        // store the $scope form, as it is not accessible inside the handler's scope below
var form = $scope.form;
$scope.$on('cds.deposit.form.keywords.inherit', function(event, record) {
form.internalModelTags = record.keywords;
});
}
if ($scope.modelArray) {
$scope.$watchCollection('modelArray', function (newValue) {
// If this is not the initial setting of the element...
if (!angular.equals($scope.select_model, {})) {
// Get the element's correct value from the array model
var formKey = $scope.form.key.slice(-1)[0],
value = $scope.modelArray[$scope.arrayIndex][formKey];
// Set ui-select's model to the correct value if needed
if (value && $scope.insideModel !== value) {
$scope.insideModel = value;
var query = $scope.$eval($scope.form.options.processQuery || 'query', {query: value});
$scope.populateTitleMap($scope.form, query);
$scope.select_model.selected = $scope.find_in_titleMap(value);
}
}
});
}
}]);
// Initialize the module
angular
.module('cdsDeposit', [
'cdsDeposit.modules',
'schemaForm',
'mgcrea.ngStrap',
'mgcrea.ngStrap.modal',
'pascalprecht.translate',
'ui.sortable',
'ui.select',
'mgcrea.ngStrap.select',
'mgcrea.ngStrap.datepicker',
'mgcrea.ngStrap.helpers.dateParser',
'mgcrea.ngStrap.tooltip',
'ngFileUpload',
'monospaced.elastic',
'invenioFiles.filters',
'afkl.lazyImage',
'hl.sticky',
'duScroll',
'toaster',
'angular-loading-bar',
  ]);
/GuangTestBeat-0.13.1-cp38-cp38-macosx_10_9_x86_64.whl/econml/dowhy.py

import inspect
import pandas as pd
import numpy as np
import warnings
from econml.utilities import check_input_arrays, reshape_arrays_2dim, get_input_columns, MissingModule
try:
import dowhy
from dowhy import CausalModel
except ImportError as exn:
dowhy = CausalModel = MissingModule("dowhy is no longer a dependency of the main econml "
"package; install econml[dowhy] or econml[all] to require it, or install "
"dowhy separately to use dowhy from econml", exn)
class DoWhyWrapper:
"""
    A wrapper class that allows the user to call other methods from the dowhy package through EconML.
(e.g. causal graph, refutation test, etc.)
Parameters
----------
cate_estimator: instance
An instance of any CATE estimator we currently support
"""
def __init__(self, cate_estimator):
from pkg_resources import parse_version
if parse_version(dowhy.__version__) >= parse_version('0.9'):
warnings.warn("econml has not been tested with dowhy versions >= 0.9")
self._cate_estimator = cate_estimator
def _get_params(self):
init = self._cate_estimator.__init__
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = inspect.signature(init)
parameters = init_signature.parameters.values()
params = []
for p in parameters:
if p.kind == p.VAR_POSITIONAL or p.kind == p.VAR_KEYWORD:
raise RuntimeError("cate estimators should always specify their parameters in the signature "
"of their __init__ (no varargs, no varkwargs). "
f"{self._cate_estimator} with constructor {init_signature} doesn't "
"follow this convention.")
# if the argument is deprecated, ignore it
if p.default != "deprecated":
params.append(p.name)
# Extract and sort argument names excluding 'self'
return sorted(params)
def fit(self, Y, T, X=None, W=None, Z=None, *, outcome_names=None, treatment_names=None, feature_names=None,
confounder_names=None, instrument_names=None, graph=None, estimand_type="nonparametric-ate",
proceed_when_unidentifiable=True, missing_nodes_as_confounders=False,
control_value=0, treatment_value=1, target_units="ate", **kwargs):
"""
        Estimate the counterfactual model from data through the dowhy package.
Parameters
----------
Y: vector of length n
Outcomes for each sample
T: vector of length n
Treatments for each sample
X: optional (n, d_x) matrix (Default=None)
Features for each sample
W: optional (n, d_w) matrix (Default=None)
Controls for each sample
Z: optional (n, d_z) matrix (Default=None)
Instruments for each sample
outcome_names: optional list (Default=None)
Name of the outcome
treatment_names: optional list (Default=None)
Name of the treatment
feature_names: optional list (Default=None)
Name of the features
confounder_names: optional list (Default=None)
Name of the confounders
instrument_names: optional list (Default=None)
Name of the instruments
graph: optional
Path to DOT file containing a DAG or a string containing a DAG specification in DOT format
estimand_type: optional string
Type of estimand requested (currently only "nonparametric-ate" is supported).
In the future, may support other specific parametric forms of identification
proceed_when_unidentifiable: optional bool (Default=True)
Whether the identification should proceed by ignoring potential unobserved confounders
missing_nodes_as_confounders: optional bool (Default=False)
Whether variables in the dataframe that are not included in the causal graph should be automatically
included as confounder nodes
control_value: optional scalar (Default=0)
Value of the treatment in the control group, for effect estimation
treatment_value: optional scalar (Default=1)
Value of the treatment in the treated group, for effect estimation
target_units: optional (Default="ate")
The units for which the treatment effect should be estimated.
This can be of three types:
1. A string for common specifications of target units (namely, "ate", "att" and "atc"),
2. A lambda function that can be used as an index for the data (pandas DataFrame),
3. A new DataFrame that contains values of the effect_modifiers and effect will be estimated
only for this new data
kwargs: optional
Other keyword arguments from fit method for CATE estimator
Returns
-------
self
"""
# column names
if outcome_names is None:
outcome_names = get_input_columns(Y, prefix="Y")
if treatment_names is None:
treatment_names = get_input_columns(T, prefix="T")
if feature_names is None:
if X is not None:
feature_names = get_input_columns(X, prefix="X")
else:
feature_names = []
if confounder_names is None:
if W is not None:
confounder_names = get_input_columns(W, prefix="W")
else:
confounder_names = []
if instrument_names is None:
if Z is not None:
instrument_names = get_input_columns(Z, prefix="Z")
else:
instrument_names = []
column_names = outcome_names + treatment_names + feature_names + confounder_names + instrument_names
# transfer input to numpy arrays
Y, T, X, W, Z = check_input_arrays(Y, T, X, W, Z)
# transfer input to 2d arrays
n_obs = Y.shape[0]
Y, T, X, W, Z = reshape_arrays_2dim(n_obs, Y, T, X, W, Z)
# create dataframe
df = pd.DataFrame(np.hstack((Y, T, X, W, Z)), columns=column_names)
# currently dowhy only support single outcome and single treatment
assert Y.shape[1] == 1, "Can only accept single dimensional outcome."
assert T.shape[1] == 1, "Can only accept single dimensional treatment."
# call dowhy
self.dowhy_ = CausalModel(
data=df,
treatment=treatment_names,
outcome=outcome_names,
graph=graph,
common_causes=feature_names + confounder_names if X.shape[1] > 0 or W.shape[1] > 0 else None,
instruments=instrument_names if Z.shape[1] > 0 else None,
effect_modifiers=feature_names if X.shape[1] > 0 else None,
estimand_type=estimand_type,
            proceed_when_unidentifiable=proceed_when_unidentifiable,
missing_nodes_as_confounders=missing_nodes_as_confounders
)
self.identified_estimand_ = self.dowhy_.identify_effect(proceed_when_unidentifiable=True)
method_name = "backdoor." + self._cate_estimator.__module__ + "." + self._cate_estimator.__class__.__name__
init_params = {}
for p in self._get_params():
init_params[p] = getattr(self._cate_estimator, p)
self.estimate_ = self.dowhy_.estimate_effect(self.identified_estimand_,
method_name=method_name,
control_value=control_value,
treatment_value=treatment_value,
target_units=target_units,
method_params={
"init_params": init_params,
"fit_params": kwargs,
},
)
return self
def refute_estimate(self, *, method_name, **kwargs):
"""
Refute an estimated causal effect.
If method_name is provided, uses the provided method. In the future, we may support automatic
selection of suitable refutation tests.
Following refutation methods are supported:
- Adding a randomly-generated confounder: "random_common_cause"
- Adding a confounder that is associated with both treatment and outcome: "add_unobserved_common_cause"
- Replacing the treatment with a placebo (random) variable): "placebo_treatment_refuter"
- Removing a random subset of the data: "data_subset_refuter"
For more details, see docs :mod:`dowhy.causal_refuters`
Parameters
----------
method_name: string
Name of the refutation method
kwargs: optional
Additional arguments that are passed directly to the refutation method.
Can specify a random seed here to ensure reproducible results ('random_seed' parameter).
For method-specific parameters, consult the documentation for the specific method.
All refutation methods are in the causal_refuters subpackage.
Returns
-------
RefuteResult: an instance of the RefuteResult class
"""
return self.dowhy_.refute_estimate(
self.identified_estimand_, self.estimate_, method_name=method_name, **kwargs
)
# We don't allow user to call refit_final from this class, since internally dowhy effect estimate will only update
# cate estimator but not the effect.
def refit_final(self, inference=None):
raise AttributeError(
"Method refit_final is not allowed through a dowhy object; please perform a full fit instead.")
def __getattr__(self, attr):
# don't proxy special methods
if attr.startswith('__'):
raise AttributeError(attr)
elif attr in ['_cate_estimator', 'dowhy_',
'identified_estimand_', 'estimate_']:
return super().__getattr__(attr)
elif attr.startswith('dowhy__'):
return getattr(self.dowhy_, attr[len('dowhy__'):])
elif hasattr(self.estimate_._estimator_object, attr):
if hasattr(self.dowhy_, attr):
warnings.warn("This call is ambiguous, "
"we're defaulting to CATE estimator's attribute. "
"Please add 'dowhy__' as prefix if you want to get dowhy attribute.", UserWarning)
return getattr(self.estimate_._estimator_object, attr)
else:
return getattr(self.dowhy_, attr)
def __setattr__(self, attr, value):
if attr in ['_cate_estimator', 'dowhy_',
'identified_estimand_', 'estimate_']:
super().__setattr__(attr, value)
elif attr.startswith('dowhy__'):
setattr(self.dowhy_, attr[len('dowhy__'):], value)
elif hasattr(self.estimate_._estimator_object, attr):
if hasattr(self.dowhy_, attr):
warnings.warn("This call is ambiguous, "
"we're defaulting to CATE estimator's attribute. "
"Please add 'dowhy__' as prefix if you want to set dowhy attribute.", UserWarning)
setattr(self.estimate_._estimator_object, attr, value)
else:
            setattr(self.dowhy_, attr, value)
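

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not executed on import). It assumes econml's
# LinearDML estimator and the ``.dowhy`` accessor through which this wrapper
# is exposed on CATE estimators:
#
#   import numpy as np
#   from econml.dml import LinearDML
#
#   n = 1000
#   X = np.random.normal(size=(n, 3))
#   T = np.random.binomial(1, 0.5, size=n)
#   Y = T * (1 + X[:, 0]) + np.random.normal(size=n)
#
#   est = LinearDML(discrete_treatment=True)
#   wrapper = est.dowhy.fit(Y, T, X=X)  # returns a fitted DoWhyWrapper
#   wrapper.refute_estimate(method_name="random_common_cause")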
/BIA_OBS-1.0.3.tar.gz/BIA_OBS-1.0.3/BIA/static/dist/node_modules/tailwindcss/src/lib/evaluateTailwindFunctions.js

import dlv from 'dlv'
import didYouMean from 'didyoumean'
import transformThemeValue from '../util/transformThemeValue'
import parseValue from 'postcss-value-parser'
import { normalizeScreens } from '../util/normalizeScreens'
import buildMediaQuery from '../util/buildMediaQuery'
import { toPath } from '../util/toPath'
import { withAlphaValue } from '../util/withAlphaVariable'
import { parseColorFormat } from '../util/pluginUtils'
import log from '../util/log'
function isObject(input) {
return typeof input === 'object' && input !== null
}
function findClosestExistingPath(theme, path) {
let parts = toPath(path)
do {
parts.pop()
if (dlv(theme, parts) !== undefined) break
} while (parts.length)
return parts.length ? parts : undefined
}
function pathToString(path) {
if (typeof path === 'string') return path
return path.reduce((acc, cur, i) => {
if (cur.includes('.')) return `${acc}[${cur}]`
return i === 0 ? cur : `${acc}.${cur}`
}, '')
}
function list(items) {
return items.map((key) => `'${key}'`).join(', ')
}
function listKeys(obj) {
return list(Object.keys(obj))
}
function validatePath(config, path, defaultValue, themeOpts = {}) {
const pathString = Array.isArray(path) ? pathToString(path) : path.replace(/^['"]+|['"]+$/g, '')
const pathSegments = Array.isArray(path) ? path : toPath(pathString)
const value = dlv(config.theme, pathSegments, defaultValue)
if (value === undefined) {
let error = `'${pathString}' does not exist in your theme config.`
const parentSegments = pathSegments.slice(0, -1)
const parentValue = dlv(config.theme, parentSegments)
if (isObject(parentValue)) {
const validKeys = Object.keys(parentValue).filter(
(key) => validatePath(config, [...parentSegments, key]).isValid
)
const suggestion = didYouMean(pathSegments[pathSegments.length - 1], validKeys)
if (suggestion) {
error += ` Did you mean '${pathToString([...parentSegments, suggestion])}'?`
} else if (validKeys.length > 0) {
error += ` '${pathToString(parentSegments)}' has the following valid keys: ${list(
validKeys
)}`
}
} else {
const closestPath = findClosestExistingPath(config.theme, pathString)
if (closestPath) {
const closestValue = dlv(config.theme, closestPath)
if (isObject(closestValue)) {
error += ` '${pathToString(closestPath)}' has the following keys: ${listKeys(
closestValue
)}`
} else {
error += ` '${pathToString(closestPath)}' is not an object.`
}
} else {
error += ` Your theme has the following top-level keys: ${listKeys(config.theme)}`
}
}
return {
isValid: false,
error,
}
}
if (
!(
typeof value === 'string' ||
typeof value === 'number' ||
typeof value === 'function' ||
value instanceof String ||
value instanceof Number ||
Array.isArray(value)
)
) {
let error = `'${pathString}' was found but does not resolve to a string.`
if (isObject(value)) {
let validKeys = Object.keys(value).filter(
(key) => validatePath(config, [...pathSegments, key]).isValid
)
if (validKeys.length) {
error += ` Did you mean something like '${pathToString([...pathSegments, validKeys[0]])}'?`
}
}
return {
isValid: false,
error,
}
}
const [themeSection] = pathSegments
return {
isValid: true,
value: transformThemeValue(themeSection)(value, themeOpts),
}
}
function extractArgs(node, vNodes, functions) {
vNodes = vNodes.map((vNode) => resolveVNode(node, vNode, functions))
let args = ['']
for (let vNode of vNodes) {
if (vNode.type === 'div' && vNode.value === ',') {
args.push('')
} else {
args[args.length - 1] += parseValue.stringify(vNode)
}
}
return args
}
function resolveVNode(node, vNode, functions) {
if (vNode.type === 'function' && functions[vNode.value] !== undefined) {
let args = extractArgs(node, vNode.nodes, functions)
vNode.type = 'word'
vNode.value = functions[vNode.value](node, ...args)
}
return vNode
}
function resolveFunctions(node, input, functions) {
return parseValue(input)
.walk((vNode) => {
resolveVNode(node, vNode, functions)
})
.toString()
}
let nodeTypePropertyMap = {
atrule: 'params',
decl: 'value',
}
/**
* @param {string} path
* @returns {Iterable<[path: string, alpha: string|undefined]>}
*/
function* toPaths(path) {
// Strip quotes from beginning and end of string
// This allows the alpha value to be present inside of quotes
path = path.replace(/^['"]+|['"]+$/g, '')
let matches = path.match(/^([^\s]+)(?![^\[]*\])(?:\s*\/\s*([^\/\s]+))$/)
let alpha = undefined
yield [path, undefined]
if (matches) {
path = matches[1]
alpha = matches[2]
yield [path, alpha]
}
}
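// e.g. toPaths("colors.red.500 / 75%") yields ["colors.red.500 / 75%", undefined]
// first and then ["colors.red.500", "75%"], letting resolvePath try the literal
// path first and fall back to the alpha-split interpretation.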
/**
*
* @param {any} config
* @param {string} path
* @param {any} defaultValue
*/
function resolvePath(config, path, defaultValue) {
const results = Array.from(toPaths(path)).map(([path, alpha]) => {
return Object.assign(validatePath(config, path, defaultValue, { opacityValue: alpha }), {
resolvedPath: path,
alpha,
})
})
return results.find((result) => result.isValid) ?? results[0]
}
export default function (context) {
let config = context.tailwindConfig
let functions = {
theme: (node, path, ...defaultValue) => {
let { isValid, value, error, alpha } = resolvePath(
config,
path,
defaultValue.length ? defaultValue : undefined
)
if (!isValid) {
let parentNode = node.parent
let candidate = parentNode?.raws.tailwind?.candidate
if (parentNode && candidate !== undefined) {
// Remove this utility from any caches
context.markInvalidUtilityNode(parentNode)
// Remove the CSS node from the markup
parentNode.remove()
// Show a warning
log.warn('invalid-theme-key-in-class', [
`The utility \`${candidate}\` contains an invalid theme value and was not generated.`,
])
return
}
throw node.error(error)
}
let maybeColor = parseColorFormat(value)
let isColorFunction = maybeColor !== undefined && typeof maybeColor === 'function'
if (alpha !== undefined || isColorFunction) {
if (alpha === undefined) {
alpha = 1.0
}
value = withAlphaValue(maybeColor, alpha, maybeColor)
}
return value
},
screen: (node, screen) => {
screen = screen.replace(/^['"]+/g, '').replace(/['"]+$/g, '')
let screens = normalizeScreens(config.theme.screens)
let screenDefinition = screens.find(({ name }) => name === screen)
if (!screenDefinition) {
throw node.error(`The '${screen}' screen does not exist in your theme.`)
}
return buildMediaQuery(screenDefinition)
},
}
return (root) => {
root.walk((node) => {
let property = nodeTypePropertyMap[node.type]
if (property === undefined) {
return
}
node[property] = resolveFunctions(node, node[property], functions)
})
}
} | PypiClean |
/ImageD11-1.9.9.tar.gz/ImageD11-1.9.9/sandbox/list_folder_images.py | from __future__ import print_function
"""
For a peak search interface:
try to navigate the output of 'ls' in folders with lots of files,
compressing the list of names when there are many numbered files.
"""
import re, os, time, sys
try:
from os import scandir
except ImportError:
class wrap:
def __init__(self, folder, name):
self.folder = folder
self.name = name
def is_dir(self):
# resolve against the scanned folder, not the current working directory
return os.path.isdir(os.path.join(self.folder, self.name))
def scandir(folder):
for name in os.listdir(folder):
yield wrap(folder, name)
def list_a_folder( folder ):
"""
Lists files in the folder
Groups into:
directories
names of type (stem)(num)(extn)
things it doesn't understand (no number)
"""
# lazy stem, then a run of digits, then trailing non-digits, anchored at the end
reg = re.compile(r"(.*?)(\d*)(\D*)$")
direcs = []
names = []
files = {} # keyed by extension, then stem, then num sorted
for f in scandir( folder ):
if f.is_dir():
direcs.append( f.name )
continue
items = reg.match( f.name )
if items:
stem, num, extn = items.groups()
if len(num)==0:
names.append( f.name )
continue
if extn in files:
if stem in files[extn]:
files[extn][stem].append( num )
else:
files[extn][stem]=[num,]
else:
files[extn]={stem:[num,]}
else:
names.append( f.name )
# sort the numbers
for extn in files:
for stem in files[extn]:
try:
dsu = [(int(s), s) for s in files[extn][stem]]
except ValueError:
print(files[extn][stem])
raise
dsu.sort()
files[extn][stem] = [s[1] for s in dsu]
return direcs, files, names
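# Example: a folder holding data0000.edf ... data0099.edf plus notes.txt comes
# back as (illustrative names):
#     direcs == []
#     files  == {".edf": {"data": ["0000", "0001", ..., "0099"]}}
#     names  == ["notes.txt"]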
def print_a_folder( folder ):
direcs , files, names = list_a_folder( folder )
print("In folder:",folder)
if len(direcs)>0:
print(" Directories:")
for d in sorted(direcs):
print("\t",d)
if len(names)>0:
print(" Files:")
for n in sorted(names):
print("\t",n)
if len(files)>0:
for extn in files:
for stem in files[extn]:
nums = files[extn][stem]
if len(nums)>3:
print("\t",stem+nums[0]+extn)
print("\t",stem+("?"*len(nums[0]))+extn,
" ... skip ",len(nums)-2,"files")
print("\t",stem+nums[-1]+extn,
" ... total ",len(nums),"files")
else:
for num in nums:
print("\t",stem+num+extn)
if __name__=="__main__":
if len(sys.argv)>1:
print_a_folder( sys.argv[1] )
else:
print_a_folder( "." ) | PypiClean |
/GraphiPy-0.0.2a0-py3-none-any.whl/graphipy/graph/graph_neo4j.py | from py2neo import Graph
import os
import csv
import pprint
from graphipy.graph.graph_base import BaseGraph
class NeoGraph(BaseGraph):
def __init__(self, credentials):
BaseGraph.__init__(self)
self._type = "neo4j"
self.graph = Graph(credentials)
self.path = os.getcwd() + "\\csv"
if not os.path.exists(self.path):
os.mkdir(self.path)
def graph_type(self):
return self._type
def get_labels(self, cursor, _type):
labels = []
if _type == "node":
for record in cursor:
for l in record:
labels.append(l)
else:
for record in cursor:
labels.append(record["type(n)"])
return labels
def get_node(self, label_attribute):
return self.execute("MATCH (n) WHERE n.label_attribute = '" + label_attribute + "' RETURN n")
def export_helper(self, labels, _type, prefix):
""" helper function for export """
# Create folders to export to
export_path = self.path + "\\" + prefix + "\\"
export_path_node = export_path + "nodes\\"
export_path_edge = export_path + "edges\\"
if not os.path.exists(export_path):
os.mkdir(export_path)
if not os.path.exists(export_path_node):
os.mkdir(export_path_node)
if not os.path.exists(export_path_edge):
os.mkdir(export_path_edge)
for label in labels:
if _type == "node":
query = "MATCH (n) WHERE n.label_attribute = '" + \
label + "' RETURN n"
else:
query = "MATCH (m)-[n:`" + label + "`]->(o) RETURN n"
data = self.graph.run(query).data()
if not data:
# nothing to export for this label; skip it rather than aborting the export
continue
if _type == "node":
path = export_path_node
else:
path = export_path_edge
if not os.path.exists(export_path):
os.mkdir(export_path)
with open(
path + label + ".csv",
"w", newline="", encoding="utf-8") as outfile:
w = csv.DictWriter(
outfile, data[0]["n"].keys(), extrasaction="ignore")
w.writeheader()
for record in data:
w.writerow(record["n"])
return export_path
def export_all_csv(self, prefix):
""" exports the whole graph as CSV file and returns path to file """
query = "MATCH (n) WHERE EXISTS (n.label_attribute) RETURN DISTINCT n.label_attribute"
cursor = self.graph.run(query)
labels = self.get_labels(cursor, "node")
self.export_helper(labels, "node", prefix)
query = "MATCH (m)-[n]->(o) RETURN distinct type(n)"
cursor = self.graph.run(query).data()
labels = self.get_labels(cursor, "edge")
return self.export_helper(labels, "edge", prefix)
def export_csv(self, prefix, node_option=set(), edge_option=set()):
""" exports selected nodes as separate CSV files and returns path to file """
self.export_helper(node_option, "node", prefix)
return self.export_helper(edge_option, "edge", prefix)
def export_csv_attr(self, prefix, node_option={}, edge_option=set()):
"""
allows user to select specific attributes for each node
node_option = {
"node_label": [attribute1, attribute2, attribute3, ...]
}
if no attribute is specified, returns all the attributes
function returns path to file exported
"""
# Create folders to export to
export_path = self.path + "\\" + prefix + "\\"
export_path_node = export_path + "nodes\\"
if not os.path.exists(export_path):
os.mkdir(export_path)
if not os.path.exists(export_path_node):
os.mkdir(export_path_node)
for key in node_option:
query = ["MATCH (n) WHERE n.label_attribute = '",
key.lower(), "' RETURN "]
csv_file = open(export_path_node + key + ".csv", "w")
if not node_option[key]:
query.append("n")
query = ''.join(query)
self.graph.run(query).to_table().write_csv(file=csv_file)
else:
for attribute in node_option[key]:
query.append("n.")
query.append(attribute.lower())
query.append(",")
query.pop()
query = ''.join(query)
self.graph.run(query).to_table().write_csv(file=csv_file)
csv_file.close()
return self.export_helper(edge_option, "edge", prefix)
def create_node(self, node):
""" Inserts a node into the graph """
parameter_dict = {'params': vars(node)}
query_list = [
"MERGE (node: `",
node.Label,
"` {Id: '",
node.get_id(),
"'}) SET node = {params}"
]
query = ''.join(query_list)
self.graph.run(query, parameters=parameter_dict)
def create_edge(self, edge):
""" Creates a relationship between two nodes """
source = edge.Source
target = edge.Target
parameter_dict = {'params': vars(edge)}
query_list = [
"MATCH (source {Id: '",
source,
"'}) MATCH(target {Id: '",
target,
"'}) MERGE(source)-[r:`",
edge.Label,
"`]->(target) SET r = {params}"
]
query = ''.join(query_list)
self.graph.run(query, parameters=parameter_dict)
def get_nodes(self):
""" returns a neo4j cursor of all the nodes """
return self.graph.run("MATCH (n) RETURN n").data()
def get_edges(self):
""" returns a neo4j cursor of all the edges """
return self.graph.run("MATCH (n)-[r]->(m) RETURN r").data()
def execute(self, query, param={}):
""" Allows users to execute their own query """
return self.graph.run(query, parameters=param)
def delete_graph(self):
""" Deletes all nodes and relationships in the graph """
self.graph.run("MATCH (n) DETACH DELETE n") | PypiClean |
/OPAF-0.9.2.tar.gz/OPAF-0.9.2/opaflib/miniPDF.py | import struct
import zlib  # required by FlateDecode below
# For constructing a minimal PDF file
## PDF REference 3rd edition:: 3.2 Objects
class PDFObject:
def __init__(self):
self.n=None
self.v=None
def __str__(self):
raise NotImplementedError("PDFObject subclasses must implement __str__")
## PDF REference 3rd edition:: 3.2.1 Booleans Objects
class PDFBool(PDFObject):
def __init__(self,s):
PDFObject.__init__(self)
self.s=s
def __str__(self):
if self.s:
return "true"
return "false"
## PDF REference 3rd edition:: 3.2.2 Numeric Objects
class PDFNum(PDFObject):
def __init__(self,s):
PDFObject.__init__(self)
self.s=s
def __str__(self):
return "%s"%self.s
## PDF REference 3rd edition:: 3.2.3 String Objects
class PDFString(PDFObject):
def __init__(self,s):
PDFObject.__init__(self)
self.s=s
def __str__(self):
return "(%s)"%self.s
## PDF REference 3rd edition:: 3.2.3 String Objects / Hexadecimal Strings
class PDFHexString(PDFObject):
def __init__(self,s):
PDFObject.__init__(self)
self.s=s
def __str__(self):
return "<" + "".join(["%02x"%ord(c) for c in self.s]) + ">"
## A convenient type of literal Strings
class PDFOctalString(PDFObject):
def __init__(self,s):
PDFObject.__init__(self)
self.s="".join(["\\%03o"%ord(c) for c in s])
def __str__(self):
return "(%s)"%self.s
## PDF REference 3rd edition:: 3.2.4 Name Objects
class PDFName(PDFObject):
def __init__(self,s):
PDFObject.__init__(self)
self.s=s
def __str__(self):
return "/%s"%self.s
## PDF REference 3rd edition:: 3.2.5 Array Objects
class PDFArray(PDFObject):
def __init__(self,s):
PDFObject.__init__(self)
assert type(s) == type([])
self.s=s
def append(self,o):
self.s.append(o)
return self
def __str__(self):
return "[%s]"%(" ".join([ o.__str__() for o in self.s]))
## PDF REference 3rd edition:: 3.2.6 Dictionary Objects
class PDFDict(PDFObject):
def __init__(self, d={}):
PDFObject.__init__(self)
self.dict = {}
for k in d:
self.dict[k]=d[k]
def __iter__(self):
for k in self.dict.keys():
yield k
def __iterkeys__(self):
for k in self.dict.keys():
yield k
def __getitem__(self, key):
return self.dict[key]
def add(self,name,obj):
self.dict[name] = obj
def get(self,name):
if name in self.dict.keys():
return self.dict[name]
else:
return None
def __str__(self):
s="<<"
for name in self.dict:
s+="%s %s "%(PDFName(name),self.dict[name])
s+=">>"
return s
## PDF REference 3rd edition:: 3.2.7 Stream Objects
class PDFStream(PDFDict):
def __init__(self,d={},stream=""):
PDFDict.__init__(self,d)
self.stream=stream
self.filtered=self.stream
self.add('Length', len(stream))
self.filters = []
def appendFilter(self, filter):
self.filters.append(filter)
self._applyFilters() #yeah every time .. so what!
def _applyFilters(self):
self.filtered = self.stream
for f in self.filters:
self.filtered = f.encode(self.filtered)
if len(self.filters)>0:
self.add('Length', len(self.filtered))
self.add('Filter', PDFArray([f.name for f in self.filters]))
#Add Filter parameters ?
def __str__(self):
self._applyFilters() #yeah every time .. so what!
s=""
s+=PDFDict.__str__(self)
s+="\nstream\n"
s+=self.filtered
s+="\nendstream"
return s
## PDF REference 3rd edition:: 3.2.8 Null Object
class PDFNull(PDFObject):
def __init__(self):
PDFObject.__init__(self)
def __str__(self):
return "null"
## PDF REference 3rd edition:: 3.2.9 Indirect Objects
class UnResolved(PDFObject):
def __init__(self,n,v):
PDFObject.__init__(self)
self.n=n
self.v=v
def __str__(self):
return "UNRESOLVED(%d %d)"%(self.n,self.v)
class PDFRef(PDFObject):
def __init__(self,obj):
PDFObject.__init__(self)
self.obj=[obj]
def __str__(self):
if len(self.obj)==0:
return "null"
return "%d %d R"%(self.obj[0].n,self.obj[0].v)
## PDF REference 3rd edition:: 3.3 Filters
## Example Filter...
class FlateDecode:
name = PDFName('FlateDecode')
def __init__(self):
pass
def encode(self,stream):
return zlib.compress(stream)
def decode(self,stream):
return zlib.decompress(stream)
## PDF REference 3rd edition:: 3.4 File Structure
## Simplest file structure...
class PDFDoc():
def __init__(self,obfuscate=0):
self.objs=[]
self.info=None
self.root=None
def setRoot(self,root):
self.root=root
def setInfo(self,info):
self.info=info
def _add(self,obj):
if obj.v!=None or obj.n!=None:
raise Exception("Already added!!!")
obj.v=0
obj.n=1+len(self.objs)
self.objs.append(obj)
def add(self,obj):
if type(obj) != type([]):
self._add(obj);
else:
for o in obj:
self._add(o)
def _header(self):
return "%PDF-1.5\n%\xE7\xF3\xCF\xD3\n"
def __str__(self):
doc1 = self._header()
xref = {}
for obj in self.objs:
xref[obj.n] = len(doc1)
doc1+="%d %d obj\n"%(obj.n,obj.v)
doc1+=obj.__str__()
doc1+="\nendobj\n"
posxref=len(doc1)
doc1+="xref\n"
doc1+="0 %d\n"%(len(self.objs)+1)
doc1+="0000000000 65535 f \n"
for xr in sorted(xref.keys()):  # xref entries must appear in object-number order
doc1 += "%010d %05d n \n" % (xref[xr], 0)
doc1+="trailer\n"
trailer = PDFDict()
trailer.add("Size",len(self.objs)+1)
if self.root == None:
raise Exception("Root not set!")
trailer.add("Root",PDFRef(self.root))
if self.info:
trailer.add("Info",PDFRef(self.info))
doc1+=trailer.__str__()
doc1+="\nstartxref\n%d\n"%posxref
doc1+="%%EOF"
return doc1 | PypiClean |
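# Example usage (a minimal sketch; a real document would also wire Pages/Page
# objects into the catalog):
#
#     doc = PDFDoc()
#     catalog = PDFDict({"Type": PDFName("Catalog")})
#     doc.add(catalog)
#     doc.setRoot(catalog)
#     open("minimal.pdf", "w").write(str(doc))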
/NovalIDE-1.1.8-py3-none-any.whl/noval/util/xmlmarshaller.py |
from __future__ import print_function
import sys
from types import *
import logging
import xml.sax
import xml.sax.handler
import xml.sax.saxutils
import datetime
import noval.util.utillang as utillang
import noval.util.objutils as objutils
import noval.util.apputils as apputils
import noval.util.logger as logger
MODULE_PATH = "__main__"
## ToDO remove maxOccurs "unbounded" resolves to -1 hacks after bug 177 is fixed
##unboundedVal = 2147483647 # value used for maxOccurs == "unbounded"
"""
Special attributes that we recognize:
name: __xmlname__
type: string
description: the name of the xml element for the marshalled object
name: __xmlattributes__
type: tuple or list
description: the name(s) of the Lang string attribute(s) to be
marshalled as xml attributes instead of nested xml elements. currently
these can only be strings since there's not a way to get the type
information back when unmarshalling.
name: __xmlexclude__
type: tuple or list
description: the name(s) of the lang attribute(s) to skip when
marshalling.
name: __xmlrename__
type: dict
description: describes an alternate Lang <-> XML name mapping.
Normally the name mapping is the identity function. __xmlrename__
overrides that. The keys are the Lang names, the values are their
associated XML names.
name: __xmlflattensequence__
type: dict, tuple, or list
description: the name(s) of the Lang sequence attribute(s) whose
items are to be marshalled as a series of xml elements (with an
optional keyword argument that specifies the element name to use) as
opposed to containing them in a separate sequence element, e.g.:
myseq = (1, 2)
<!-- normal way of marshalling -->
<myseq>
<item objtype="int">1</item>
<item objtype="int">2</item>
</myseq>
<!-- with __xmlflattensequence__ set to {"myseq": "squish"} -->
<squish objtype="int">1</squish>
<squish objtype="int">2</squish>
name: __xmlnamespaces__
type: dict
description: a dict of the namespaces that the object uses. Each item
in the dict should consist of a prefix,url combination where the key is
the prefix and url is the value, e.g.:
__xmlnamespaces__ = { "xsd":"http://www.w3c.org/foo.xsd" }
name: __xmldefaultnamespace__
type: String
description: the prefix of a namespace defined in __xmlnamespaces__ that
should be used as the default namespace for the object.
name: __xmlattrnamespaces__
type: dict
description: a dict assigning the Lang object"s attributes to the namespaces
defined in __xmlnamespaces__. Each item in the dict should consist of a
prefix,attributeList combination where the key is the prefix and the value is
a list of the Lang attribute names. e.g.:
__xmlattrnamespaces__ = { "ag":["firstName", "lastName", "addressLine1", "city"] }
name: __xmlattrgroups__
type: dict
description: a dict specifying groups of attributes to be wrapped in an enclosing tag.
The key is the name of the enclosing tag; the value is a list of attributes to include
within it. e.g.
__xmlattrgroups__ = {"name": ["firstName", "lastName"], "address": ["addressLine1", "city", "state", "zip"]}
name: __xmlcdatacontent__
type: string
description: value is the name of a string attribute that should be assigned CDATA content from the
source document and that should be marshalled as CDATA.
__xmlcdatacontent__ = "messyContent"
"""
try:
long
unicode
basestring
except:
long = int
unicode = str
basestring = str
global xmlMarshallerLogger
xmlMarshallerLogger = logging.getLogger("novalide.util.xmlmarshaller.marshal")
def ag_className(obj):
return obj.__class__.__name__
class Error(Exception):
"""Base class for errors in this module."""
pass
class UnhandledTypeException(Error):
"""Exception raised when attempting to marshal an unsupported
type.
"""
def __init__(self, typename):
self.typename = typename
def __str__(self):
return "%s is not supported for marshalling." % str(self.typename)
class XMLAttributeIsNotStringType(Error):
"""Exception raised when an object"s attribute is specified to be
marshalled as an XML attribute of the enclosing object instead of
a nested element.
"""
def __init__(self, attrname, typename):
self.attrname = attrname
self.typename = typename
def __str__(self):
return """%s was set to be marshalled as an XML attribute
instead of a nested element, but the object's type is %s, not
string.""" % (self.attrname, self.typename)
class MarshallerException(Exception):
pass
class UnmarshallerException(Exception):
pass
################################################################################
#
# constants and such
#
################################################################################
XMLNS = "xmlns"
XMLNS_PREFIX = XMLNS + ":"
XMLNS_PREFIX_LENGTH = len(XMLNS_PREFIX)
DEFAULT_NAMESPACE_KEY = "__DEFAULTNS__"
TYPE_QNAME = "QName"
XMLSCHEMA_XSD_URL = "http://www.w3.org/2001/XMLSchema"
AG_URL = "http://www.activegrid.com/ag.xsd"
BASETYPE_ELEMENT_NAME = "item"
DICT_ITEM_NAME = "qqDictItem"
DICT_ITEM_KEY_NAME = "key"
DICT_ITEM_VALUE_NAME = "value"
# This list doesn"t seem to be used.
# Internal documentation or useless? You make the call!
##MEMBERS_TO_SKIP = ("__module__", "__doc__", "__xmlname__", "__xmlattributes__",
## "__xmlexclude__", "__xmlflattensequence__", "__xmlnamespaces__",
## "__xmldefaultnamespace__", "__xmlattrnamespaces__",
## "__xmlattrgroups__")
################################################################################
#
# classes and functions
#
################################################################################
def setattrignorecase(object, name, value):
## print "[setattrignorecase] name = %s, value = %s" % (name, value)
if (name not in object.__dict__):
namelow = name.lower()
for attr in object.__dict__:
if attr.lower() == namelow:
object.__dict__[attr] = value
return
object.__dict__[name] = value
def getComplexType(obj):
if (hasattr(obj, "_instancexsdcomplextype")):
return obj._instancexsdcomplextype
if (hasattr(obj, "__xsdcomplextype__")):
return obj.__xsdcomplextype__
return None
def _objectfactory(objtype, objargs=None, objclass=None):
"dynamically create an object based on the objtype and return it."
if not isinstance(objargs, list):
objargs = [objargs]
if (objclass != None):
obj = None
if (len(objargs) > 0):
if (hasattr(objclass, "__xmlcdatacontent__")):
obj = objclass()
contentAttr = obj.__xmlcdatacontent__
obj.__dict__[contentAttr] = str(objargs[0])
else:
obj = objclass(*objargs)
else:
obj = objclass()
if ((obj != None) and (hasattr(obj, 'postUnmarshal'))):
obj.postUnmarshal()
return obj
return objutils.newInstance(objtype, objargs)
class GenericXMLObject(object):
def __init__(self, content=None):
if content != None:
self._content = content
self.__xmlcontent__ = '_content'
def __str__(self):
return "GenericXMLObject(%s)" % objutils.toDiffableString(self.__dict__)
def setXMLAttributes(self, xmlName, attrs=None, children=None, nsMap=None, defaultNS=None):
if xmlName != None:
i = xmlName.rfind(':')
if i < 0:
self.__xmlname__ = xmlName
if defaultNS != None:
self.__xmldefaultnamespace__ = str(defaultNS)
else:
self.__xmlname__ = xmlName[i+1:]
prefix = xmlName[:i]
if prefix in nsMap:
self.__xmldefaultnamespace__ = str(nsMap[prefix])
if attrs != None:
for attrname, attr in attrs.items():
attrname = str(attrname)
if attrname == XMLNS or attrname.startswith(XMLNS_PREFIX):
pass
elif attrname == "objtype":
pass
else:
if not hasattr(self, '__xmlattributes__'):
self.__xmlattributes__ = []
i = attrname.rfind(':')
if i >= 0:
prefix = attrname[:i]
attrname = attrname[i+1:]
if not hasattr(self, '__xmlattrnamespaces__'):
self.__xmlattrnamespaces__ = {}
if prefix in self.__xmlattrnamespaces__:
alist = self.__xmlattrnamespaces__[prefix]
else:
alist = []
alist.append(attrname)
self.__xmlattrnamespaces__[prefix] = alist
self.__xmlattributes__.append(attrname)
if hasattr(self, '__xmlattributes__'):
self.__xmlattributes__.sort()
if children != None and len(children) > 0:
childList = []
flattenList = {}
for childname, child in children:
childstr = str(childname)
if childstr in childList:
if childstr not in flattenList:
flattenList[childstr] = (childstr,)
else:
childList.append(childstr)
if len(flattenList) > 0:
self.__xmlflattensequence__ = flattenList
def initialize(self, arg1=None):
pass
class Element:
def __init__(self, name, attrs=None, xsname=None):
self.name = name
self.attrs = attrs
self.content = ""
self.children = []
self.objclass = None
self.xsname = xsname
self.objtype = None
def getobjtype(self):
# objtype = self.attrs.get("objtype")
objtype = self.objtype
if (objtype == None):
if (len(self.children) > 0):
objtype = "dict"
else:
objtype = "str"
return objtype
class NsElement(object):
def __init__(self):
self.nsMap = {}
self.targetNS = None
self.defaultNS = None
self.prefix = None
def __str__(self):
if self.prefix == None:
strVal = 'prefix = None; '
else:
strVal = 'prefix = "%s"; ' % (self.prefix)
if self.targetNS == None:
strVal += 'targetNS = None; '
else:
strVal += 'targetNS = "%s"; ' % (self.targetNS)
if self.defaultNS == None:
strVal += 'defaultNS = None; '
else:
strVal += 'defaultNS = "%s"; ' % (self.defaultNS)
if len(self.nsMap) == 0:
strVal += 'nsMap = None; '
else:
strVal += 'nsMap = {'
for ik, iv in self.nsMap.items():
strVal += '%s=%s; ' % (ik,iv)
strVal += '}'
return strVal
def setKnownTypes(self, masterKnownTypes, masterKnownNamespaces, parentNSE):
# if we're a nested element, extend our parent element's mapping
if parentNSE != None:
self.knownTypes = parentNSE.knownTypes.copy()
# but if we have a different default namespace, replace the parent's default mappings
if (self.defaultNS != None) and (parentNSE.defaultNS != self.defaultNS):
newKT = self.knownTypes.copy()
for tag in newKT:
if tag.find(':') < 0:
del self.knownTypes[tag]
newMap = parentNSE.nsMap.copy()
if self.nsMap != {}:
for k, v in self.nsMap.items():
newMap[k] = v
self.nsMap = newMap
else:
self.knownTypes = {}
reversedKNS = {}
# TODO: instead of starting with the knownNamespaces, start with the "xmlns" mappings
# for this element. Then we'd only process the namespaces and tags we need to.
# But for now, this works.
for longNs, shortNs in masterKnownNamespaces.items():
reversedKNS[shortNs] = longNs
mapLongs = self.nsMap.values()
for tag, mapClass in masterKnownTypes.items():
i = tag.rfind(':')
if i >= 0: # e.g. "wsdl:description"
knownTagShort = tag[:i] # "wsdl"
knownTagName = tag[i+1:] # "description"
knownTagLong = reversedKNS[knownTagShort] # e.g. "http://schemas.xmlsoap.org/wsdl"
if (knownTagLong in mapLongs):
for mShort, mLong in self.nsMap.items():
if mLong == knownTagLong:
actualShort = mShort # e.g. "ws"
actualTag = '%s:%s' % (actualShort, knownTagName)
self.knownTypes[actualTag] = mapClass
break
if self.defaultNS == knownTagLong:
self.knownTypes[knownTagName] = mapClass
else: # e.g. "ItemSearchRequest"
self.knownTypes[tag] = mapClass
def expandQName(self, eName, attrName, attrValue):
bigValue = attrValue
i = attrValue.rfind(':')
if (i < 0):
if self.defaultNS != None:
bigValue = '%s:%s' % (self.defaultNS, attrValue)
else:
attrNS = attrValue[:i]
attrNCName = attrValue[i+1:]
for shortNs, longNs in self.nsMap.items():
if shortNs == attrNS:
bigValue = '%s:%s' % (longNs, attrNCName)
break
return bigValue
class XMLObjectFactory(xml.sax.ContentHandler):
def __init__(self, knownTypes=None, knownNamespaces=None, xmlSource=None, createGenerics=False):
self.rootelement = None
if xmlSource == None:
self.xmlSource = "unknown"
else:
self.xmlSource = xmlSource
self.createGenerics = createGenerics
self.skipper = False
self.elementstack = []
self.nsstack = []
self.collectContent = None
if (knownNamespaces == None):
self.knownNamespaces = {}
else:
self.knownNamespaces = knownNamespaces
self.reversedNamespaces = {}
for longns, shortns in self.knownNamespaces.items():
self.reversedNamespaces[shortns] = longns
self.knownTypes = {}
if (knownTypes != None):
for tag, cls in knownTypes.items():
i = tag.rfind(':')
if i >= 0:
shortns = tag[:i]
tag = tag[i+1:]
if shortns not in self.reversedNamespaces:
errorString = 'Error unmarshalling XML document from source "%s": knownTypes specifies an unmapped short namespace "%s" for element "%s"' % (self.xmlSource, shortns, tag)
raise UnmarshallerException(errorString)
longns = self.reversedNamespaces[shortns]
tag = '%s:%s' % (longns, tag)
self.knownTypes[tag] = cls
#printKnownTypes(self.knownTypes, 'Unmarshaller.XMLObjectFactory.__init__')
xml.sax.handler.ContentHandler.__init__(self)
def appendElementStack(self, newElement, newNS):
self.elementstack.append(newElement)
if (len(self.nsstack) > 0):
oldNS = self.nsstack[-1]
if newNS.defaultNS == None:
newNS.defaultNS = oldNS.defaultNS
if newNS.targetNS == None:
newNS.targetNS = oldNS.targetNS
if len(newNS.nsMap) == 0:
newNS.nsMap = oldNS.nsMap
elif len(oldNS.nsMap) > 0:
map = oldNS.nsMap.copy()
map.update(newNS.nsMap)
newNS.nsMap = map
self.nsstack.append(newNS)
return newNS
def popElementStack(self):
element = self.elementstack.pop()
nse = self.nsstack.pop()
return element, nse
## ContentHandler methods
def startElement(self, name, attrs):
## print '[startElement] <%s>' % (name)
if name == 'xs:annotation' or name == 'xsd:annotation': # should use namespace mapping here
self.skipper = True
self.appendElementStack(Element(name, attrs.copy()), NsElement())
if self.skipper:
return
if self.collectContent != None:
strVal = '<%s' % (name)
for aKey, aVal in attrs.items():
strVal += (' %s="%s"' % (aKey, aVal))
strVal += '>'
self.collectContent.content += strVal
xsname = name
i = name.rfind(':')
if i >= 0:
nsname = name[:i]
name = name[i+1:]
else:
nsname = None
element = Element(name, attrs.copy(), xsname=xsname)
# if the element has namespace attributes, process them and add them to our stack
nse = NsElement()
objtype = None
for k in attrs.getNames():
if k.startswith('xmlns'):
longNs = attrs[k]
eLongNs = longNs + '/'
if str(eLongNs) in self.knownNamespaces:
longNs = eLongNs
if k == 'xmlns':
nse.defaultNS = longNs
else:
shortNs = k[6:]
nse.nsMap[shortNs] = longNs
elif k == 'targetNamespace':
nse.targetNS = attrs.getValue(k)
elif k == 'objtype':
objtype = attrs.getValue(k)
nse = self.appendElementStack(element, nse)
if nsname != None:
if nsname in nse.nsMap:
longname = '%s:%s' % (nse.nsMap[nsname], name)
## elif objtype == None:
## errorString = 'Error unmarshalling XML document from source "%s": tag "%s" at line "%d", column "%d" has an undefined namespace' % (self.xmlSource, xsname, self._locator.getLineNumber(), self._locator.getColumnNumber())
## raise UnmarshallerException(errorString)
elif nsname in self.reversedNamespaces:
longname = '%s:%s' % (self.reversedNamespaces[nsname], name)
else:
longname = xsname
elif nse.defaultNS != None:
longname = '%s:%s' % (nse.defaultNS, name)
else:
longname = name
element.objtype = objtype
element.objclass = self.knownTypes.get(longname)
if element.objclass == None and len(self.knownNamespaces) == 0:
# handles common case where tags are unqualified and knownTypes are too, but there's a defaultNS
element.objclass = self.knownTypes.get(name)
if (hasattr(element.objclass, "__xmlcontent__")):
self.collectContent = element
def characters(self, content):
## print '[characters] "%s" (%s)' % (content, type(content))
if (content != None):
if self.collectContent != None:
self.collectContent.content += content
else:
self.elementstack[-1].content += content
def endElement(self, name):
## print "[endElement] </%s>" % name
xsname = name
i = name.rfind(':')
if i >= 0: # Strip namespace prefixes for now until actually looking them up in xsd
name = name[i+1:]
if self.skipper:
if xsname == "xs:annotation" or xsname == "xsd:annotation": # here too
self.skipper = False
self.popElementStack()
return
if self.collectContent != None:
if xsname != self.collectContent.xsname:
self.collectContent.content += ('</%s>' % (xsname))
self.popElementStack()
return
else:
self.collectContent = None
oldChildren = self.elementstack[-1].children
element, nse = self.popElementStack()
if ((len(self.elementstack) > 1) and (self.elementstack[-1].getobjtype() == "None")):
parentElement = self.elementstack[-2]
elif (len(self.elementstack) > 0):
parentElement = self.elementstack[-1]
objtype = element.getobjtype()
if (objtype == "None"):
return
constructorarglist = []
if (len(element.content) > 0):
strippedElementContent = element.content.strip()
if (len(strippedElementContent) > 0):
constructorarglist.append(element.content)
# If the element requires an object, but none is known, use the GenericXMLObject class
if ((element.objclass == None) and (element.attrs.get("objtype") == None) and ((len(element.attrs) > 0) or (len(element.children) > 0))):
if self.createGenerics:
element.objclass = GenericXMLObject
obj = _objectfactory(objtype, constructorarglist, element.objclass)
if element.objclass == GenericXMLObject:
obj.setXMLAttributes(str(xsname), element.attrs, element.children, nse.nsMap, nse.defaultNS)
complexType = getComplexType(obj)
if (obj != None):
if (hasattr(obj, "__xmlname__") and getattr(obj, "__xmlname__") == "sequence"):
self.elementstack[-1].children = oldChildren
return
if (len(element.attrs) > 0) and not isinstance(obj, list):
for attrname, attr in element.attrs.items():
if attrname == XMLNS or attrname.startswith(XMLNS_PREFIX):
if attrname.startswith(XMLNS_PREFIX):
ns = attrname[XMLNS_PREFIX_LENGTH:]
else:
ns = ""
if complexType != None or element.objclass == GenericXMLObject:
if not hasattr(obj, "__xmlnamespaces__"):
obj.__xmlnamespaces__ = {ns:attr}
elif ns not in obj.__xmlnamespaces__:
if (hasattr(obj.__class__, "__xmlnamespaces__")
and (obj.__xmlnamespaces__ is obj.__class__.__xmlnamespaces__)):
obj.__xmlnamespaces__ = dict(obj.__xmlnamespaces__)
obj.__xmlnamespaces__[ns] = attr
elif not attrname == "objtype":
if attrname.find(":") > -1: # Strip namespace prefixes for now until actually looking them up in xsd
attrname = attrname[attrname.find(":") + 1:]
if (complexType != None):
xsdElement = complexType.findElement(attrname)
if (xsdElement != None):
type = xsdElement.type
if (type != None):
if (type == TYPE_QNAME):
attr = nse.expandQName(name, attrname, attr)
type = xsdToLangType(type)
### ToDO remove maxOccurs hack after bug 177 is fixed
if attrname == "maxOccurs" and attr == "unbounded":
attr = "-1"
try:
attr = _objectfactory(type, attr)
except Exception as exceptData:
errorString = 'Error unmarshalling attribute "%s" at line %d, column %d in XML document from source "%s": %s' % (attrname, self._locator.getLineNumber(), self._locator.getColumnNumber(), self.xmlSource, str(exceptData))
raise UnmarshallerException(errorString)
try:
setattrignorecase(obj, _toAttrName(obj, attrname), attr)
except AttributeError:
errorString = 'Error setting value of attribute "%s" at line %d, column %d in XML document from source "%s": object type of XML element "%s" is not specified or known' % (attrname, self._locator.getLineNumber(), self._locator.getColumnNumber(), self.xmlSource, name)
raise UnmarshallerException(errorString)
## obj.__dict__[_toAttrName(obj, attrname)] = attr
# stuff any child attributes meant to be in a sequence via the __xmlflattensequence__
flattenDict = {}
if hasattr(obj, "__xmlflattensequence__"):
flatten = obj.__xmlflattensequence__
if (isinstance(flatten, dict)):
for sequencename, xmlnametuple in flatten.items():
if (xmlnametuple == None):
flattenDict[sequencename] = sequencename
elif (not isinstance(xmlnametuple, (tuple, list))):
flattenDict[str(xmlnametuple)] = sequencename
else:
for xmlname in xmlnametuple:
flattenDict[xmlname] = sequencename
else:
raise Exception("Invalid type for __xmlflattensequence___ : it must be a dict")
# reattach an object"s attributes to it
for childname, child in element.children:
if (childname in flattenDict):
sequencename = _toAttrName(obj, flattenDict[childname])
if (not hasattr(obj, sequencename)):
obj.__dict__[sequencename] = []
sequencevalue = getattr(obj, sequencename)
if (sequencevalue == None):
obj.__dict__[sequencename] = []
sequencevalue = getattr(obj, sequencename)
sequencevalue.append(child)
elif (objtype == "list"):
obj.append(child)
elif isinstance(obj, dict):
if (childname == DICT_ITEM_NAME):
obj[child[DICT_ITEM_KEY_NAME]] = child[DICT_ITEM_VALUE_NAME]
else:
obj[childname] = child
else:
# don't replace a good attribute value with a bad one
childAttrName = _toAttrName(obj, childname)
if (not hasattr(obj, childAttrName)) or (getattr(obj, childAttrName) == None) or (getattr(obj, childAttrName) == []) or (not isinstance(child, GenericXMLObject)):
try:
setattrignorecase(obj, childAttrName, child)
except AttributeError:
raise MarshallerException("Error unmarshalling child element \"%s\" of XML element \"%s\": object type not specified or known" % (childname, name))
if (complexType != None):
for element in complexType.elements:
if element.default:
elementName = _toAttrName(obj, element.name)
if ((elementName not in obj.__dict__) or (obj.__dict__[elementName] == None)):
langType = xsdToLangType(element.type)
defaultValue = _objectfactory(langType, element.default)
obj.__dict__[elementName] = defaultValue
if (isinstance(obj, list)):
if ((element.attrs.has_key("mutable")) and (element.attrs.getValue("mutable") == "false")):
obj = tuple(obj)
if (len(self.elementstack) > 0):
## print "[endElement] appending child with name: ", name, "; objtype: ", objtype
parentElement.children.append((name, obj))
else:
self.rootelement = obj
def getRootObject(self):
return self.rootelement
def _toAttrName(obj, name):
if (hasattr(obj, "__xmlrename__")):
for key, val in obj.__xmlrename__.items():
if (name == val):
name = key
break
## if (name.startswith("__") and not name.endswith("__")):
## name = "_%s%s" % (obj.__class__.__name__, name)
return str(name)
def printKnownTypes(kt, where):
print ('KnownTypes from %s' % (where))
for tag, cls in kt.items():
print ('%s => %s' % (tag, str(cls)))
__typeMappingXsdToLang = {
"string": "str",
"char": "str",
"varchar": "str",
"date": "str", # ToDO Need to work out how to create lang date types
"boolean": "bool",
"decimal": "float", # ToDO Does python have a better fixed point type?
"int": "int",
"integer":"int",
"long": "long",
"float": "float",
"bool": "bool",
"str": "str",
"unicode": "unicode",
"short": "int",
"duration": "str", # see above (date)
"datetime": "str", # see above (date)
"time": "str", # see above (date)
"double": "float",
"QName" : "str",
"blob" : "str", # ag:blob
"currency" : "str", # ag:currency
}
def xsdToLangType(xsdType):
if xsdType.startswith(XMLSCHEMA_XSD_URL):
xsdType = xsdType[len(XMLSCHEMA_XSD_URL)+1:]
elif xsdType.startswith(AG_URL):
xsdType = xsdType[len(AG_URL)+1:]
langType = __typeMappingXsdToLang.get(xsdType)
if (langType == None):
raise Exception("Unknown xsd type %s" % xsdType)
return langType
def langToXsdType(langType):
if langType in __typeMappingXsdToLang:
return '%s:%s' % (XMLSCHEMA_XSD_URL, langType)
return langType
def _getXmlValue(langValue):
if (isinstance(langValue, bool)):
return str(langValue).lower()
elif (isinstance(langValue, unicode)):
return langValue.encode()
else:
return str(langValue)
def unmarshal(xmlstr, knownTypes=None, knownNamespaces=None, xmlSource=None, createGenerics=False):
objectfactory = XMLObjectFactory(knownTypes, knownNamespaces, xmlSource, createGenerics)
# on Linux, pyXML's sax.parseString fails when passed unicode
if (not apputils.is_windows()):
xmlstr = str(xmlstr)
try:
xml.sax.parseString(xmlstr, objectfactory)
except xml.sax.SAXParseException as errorData:
if xmlSource == None:
xmlSource = 'unknown'
errorString = 'SAXParseException ("%s") detected at line %d, column %d in XML document from source "%s" ' % (errorData.getMessage(), errorData.getLineNumber(), errorData.getColumnNumber(), xmlSource)
raise UnmarshallerException(errorString)
return objectfactory.getRootObject()
def marshal(obj, elementName=None, prettyPrint=False, marshalType=True, indent=0, knownTypes=None, knownNamespaces=None, encoding=-1):
worker = XMLMarshalWorker(prettyPrint=prettyPrint, marshalType=marshalType, knownTypes=knownTypes, knownNamespaces=knownNamespaces)
if obj != None and hasattr(obj, '__xmldeepexclude__'):
worker.xmldeepexclude = obj.__xmldeepexclude__
xmlstr = "".join(worker._marshal(obj, elementName, indent=indent))
xmlMarshallerLogger.info("marshal produced string of type %s", type(xmlstr))
if (encoding == None):
return xmlstr
if (not isinstance(encoding, basestring)):
encoding = sys.getdefaultencoding()
if (not isinstance(xmlstr, unicode)):
xmlstr = xmlstr.decode()
xmlstr = u'<?xml version="1.0" encoding="%s"?>\n%s' % (encoding, xmlstr)
return xmlstr.encode(encoding)
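# Round-trip sketch (hypothetical class; unmarshal() re-instantiates it via the
# objtype attribute that marshal() records, so Point must be importable):
#
#     class Point(object):
#         __xmlname__ = "point"
#     p = Point(); p.x = 1; p.y = 2
#     xml_str = marshal(p, prettyPrint=True, encoding=None)
#     p2 = unmarshal(xml_str)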
class XMLMarshalWorker(object):
def __init__(self, marshalType=True, prettyPrint=False, knownTypes=None, knownNamespaces=None):
if knownTypes == None:
self.knownTypes = {}
else:
self.knownTypes = knownTypes
if knownNamespaces == None:
self.knownNamespaces = {}
else:
self.knownNamespaces = knownNamespaces
self.prettyPrint = prettyPrint
self.marshalType = marshalType
self.xmldeepexclude = []
self.nsstack = []
def getNSPrefix(self):
if len(self.nsstack) > 0:
return self.nsstack[-1].prefix
return ''
def isKnownType(self, elementName):
tagLongNs = None
nse = self.nsstack[-1]
i = elementName.rfind(':')
if i > 0:
prefix = elementName[:i]
name = elementName[i+1:]
else:
prefix = DEFAULT_NAMESPACE_KEY
name = elementName
for shortNs, longNs in nse.nameSpaces.items():
if shortNs == prefix:
tagLongNs = longNs
break
if tagLongNs == None:
knownTagName = elementName
else:
knownShortNs = self.knownNamespaces[tagLongNs]
knownTagName = knownShortNs + ':' + name
if (knownTagName in self.knownTypes):
knownClass = self.knownTypes[knownTagName]
return True
return False
def popNSStack(self):
self.nsstack.pop()
def appendNSStack(self, obj):
nameSpaces = {}
defaultLongNS = None
for nse in self.nsstack:
for k, v in nse.nsMap.items():
nameSpaces[k] = v
if k == DEFAULT_NAMESPACE_KEY:
defaultLongNS = v
newNS = NsElement()
nameSpaceAttrs = ""
if hasattr(obj, "__xmlnamespaces__"):
ns = getattr(obj, "__xmlnamespaces__")
keys = list(ns.keys())
keys.sort()
for nameSpaceKey in keys:
nameSpaceUrl = ns[nameSpaceKey]
if nameSpaceUrl in nameSpaces.values():
for k, v in nameSpaces.items():
if v == nameSpaceUrl:
nameSpaceKey = k
break
else:
if nameSpaceKey == "":
defaultLongNS = nameSpaceUrl
nameSpaces[DEFAULT_NAMESPACE_KEY] = nameSpaceUrl
newNS.nsMap[DEFAULT_NAMESPACE_KEY] = nameSpaceUrl
nameSpaceAttrs += ' xmlns="%s" ' % (nameSpaceUrl)
else:
nameSpaces[nameSpaceKey] = nameSpaceUrl
newNS.nsMap[nameSpaceKey] = nameSpaceUrl
nameSpaceAttrs += ' xmlns:%s="%s" ' % (nameSpaceKey, nameSpaceUrl)
nameSpaceAttrs = nameSpaceAttrs.rstrip()
if len(self.nsstack) > 0:
newNS.prefix = self.nsstack[-1].prefix
else:
newNS.prefix = ''
if obj != None and hasattr(obj, "__xmldefaultnamespace__"):
longPrefixNS = getattr(obj, "__xmldefaultnamespace__")
if longPrefixNS == defaultLongNS:
newNS.prefix = ''
else:
try:
for k, v in nameSpaces.items():
if v == longPrefixNS:
newNS.prefix = k + ':'
break;
except:
if (longPrefixNS in self.knownNamespaces):
newNS.prefix = self.knownNamespaces[longPrefixNS] + ':'
else:
raise MarshallerException('Error marshalling __xmldefaultnamespace__ ("%s") not defined in namespace stack' % (longPrefixNS))
if obj != None and hasattr(obj, "targetNamespace"):
newNS.targetNS = obj.targetNamespace
elif len(self.nsstack) > 0:
newNS.targetNS = self.nsstack[-1].targetNS
newNS.nameSpaces = nameSpaces
self.nsstack.append(newNS)
return nameSpaceAttrs
def contractQName(self, value, obj, attr):
value = langToXsdType(value)
i = value.rfind(':')
if i >= 0:
longNS = value[:i]
else:
# the value doesn't have a namespace and we couldn't map it to an XSD type...what to do?
# (a) just write it, as is, and hope it's in the default namespace (for now)
# (b) throw an exception so we can track down the bad code (later)
return value
if (longNS in self.nsstack[-1].nameSpaces.values()):
for kShort, vLong in self.nsstack[-1].nameSpaces.items():
if vLong == longNS:
shortNS = kShort
break
else:
shortNS = longNS # if we can't find the long->short mappping, just use longNS
if shortNS == DEFAULT_NAMESPACE_KEY:
value = value[i+1:]
else:
value = shortNS + ':' + value[i+1:]
return value
def _genObjTypeStr(self, typeString):
if self.marshalType:
return ' objtype="%s"' % typeString
return ""
def _marshal(self, obj, elementName=None, nameSpacePrefix="", indent=0):
if (obj != None):
xmlMarshallerLogger.debug("--> _marshal: elementName=%s%s, type=%s, obj=%s, indent=%d", nameSpacePrefix, elementName, type(obj), str(obj), indent)
else:
xmlMarshallerLogger.debug("--> _marshal: elementName=%s%s, obj is None, indent=%d", nameSpacePrefix, elementName, indent)
if ((obj != None) and (hasattr(obj, 'preMarshal'))):
obj.preMarshal()
excludeAttrs = []
excludeAttrs.extend(self.xmldeepexclude)
if hasattr(obj, "__xmlexclude__"):
excludeAttrs.extend(obj.__xmlexclude__)
prettyPrint = self.prettyPrint
knownTypes = self.knownTypes
xmlString = None
if self.prettyPrint or indent:
prefix = " "*indent
newline = "\n"
increment = 2
else:
prefix = ""
newline = ""
increment = 0
## Determine the XML element name. If it isn't specified in the
## parameter list, look for it in the __xmlname__ attribute,
## else use the default generic BASETYPE_ELEMENT_NAME.
nameSpaceAttrs = self.appendNSStack(obj)
nameSpacePrefix = self.getNSPrefix()
if not elementName:
if hasattr(obj, "__xmlname__"):
elementName = nameSpacePrefix + obj.__xmlname__
else:
elementName = nameSpacePrefix + BASETYPE_ELEMENT_NAME
else:
elementName = nameSpacePrefix + elementName
if (hasattr(obj, "__xmlsequencer__")) and (obj.__xmlsequencer__ != None):
if (XMLSCHEMA_XSD_URL in self.nsstack[-1].nameSpaces.values()):
for kShort, vLong in self.nsstack[-1].nameSpaces.items():
if vLong == XMLSCHEMA_XSD_URL:
if kShort != DEFAULT_NAMESPACE_KEY:
xsdPrefix = kShort + ':'
else:
xsdPrefix = ''
break
else:
xsdPrefix = 'xs:'
elementAdd = xsdPrefix + obj.__xmlsequencer__
else:
elementAdd = None
members_to_skip = []
## Add more members_to_skip based on ones the user has selected
## via the __xmlexclude__ and __xmldeepexclude__ attributes.
members_to_skip.extend(excludeAttrs)
# Marshal the attributes that are selected to be XML attributes.
objattrs = ""
className = ag_className(obj)
classNamePrefix = "_" + className
if hasattr(obj, "__xmlattributes__"):
xmlattributes = obj.__xmlattributes__
members_to_skip.extend(xmlattributes)
for attr in xmlattributes:
internalAttrName = attr
if (attr.startswith("__") and not attr.endswith("__")):
internalAttrName = classNamePrefix + attr
# Fail silently if a python attribute is specified to be
# an XML attribute but is missing.
attrNameSpacePrefix = ""
if hasattr(obj, "__xmlattrnamespaces__"):
for nameSpaceKey, nameSpaceAttributes in getattr(obj, "__xmlattrnamespaces__").items():
if nameSpaceKey == nameSpacePrefix[:-1]: # Don't need to specify attribute namespace if it is the same as its element
continue
if attr in nameSpaceAttributes:
attrNameSpacePrefix = nameSpaceKey + ":"
break
attrs = obj.__dict__
value = attrs.get(internalAttrName)
if (hasattr(obj, "__xmlrename__") and attr in obj.__xmlrename__):
attr = obj.__xmlrename__[attr]
xsdElement = None
complexType = getComplexType(obj)
if (complexType != None):
xsdElement = complexType.findElement(attr)
if (xsdElement != None):
default = xsdElement.default
if (default != None):
if ((default == value) or (default == _getXmlValue(value))):
continue
else:
if (value == None):
continue
elif xsdElement.type == TYPE_QNAME:
value = self.contractQName(value, obj, attr)
elif value == None:
continue
# ToDO remove maxOccurs hack after bug 177 is fixed
if attr == "maxOccurs" and value == -1:
value = "unbounded"
if isinstance(value, bool):
if value == True:
value = "true"
else:
value = "false"
else:
value = objutils.toDiffableRepr(value)
objattrs += ' %s%s="%s"' % (attrNameSpacePrefix, attr, utillang.escape(value))
if (obj == None):
xmlString = [""]
elif isinstance(obj, bool):
objTypeStr = self._genObjTypeStr("bool")
xmlString = ['%s<%s%s>%s</%s>%s' % (prefix, elementName, objTypeStr, obj, elementName, newline)]
elif isinstance(obj, int):
objTypeStr = self._genObjTypeStr("int")
xmlString = ['%s<%s%s>%s</%s>%s' % (prefix, elementName, objTypeStr, str(obj), elementName, newline)]
elif isinstance(obj, long):
objTypeStr = self._genObjTypeStr("long")
xmlString = ['%s<%s%s>%s</%s>%s' % (prefix, elementName, objTypeStr, str(obj), elementName, newline)]
elif isinstance(obj, float):
objTypeStr = self._genObjTypeStr("float")
xmlString = ['%s<%s%s>%s</%s>%s' % (prefix, elementName, objTypeStr, str(obj), elementName, newline)]
elif isinstance(obj, unicode): # have to check before basestring - unicode is instance of base string
xmlString = ['%s<%s>%s</%s>%s' % (prefix, elementName, utillang.escape(obj), elementName, newline)]
elif isinstance(obj, basestring):
xmlString = ['%s<%s>%s</%s>%s' % (prefix, elementName, utillang.escape(obj.encode()), elementName, newline)]
elif isinstance(obj, datetime.datetime):
objTypeStr = self._genObjTypeStr("datetime")
xmlString = ['%s<%s%s>%s</%s>%s' % (prefix, elementName, objTypeStr, str(obj), elementName, newline)]
elif isinstance(obj, datetime.date):
objTypeStr = self._genObjTypeStr("date")
xmlString = ['%s<%s%s>%s</%s>%s' % (prefix, elementName, objTypeStr, str(obj), elementName, newline)]
elif isinstance(obj, datetime.time):
objTypeStr = self._genObjTypeStr("time")
xmlString = ['%s<%s%s>%s</%s>%s' % (prefix, elementName, objTypeStr, str(obj), elementName, newline)]
elif isinstance(obj, list):
if len(obj) < 1:
xmlString = ""
else:
objTypeStr = self._genObjTypeStr("list")
xmlString = ['%s<%s%s>%s' % (prefix, elementName, objTypeStr, newline)]
for item in obj:
xmlString.extend(self._marshal(item, indent=indent+increment))
xmlString.append("%s</%s>%s" % (prefix, elementName, newline))
elif isinstance(obj, tuple):
if len(obj) < 1:
xmlString = ""
else:
objTypeStr = self._genObjTypeStr("list")
xmlString = ['%s<%s%s mutable="false">%s' % (prefix, elementName, objTypeStr, newline)]
for item in obj:
xmlString.extend(self._marshal(item, indent=indent+increment))
xmlString.append("%s</%s>%s" % (prefix, elementName, newline))
elif isinstance(obj, dict):
objTypeStr = self._genObjTypeStr("dict")
xmlString = ['%s<%s%s>%s' % (prefix, elementName, objTypeStr, newline)]
subprefix = prefix + " "*increment
subindent = indent + 2*increment
keys = sorted(obj.keys())
for key in keys:
xmlString.append("%s<%s>%s" % (subprefix, DICT_ITEM_NAME, newline))
xmlString.extend(self._marshal(key, elementName=DICT_ITEM_KEY_NAME, indent=subindent))
xmlString.extend(self._marshal(obj[key], elementName=DICT_ITEM_VALUE_NAME, indent=subindent))
xmlString.append("%s</%s>%s" % (subprefix, DICT_ITEM_NAME, newline))
xmlString.append("%s</%s>%s" % (prefix, elementName, newline))
elif hasattr(obj, "__xmlcontent__"):
contentValue = getattr(obj, obj.__xmlcontent__)
if contentValue == None:
xmlString = ["%s<%s%s%s/>%s" % (prefix, elementName, nameSpaceAttrs, objattrs, newline)]
else:
contentValue = utillang.escape(contentValue)
xmlString = ["%s<%s%s%s>%s</%s>%s" % (prefix, elementName, nameSpaceAttrs, objattrs, contentValue, elementName, newline)]
else:
# Only add the objtype if the element tag is unknown to us.
if (isinstance(obj, GenericXMLObject)):
objTypeStr = ""
elif (self.isKnownType(elementName) == True):
objTypeStr = ""
else:
objTypeStr = self._genObjTypeStr("%s.%s" % (obj.__class__.__module__, className))
xmlString = ['%s<%s%s%s%s' % (prefix, elementName, nameSpaceAttrs, objattrs, objTypeStr)]
# get the member, value pairs for the object, filtering out the types we don't support
if (elementAdd != None):
prefix += increment*" "
indent += increment
xmlMemberString = []
if hasattr(obj, "__xmlbody__"):
xmlbody = getattr(obj, obj.__xmlbody__)
if xmlbody != None:
xmlMemberString.append(utillang.escape(xmlbody))
else:
if hasattr(obj, "__xmlattrgroups__"):
attrGroups = obj.__xmlattrgroups__.copy()
if (not isinstance(attrGroups, dict)):
raise Exception("__xmlattrgroups__ is not a dict, but must be")
for n in attrGroups:
members_to_skip.extend(attrGroups[n])
else:
attrGroups = {}
# add the list of all attributes to attrGroups
eList = list(obj.__dict__.keys())
eList.sort()
attrGroups["__nogroup__"] = eList
for eName, eList in attrGroups.items():
if (eName != "__nogroup__"):
prefix += increment*" "
indent += increment
objTypeStr = self._genObjTypeStr("None")
xmlMemberString.append('%s<%s%s>%s' % (prefix, eName, objTypeStr, newline))
for name in eList:
value = obj.__dict__[name]
if eName == "__nogroup__" and name in members_to_skip: continue
if name.startswith("__") and name.endswith("__"): continue
if (hasattr(obj, "__xmlcdatacontent__") and (obj.__xmlcdatacontent__ == name)):
continue
subElementNameSpacePrefix = nameSpacePrefix
if hasattr(obj, "__xmlattrnamespaces__"):
for nameSpaceKey, nameSpaceValues in getattr(obj, "__xmlattrnamespaces__").items():
if name in nameSpaceValues:
subElementNameSpacePrefix = nameSpaceKey + ":"
break
# handle sequences listed in __xmlflattensequence__
# specially: instead of listing the contained items inside
# of a separate list, as God intended, list them inside
# the object containing the sequence.
if (hasattr(obj, "__xmlflattensequence__") and (value != None) and (name in obj.__xmlflattensequence__)):
xmlnametuple = obj.__xmlflattensequence__[name]
if (xmlnametuple == None):
xmlnametuple = [name]
elif (not isinstance(xmlnametuple, (tuple,list))):
xmlnametuple = [str(xmlnametuple)]
xmlname = None
if (len(xmlnametuple) == 1):
xmlname = xmlnametuple[0]
if not isinstance(value, (list, tuple)):
value = [value]
for seqitem in value:
xmlMemberString.extend(self._marshal(seqitem, xmlname, subElementNameSpacePrefix, indent=indent+increment))
else:
if (hasattr(obj, "__xmlrename__") and name in obj.__xmlrename__):
xmlname = obj.__xmlrename__[name]
else:
xmlname = name
if (value != None):
xmlMemberString.extend(self._marshal(value, xmlname, subElementNameSpacePrefix, indent=indent+increment))
if (eName != "__nogroup__"):
xmlMemberString.append("%s</%s>%s" % (prefix, eName, newline))
prefix = prefix[:-increment]
indent -= increment
# if we have nested elements, add them here, otherwise close the element tag immediately.
newList = []
for s in xmlMemberString:
if (len(s) > 0): newList.append(s)
xmlMemberString = newList
if len(xmlMemberString) > 0:
xmlString.append(">")
if hasattr(obj, "__xmlbody__"):
xmlString.extend(xmlMemberString)
xmlString.append("</%s>%s" % (elementName, newline))
else:
xmlString.append(newline)
if (elementAdd != None):
xmlString.append("%s<%s>%s" % (prefix, elementAdd, newline))
xmlString.extend(xmlMemberString)
if (elementAdd != None):
xmlString.append("%s</%s>%s" % (prefix, elementAdd, newline))
prefix = prefix[:-increment]
indent -= increment
xmlString.append("%s</%s>%s" % (prefix, elementName, newline))
else:
if hasattr(obj, "__xmlcdatacontent__"):
cdataAttr = obj.__xmlcdatacontent__
cdataContent = obj.__dict__[cdataAttr]
xmlString.append("><![CDATA[%s]]></%s>%s" % (cdataContent, elementName, newline))
else:
xmlString.append("/>%s" % newline)
xmlMarshallerLogger.debug("<-- _marshal: %s", objutils.toDiffableString(xmlString))
#print "<-- _marshal: %s" % str(xmlString)
self.popNSStack()
return xmlString
# A simple test, to be executed when the xmlmarshaller is run standalone
class MarshallerPerson:
__xmlname__ = "person"
__xmlexclude__ = ["fabulousness",]
__xmlattributes__ = ("nonSmoker",)
__xmlrename__ = {"_phoneNumber": "telephone"}
__xmlflattensequence__ = {"favoriteWords": ("vocabulary",)}
__xmlattrgroups__ = {"name": ["firstName", "lastName"], "address": ["addressLine1", "city", "state", "zip"]}
def setPerson(self):
self.firstName = "Albert"
self.lastName = "Camus"
self.addressLine1 = "23 Absurd St."
self.city = "Ennui"
self.state = "MO"
self.zip = "54321"
self._phoneNumber = "808-303-2323"
self.favoriteWords = ["angst", "ennui", "existence"]
self.phobias = ["war", "tuberculosis", "cars"]
self.weight = 150
self.fabulousness = "tres tres"
self.nonSmoker = False
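# Illustration (expected shape, based on the flatten logic in _marshal): the
# __xmlflattensequence__ mapping above should marshal "favoriteWords" as
# repeated <vocabulary> elements placed directly inside <person>, rather
# than inside a wrapping list element:
#   <vocabulary>angst</vocabulary>
#   <vocabulary>ennui</vocabulary>
#   <vocabulary>existence</vocabulary>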
if __name__ == "__main__":
p1 = MarshallerPerson()
p1.setPerson()
xmlP1 = marshal(p1, prettyPrint=True, encoding="utf-8")
print ("\n########################")
print ("# testPerson test case #")
print ("########################")
print (xmlP1)
p2 = unmarshal(xmlP1)
xmlP2 = marshal(p2, prettyPrint=True, encoding="utf-8")
if xmlP1 == xmlP2:
print ("Success: repeated marshalling yields identical results")
else:
print ("Failure: repeated marshalling yields different results")
print (xmlP2)
/KalaPy-0.4.2.tar.gz/KalaPy-0.4.2/kalapy/db/engines/mysql/_database.py
import MySQLdb as dbapi
from MySQLdb.converters import conversions
from MySQLdb.constants import FIELD_TYPE
from kalapy.db.engines import utils
from kalapy.db.engines.relational import RelationalDatabase
__all__ = ('DatabaseError', 'IntegrityError', 'Database')
DatabaseError = dbapi.DatabaseError
IntegrityError = dbapi.IntegrityError
CONV = conversions.copy()
CONV.update({
FIELD_TYPE.DECIMAL: utils.decimal_to_python,
})
class Database(RelationalDatabase):
data_types = {
"key" : "INTEGER AUTO_INCREMENT PRIMARY KEY",
"reference" : "INTEGER",
"char" : "VARCHAR(%(size)s)",
"text" : "LONGTEXT",
"integer" : "INTEGER",
"float" : "DOUBLE",
"decimal" : "DECIMAL(%(max_digits)s, %(decimal_places)s)",
"boolean" : "BOOL",
"datetime" : "DATETIME",
"binary" : "BLOB",
}
schema_mime = 'text/x-mysql'
def __init__(self, name, host=None, port=None, user=None, password=None):
super(Database, self).__init__(name, host, port, user, password)
self.connection = None
def connect(self):
if self.connection is not None:
return self
args = {
'db': self.name,
'charset': 'utf8',
'use_unicode': True,
'conv': CONV,
}
if self.user:
args['user'] = self.user
if self.password:
args['passwd'] = self.password
if self.host:
args['host'] = self.host
if self.port:
args['port'] = self.port
self.connection = dbapi.connect(**args)
return self
def fix_quote(self, sql):
return sql.replace('"', '`')
def get_create_sql(self, model):
sql = super(Database, self).get_create_sql(model)
return "%s ENGINE=`InnoDB`;" % (sql[:-1],)
def exists_table(self, model):
cursor = self.cursor()
cursor.execute("""
SELECT COUNT(*)
FROM information_schema.tables
WHERE table_name = %s AND table_schema = %s;
""", (model._meta.table, self.name,))
return bool(cursor.fetchone()[0])
def lastrowid(self, cursor, model):
cursor.execute('SELECT LAST_INSERT_ID()')
return cursor.fetchone()[0]
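# Hedged usage sketch (illustrative, not part of the original module): the
# connection parameters are assumptions and a reachable MySQL server is
# required; cursor() is assumed to be provided by RelationalDatabase.
if __name__ == '__main__':
    db = Database('kalapy_db', host='localhost', user='root', password='secret')
    db.connect()
    cursor = db.cursor()
    cursor.execute('SELECT VERSION()')
    print(cursor.fetchone()[0])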
/Nuitka_fixed-1.1.2-cp310-cp310-win_amd64.whl/nuitka/nodes/VariableRefNodes.py
from nuitka import Builtins, Variables
from nuitka.ModuleRegistry import getOwnerFromCodeName
from nuitka.PythonVersions import python_version
from nuitka.tree.TreeHelpers import makeStatementsSequenceFromStatements
from .ConstantRefNodes import makeConstantRefNode
from .DictionaryNodes import (
ExpressionDictOperationIn,
ExpressionDictOperationItem,
ExpressionDictOperationNotIn,
StatementDictOperationRemove,
StatementDictOperationSet,
)
from .ExpressionBases import ExpressionBase, ExpressionNoSideEffectsMixin
from .ModuleAttributeNodes import (
ExpressionModuleAttributeLoaderRef,
ExpressionModuleAttributeNameRef,
ExpressionModuleAttributePackageRef,
ExpressionModuleAttributeSpecRef,
)
from .NodeMakingHelpers import (
makeRaiseExceptionReplacementExpression,
makeRaiseTypeErrorExceptionReplacementFromTemplateAndValue,
)
from .OutlineNodes import ExpressionOutlineBody
from .ReturnNodes import makeStatementReturn
from .shapes.StandardShapes import tshape_unknown
from .SubscriptNodes import ExpressionSubscriptLookupForUnpack
class ExpressionVariableRefBase(ExpressionBase):
# Base classes can be abstract, pylint: disable=abstract-method
__slots__ = "variable", "variable_trace"
def __init__(self, variable, source_ref):
ExpressionBase.__init__(self, source_ref=source_ref)
self.variable = variable
self.variable_trace = None
def finalize(self):
del self.parent
del self.variable
del self.variable_trace
def getVariableName(self):
return self.variable.getName()
def getVariable(self):
return self.variable
def getVariableTrace(self):
return self.variable_trace
def getTypeShape(self):
if self.variable_trace is None:
return tshape_unknown
else:
return self.variable_trace.getTypeShape()
def onContentEscapes(self, trace_collection):
trace_collection.onVariableContentEscapes(self.variable)
def computeExpressionLen(self, len_node, trace_collection):
if self.variable_trace is not None and self.variable_trace.isAssignTrace():
value = self.variable_trace.getAssignNode().subnode_source
shape = value.getValueShape()
has_len = shape.hasShapeSlotLen()
if has_len is False:
# Any exception may be raised.
trace_collection.onExceptionRaiseExit(BaseException)
return makeRaiseTypeErrorExceptionReplacementFromTemplateAndValue(
template="object of type '%s' has no len()",
operation="len",
original_node=len_node,
value_node=self,
)
elif has_len is True:
iter_length = value.getIterationLength()
if iter_length is not None:
result = makeConstantRefNode(
constant=int(iter_length), # make sure to downcast long
source_ref=len_node.getSourceReference(),
)
return (
result,
"new_constant",
lambda: "Predicted 'len' result of variable '%s'."
% self.getVariableName(),
)
# The variable itself is to be considered escaped.
trace_collection.markActiveVariableAsEscaped(self.variable)
# Any code could be run, note that.
trace_collection.onControlFlowEscape(self)
# Any exception may be raised.
trace_collection.onExceptionRaiseExit(BaseException)
return len_node, None, None
def computeExpressionAttribute(self, lookup_node, attribute_name, trace_collection):
if self.variable_trace is not None:
attribute_node = self.variable_trace.getAttributeNode()
if attribute_node is not None:
# The variable itself is to be considered escaped no matter what, since
# we don't know exactly what the attribute is used for later on. We would
# have to attach the variable to the result created here in such a way,
# that e.g. calling it will make it escaped only.
trace_collection.markActiveVariableAsEscaped(self.variable)
return attribute_node.computeExpressionAttribute(
lookup_node=lookup_node,
attribute_name=attribute_name,
trace_collection=trace_collection,
)
# Any code could be run, note that.
trace_collection.onControlFlowEscape(self)
# The variable itself is to be considered escaped.
trace_collection.markActiveVariableAsEscaped(self.variable)
if not self.isKnownToHaveAttribute(attribute_name):
trace_collection.onExceptionRaiseExit(BaseException)
return lookup_node, None, None
def mayRaiseExceptionAttributeLookup(self, exception_type, attribute_name):
return not self.isKnownToHaveAttribute(attribute_name)
def isKnownToHaveAttribute(self, attribute_name):
if self.variable_trace is not None:
type_shape = self.variable_trace.getTypeShape()
if type_shape.isKnownToHaveAttribute(attribute_name):
return True
attribute_node = self.variable_trace.getAttributeNode()
if attribute_node is not None:
return attribute_node.isKnownToHaveAttribute(attribute_name)
return None
def computeExpressionImportName(self, import_node, import_name, trace_collection):
# TODO: For include modules, something might be possible here.
return self.computeExpressionAttribute(
lookup_node=import_node,
attribute_name=import_name,
trace_collection=trace_collection,
)
def computeExpressionComparisonIn(self, in_node, value_node, trace_collection):
tags = None
message = None
# Any code could be run, note that.
trace_collection.onControlFlowEscape(in_node)
if self.variable_trace.hasShapeDictionaryExact():
tags = "new_expression"
message = """\
Check '%s' on dictionary lowered to dictionary '%s'.""" % (
in_node.comparator,
in_node.comparator,
)
if in_node.comparator == "In":
in_node = ExpressionDictOperationIn(
key=value_node,
dict_arg=self,
source_ref=in_node.getSourceReference(),
)
else:
in_node = ExpressionDictOperationNotIn(
key=value_node,
dict_arg=self,
source_ref=in_node.getSourceReference(),
)
# Any exception may be raised.
if in_node.mayRaiseException(BaseException):
trace_collection.onExceptionRaiseExit(BaseException)
return in_node, tags, message
def computeExpressionSetSubscript(
self, set_node, subscript, value_node, trace_collection
):
tags = None
message = None
# By default, an subscript may change everything about the lookup
# source.
if self.variable_trace.hasShapeDictionaryExact():
result = StatementDictOperationSet(
dict_arg=self,
key=subscript,
value=value_node,
source_ref=set_node.getSourceReference(),
)
change_tags = "new_statements"
change_desc = """\
Subscript assignment to dictionary lowered to dictionary assignment."""
trace_collection.removeKnowledge(self)
result2, change_tags2, change_desc2 = result.computeStatementOperation(
trace_collection
)
if result2 is not result:
trace_collection.signalChange(
tags=change_tags,
source_ref=self.source_ref,
message=change_desc,
)
return result2, change_tags2, change_desc2
else:
return result, change_tags, change_desc
trace_collection.removeKnowledge(self)
# Any code could be run, note that.
trace_collection.onControlFlowEscape(self)
# Any exception might be raised.
if set_node.mayRaiseException(BaseException):
trace_collection.onExceptionRaiseExit(BaseException)
return set_node, tags, message
def computeExpressionDelSubscript(self, del_node, subscript, trace_collection):
tags = None
message = None
if self.variable_trace.hasShapeDictionaryExact():
result = StatementDictOperationRemove(
dict_arg=self,
key=subscript,
source_ref=del_node.getSourceReference(),
)
change_tags = "new_statements"
change_desc = """\
Subscript del to dictionary lowered to dictionary del."""
trace_collection.removeKnowledge(self)
result2, change_tags2, change_desc2 = result.computeStatementOperation(
trace_collection
)
if result2 is not result:
trace_collection.signalChange(
tags=change_tags,
source_ref=self.source_ref,
message=change_desc,
)
return result2, change_tags2, change_desc2
else:
return result, change_tags, change_desc
# By default, an subscript may change everything about the lookup
# source.
# Any code could be run, note that.
trace_collection.onControlFlowEscape(self)
# Any exception might be raised.
if del_node.mayRaiseException(BaseException):
trace_collection.onExceptionRaiseExit(BaseException)
return del_node, tags, message
def computeExpressionSubscript(self, lookup_node, subscript, trace_collection):
tags = None
message = None
if self.variable_trace.hasShapeDictionaryExact():
return trace_collection.computedExpressionResult(
expression=ExpressionDictOperationItem(
dict_arg=self,
key=subscript,
source_ref=lookup_node.getSourceReference(),
),
change_tags="new_expression",
change_desc="""\
Subscript look-up to dictionary lowered to dictionary look-up.""",
)
# Any code could be run, note that.
trace_collection.onControlFlowEscape(self)
# Any exception might be raised.
if lookup_node.mayRaiseException(BaseException):
trace_collection.onExceptionRaiseExit(BaseException)
return lookup_node, tags, message
def _applyReplacement(self, trace_collection, replacement):
trace_collection.signalChange(
"new_expression",
self.source_ref,
"Value propagated for '%s' from '%s'."
% (self.variable.getName(), replacement.getSourceReference().getAsString()),
)
# Special case for in-place assignments.
if self.parent.isExpressionOperationInplace():
statement = self.parent.parent
if statement.isStatementAssignmentVariable():
statement.removeMarkAsInplaceSuspect()
# Need to compute the replacement still.
return replacement.computeExpressionRaw(trace_collection)
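# Conceptual illustration (not part of Nuitka itself) of the rewrite that
# computeExpressionLen above enables once the assign trace exposes a known
# iteration length:
#
#   x = (1, 2, 3)   # trace records a value whose iteration length is 3
#   n = len(x)      # foldable to the constant 3 at compile time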
_hard_names = ("dir", "eval", "exec", "execfile", "locals", "vars", "super")
class ExpressionVariableRef(ExpressionVariableRefBase):
kind = "EXPRESSION_VARIABLE_REF"
__slots__ = ()
def __init__(self, variable, source_ref):
assert variable is not None
ExpressionVariableRefBase.__init__(
self, variable=variable, source_ref=source_ref
)
@staticmethod
def isExpressionVariableRef():
return True
def getDetails(self):
return {"variable": self.variable}
def getDetailsForDisplay(self):
return {
"variable_name": self.variable.getName(),
"owner": self.variable.getOwner().getCodeName(),
}
@staticmethod
def isExpressionTempVariableRef():
return True
@classmethod
def fromXML(cls, provider, source_ref, **args):
assert cls is ExpressionVariableRef, cls
owner = getOwnerFromCodeName(args["owner"])
variable = owner.getProvidedVariable(args["variable_name"])
return cls(variable=variable, source_ref=source_ref)
def getVariable(self):
return self.variable
def setVariable(self, variable):
assert isinstance(variable, Variables.Variable), repr(variable)
self.variable = variable
def computeExpressionRaw(self, trace_collection):
# Terribly detailed, pylint: disable=too-many-branches,too-many-statements
variable = self.variable
assert variable is not None
self.variable_trace = trace_collection.getVariableCurrentTrace(
variable=variable
)
replacement = self.variable_trace.getReplacementNode(self)
if replacement is not None:
return self._applyReplacement(trace_collection, replacement)
if not self.variable_trace.mustHaveValue():
# TODO: This could be way more specific surely, either NameError or UnboundLocalError
# could be decided from context.
trace_collection.onExceptionRaiseExit(BaseException)
if variable.isModuleVariable() and variable.hasDefiniteWrites() is False:
variable_name = self.variable.getName()
if variable_name in Builtins.builtin_exception_names:
if not self.variable.getOwner().getLocalsScope().isEscaped():
from .BuiltinRefNodes import ExpressionBuiltinExceptionRef
new_node = ExpressionBuiltinExceptionRef(
exception_name=self.variable.getName(),
source_ref=self.source_ref,
)
change_tags = "new_builtin_ref"
change_desc = """\
Module variable '%s' found to be built-in exception reference.""" % (
variable_name
)
else:
self.variable_trace.addUsage()
new_node = self
change_tags = None
change_desc = None
elif variable_name in Builtins.builtin_names:
if (
variable_name in _hard_names
or not self.variable.getOwner().getLocalsScope().isEscaped()
):
from .BuiltinRefNodes import makeExpressionBuiltinRef
new_node = makeExpressionBuiltinRef(
builtin_name=variable_name,
locals_scope=self.getFunctionsLocalsScope(),
source_ref=self.source_ref,
)
change_tags = "new_builtin_ref"
change_desc = """\
Module variable '%s' found to be built-in reference.""" % (
variable_name
)
else:
self.variable_trace.addUsage()
new_node = self
change_tags = None
change_desc = None
elif variable_name == "__name__":
new_node = ExpressionModuleAttributeNameRef(
variable=variable, source_ref=self.source_ref
)
change_tags = "new_expression"
change_desc = """\
Replaced read-only module attribute '__name__' with module attribute reference."""
elif variable_name == "__package__":
new_node = ExpressionModuleAttributePackageRef(
variable=variable, source_ref=self.source_ref
)
change_tags = "new_expression"
change_desc = """\
Replaced read-only module attribute '__package__' with module attribute reference."""
elif variable_name == "__loader__" and python_version >= 0x300:
new_node = ExpressionModuleAttributeLoaderRef(
variable=variable, source_ref=self.source_ref
)
change_tags = "new_expression"
change_desc = """\
Replaced read-only module attribute '__loader__' with module attribute reference."""
elif variable_name == "__spec__" and python_version >= 0x340:
new_node = ExpressionModuleAttributeSpecRef(
variable=variable, source_ref=self.source_ref
)
change_tags = "new_expression"
change_desc = """\
Replaced read-only module attribute '__spec__' with module attribute reference."""
else:
self.variable_trace.addUsage()
# Probably should give a warning once about it.
new_node = self
change_tags = None
change_desc = None
return new_node, change_tags, change_desc
self.variable_trace.addUsage()
if self.variable_trace.mustNotHaveValue():
assert self.variable.isLocalVariable(), self.variable
variable_name = self.variable.getName()
result = makeRaiseExceptionReplacementExpression(
expression=self,
exception_type="UnboundLocalError",
exception_value="""local variable '%s' referenced before assignment"""
% variable_name,
)
return (
result,
"new_raise",
"Variable access of not initialized variable '%s'" % variable_name,
)
return self, None, None
def computeExpressionCall(self, call_node, call_args, call_kw, trace_collection):
if self.variable_trace is not None:
attribute_node = self.variable_trace.getAttributeNode()
if attribute_node is not None:
# The variable itself is to be considered escaped no matter what, since
# we don't know exactly what the attribute is used for later on. We would
# have to attach the variable to the result created here in such a way,
# that e.g. calling it will make it escaped only.
trace_collection.markActiveVariableAsEscaped(self.variable)
return attribute_node.computeExpressionCallViaVariable(
call_node=call_node,
variable_ref_node=self,
call_args=call_args,
call_kw=call_kw,
trace_collection=trace_collection,
)
# The called and the arguments escape for good.
self.onContentEscapes(trace_collection)
if call_args is not None:
call_args.onContentEscapes(trace_collection)
if call_kw is not None:
call_kw.onContentEscapes(trace_collection)
# Any code could be run, note that.
trace_collection.onControlFlowEscape(self)
# Any exception may be raised.
trace_collection.onExceptionRaiseExit(BaseException)
if (
self.variable.getName() in _hard_names
and self.variable.isIncompleteModuleVariable()
):
# Just inform the collection that all escaped.
trace_collection.onLocalsUsage(locals_scope=self.getFunctionsLocalsScope())
return call_node, None, None
def computeExpressionBool(self, trace_collection):
if self.variable_trace is not None:
attribute_node = self.variable_trace.getAttributeNode()
if attribute_node is not None:
if (
attribute_node.isCompileTimeConstant()
and not attribute_node.isMutable()
):
return attribute_node.computeExpressionBool(trace_collection)
# TODO: This is probably only default stuff here, that could be compressed.
if not self.mayRaiseException(BaseException) and self.mayRaiseExceptionBool(
BaseException
):
trace_collection.onExceptionRaiseExit(BaseException)
return None, None, None
def hasShapeDictionaryExact(self):
return (
self.variable_trace is not None
and self.variable_trace.hasShapeDictionaryExact()
)
def hasShapeStrExact(self):
return (
self.variable_trace is not None and self.variable_trace.hasShapeStrExact()
)
def hasShapeUnicodeExact(self):
return (
self.variable_trace is not None
and self.variable_trace.hasShapeUnicodeExact()
)
def getTruthValue(self):
return self.variable_trace.getTruthValue()
def getComparisonValue(self):
return self.variable_trace.getComparisonValue()
@staticmethod
def isKnownToBeIterable(count):
return None
def mayHaveSideEffects(self):
return not self.variable_trace.mustHaveValue()
def mayRaiseException(self, exception_type):
return self.variable_trace is None or not self.variable_trace.mustHaveValue()
def mayRaiseExceptionBool(self, exception_type):
return (
self.variable_trace is None
or not self.variable_trace.mustHaveValue()
or not self.variable_trace.getTypeShape().hasShapeSlotBool()
)
def getFunctionsLocalsScope(self):
return self.getParentVariableProvider().getLocalsScope()
class ExpressionVariableOrBuiltinRef(ExpressionVariableRef):
kind = "EXPRESSION_VARIABLE_OR_BUILTIN_REF"
__slots__ = ("locals_scope",)
def __init__(self, variable, locals_scope, source_ref):
ExpressionVariableRef.__init__(self, variable=variable, source_ref=source_ref)
self.locals_scope = locals_scope
def getDetails(self):
return {"variable": self.variable, "locals_scope": self.locals_scope}
def getFunctionsLocalsScope(self):
return self.locals_scope
def makeExpressionVariableRef(variable, locals_scope, source_ref):
if variable.getName() in _hard_names:
return ExpressionVariableOrBuiltinRef(
variable=variable, locals_scope=locals_scope, source_ref=source_ref
)
else:
return ExpressionVariableRef(variable=variable, source_ref=source_ref)
# Note: Temporary variable references are to be guaranteed to not raise
# therefore no side effects.
class ExpressionTempVariableRef(
ExpressionNoSideEffectsMixin, ExpressionVariableRefBase
):
kind = "EXPRESSION_TEMP_VARIABLE_REF"
def __init__(self, variable, source_ref):
assert variable.isTempVariable()
ExpressionVariableRefBase.__init__(
self, variable=variable, source_ref=source_ref
)
def getDetailsForDisplay(self):
return {
"temp_name": self.variable.getName(),
"owner": self.variable.getOwner().getCodeName(),
}
def getDetails(self):
return {"variable": self.variable}
@classmethod
def fromXML(cls, provider, source_ref, **args):
assert cls is ExpressionTempVariableRef, cls
owner = getOwnerFromCodeName(args["owner"])
variable = owner.getTempVariable(None, args["temp_name"])
return cls(variable=variable, source_ref=source_ref)
def computeExpressionRaw(self, trace_collection):
self.variable_trace = trace_collection.getVariableCurrentTrace(
variable=self.variable
)
replacement = self.variable_trace.getReplacementNode(self)
if replacement is not None:
return self._applyReplacement(trace_collection, replacement)
self.variable_trace.addUsage()
# Nothing to do here.
return self, None, None
def _makeIterationNextReplacementNode(
self, trace_collection, next_node, iterator_assign_node
):
from .OperatorNodes import makeExpressionOperationBinaryInplace
from .VariableAssignNodes import makeStatementAssignmentVariable
provider = trace_collection.getOwner()
outline_body = ExpressionOutlineBody(
provider=provider,
name="next_value_accessor",
source_ref=self.source_ref,
)
if next_node.isExpressionSpecialUnpack():
source = ExpressionSubscriptLookupForUnpack(
expression=ExpressionTempVariableRef(
variable=iterator_assign_node.tmp_iterated_variable,
source_ref=self.source_ref,
),
subscript=ExpressionTempVariableRef(
variable=iterator_assign_node.tmp_iteration_count_variable,
source_ref=self.source_ref,
),
expected=next_node.getExpected(),
source_ref=self.source_ref,
)
else:
source = ExpressionSubscriptLookupForUnpack(
expression=ExpressionTempVariableRef(
variable=iterator_assign_node.tmp_iterated_variable,
source_ref=self.source_ref,
),
subscript=ExpressionTempVariableRef(
variable=iterator_assign_node.tmp_iteration_count_variable,
source_ref=self.source_ref,
),
expected=None,
source_ref=self.source_ref,
)
statements = (
makeStatementAssignmentVariable(
variable=iterator_assign_node.tmp_iteration_next_variable,
source=source,
source_ref=self.source_ref,
),
makeStatementAssignmentVariable(
variable=iterator_assign_node.tmp_iteration_count_variable,
source=makeExpressionOperationBinaryInplace(
left=ExpressionTempVariableRef(
variable=iterator_assign_node.tmp_iteration_count_variable,
source_ref=self.source_ref,
),
right=makeConstantRefNode(constant=1, source_ref=self.source_ref),
operator="IAdd",
source_ref=self.source_ref,
),
source_ref=self.source_ref,
),
makeStatementReturn(
expression=ExpressionTempVariableRef(
variable=iterator_assign_node.tmp_iteration_next_variable,
source_ref=self.source_ref,
),
source_ref=self.source_ref,
),
)
outline_body.setChild(
"body",
makeStatementsSequenceFromStatements(*statements),
)
return False, trace_collection.computedExpressionResultRaw(
outline_body,
change_tags="new_expression",
change_desc=lambda: "Iterator 'next' converted to %s."
% iterator_assign_node.getIterationIndexDesc(),
)
def computeExpressionNext1(self, next_node, trace_collection):
iteration_source_node = self.variable_trace.getIterationSourceNode()
if iteration_source_node is not None:
if iteration_source_node.parent.isStatementAssignmentVariableIterator():
iterator_assign_node = iteration_source_node.parent
if iterator_assign_node.tmp_iterated_variable is not None:
return self._makeIterationNextReplacementNode(
trace_collection=trace_collection,
next_node=next_node,
iterator_assign_node=iterator_assign_node,
)
iteration_source_node.onContentIteratedEscapes(trace_collection)
if iteration_source_node.mayHaveSideEffectsNext():
trace_collection.onControlFlowEscape(self)
else:
self.onContentEscapes(trace_collection)
# Any code could be run, note that.
if self.mayHaveSideEffectsNext():
trace_collection.onControlFlowEscape(self)
# Any exception may be raised.
trace_collection.onExceptionRaiseExit(BaseException)
return True, (next_node, None, None)
def mayRaiseExceptionImportName(self, exception_type, import_name):
if self.variable_trace is not None and self.variable_trace.isAssignTrace():
return self.variable_trace.getAssignNode().subnode_source.mayRaiseExceptionImportName(
exception_type, import_name
)
else:
return True
@staticmethod
def isKnownToBeIterableAtMin(count):
# TODO: See through the variable current trace.
return None
/KalturaApiClient-19.3.0.tar.gz/KalturaApiClient-19.3.0/KalturaClient/Plugins/BulkUpload.py
from __future__ import absolute_import
from .Core import *
from ..Base import (
getXmlNodeBool,
getXmlNodeFloat,
getXmlNodeInt,
getXmlNodeText,
KalturaClientPlugin,
KalturaEnumsFactory,
KalturaObjectBase,
KalturaObjectFactory,
KalturaParams,
KalturaServiceBase,
)
########## enums ##########
########## classes ##########
# @package Kaltura
# @subpackage Client
class KalturaBulkServiceData(KalturaObjectBase):
"""This class represents object-specific data passed to the
bulk upload job."""
def __init__(self):
KalturaObjectBase.__init__(self)
PROPERTY_LOADERS = {
}
def fromXml(self, node):
KalturaObjectBase.fromXml(self, node)
self.fromXmlImpl(node, KalturaBulkServiceData.PROPERTY_LOADERS)
def toParams(self):
kparams = KalturaObjectBase.toParams(self)
kparams.put("objectType", "KalturaBulkServiceData")
return kparams
########## services ##########
# @package Kaltura
# @subpackage Client
class KalturaBulkService(KalturaServiceBase):
"""Bulk upload service is used to upload & manage bulk uploads"""
def __init__(self, client = None):
KalturaServiceBase.__init__(self, client)
def abort(self, id):
"""Aborts the bulk upload and all its child jobs"""
kparams = KalturaParams()
kparams.addIntIfDefined("id", id);
self.client.queueServiceActionCall("bulkupload_bulk", "abort", "KalturaBulkUpload", kparams)
if self.client.isMultiRequest():
return self.client.getMultiRequestResult()
resultNode = self.client.doQueue()
return KalturaObjectFactory.create(resultNode, 'KalturaBulkUpload')
def get(self, id):
"""Get bulk upload batch job by id"""
kparams = KalturaParams()
kparams.addIntIfDefined("id", id);
self.client.queueServiceActionCall("bulkupload_bulk", "get", "KalturaBulkUpload", kparams)
if self.client.isMultiRequest():
return self.client.getMultiRequestResult()
resultNode = self.client.doQueue()
return KalturaObjectFactory.create(resultNode, 'KalturaBulkUpload')
def list(self, bulkUploadFilter = NotImplemented, pager = NotImplemented):
"""List bulk upload batch jobs"""
kparams = KalturaParams()
kparams.addObjectIfDefined("bulkUploadFilter", bulkUploadFilter)
kparams.addObjectIfDefined("pager", pager)
self.client.queueServiceActionCall("bulkupload_bulk", "list", "KalturaBulkUploadListResponse", kparams)
if self.client.isMultiRequest():
return self.client.getMultiRequestResult()
resultNode = self.client.doQueue()
return KalturaObjectFactory.create(resultNode, 'KalturaBulkUploadListResponse')
def serve(self, id):
"""serve action returns the original file."""
kparams = KalturaParams()
kparams.addIntIfDefined("id", id);
self.client.queueServiceActionCall('bulkupload_bulk', 'serve', None ,kparams)
return self.client.getServeUrl()
def serveLog(self, id):
"""serveLog action returns the log file for the bulk-upload job."""
kparams = KalturaParams()
kparams.addIntIfDefined("id", id);
self.client.queueServiceActionCall('bulkupload_bulk', 'serveLog', None ,kparams)
return self.client.getServeUrl()
########## main ##########
class KalturaBulkUploadClientPlugin(KalturaClientPlugin):
# KalturaBulkUploadClientPlugin
instance = None
# @return KalturaBulkUploadClientPlugin
@staticmethod
def get():
if KalturaBulkUploadClientPlugin.instance == None:
KalturaBulkUploadClientPlugin.instance = KalturaBulkUploadClientPlugin()
return KalturaBulkUploadClientPlugin.instance
# @return array<KalturaServiceBase>
def getServices(self):
return {
'bulk': KalturaBulkService,
}
def getEnums(self):
return {
}
def getTypes(self):
return {
'KalturaBulkServiceData': KalturaBulkServiceData,
}
# @return string
def getName(self):
return 'bulkUpload'
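# Hedged usage sketch (illustrative; not from the original file). It assumes
# an already-configured KalturaClient instance named `client`; construction
# of the client is omitted here.
#
#   service = KalturaBulkService(client)
#   job = service.get(12345)             # fetch a bulk upload batch job
#   log_url = service.serveLog(12345)    # URL serving the job's log file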
/DeepGlow-1.0.0.tar.gz/DeepGlow-1.0.0/paper/plots_paper.ipynb
```
from tensorflow import keras
from matplotlib import pyplot as plt
import matplotlib
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
import random
import tensorflow as tf
from matplotlib import rc
from glob import glob
rootdir = '/home/oliver/Documents/PhD/Projects/NNaglow/boxfit/final/'
model_ism = keras.models.load_model(rootdir+'model-ism-final.hdf5', compile=False)
model_wind = keras.models.load_model(rootdir+'model-wind-final.hdf5', compile=False)
rootname='data/boxfit_ism_final'
train_dataset_ism = pd.read_csv(rootdir+rootname+'_trainfeatures.csv')
test_dataset_ism = pd.read_csv(rootdir+rootname+'_testfeatures.csv')
train_labels_ism = pd.read_csv(rootdir+rootname+'_trainlabels.csv')
test_labels_ism = pd.read_csv(rootdir+rootname+'_testlabels.csv')
rootname='data/boxfit_wind_final'
train_dataset_wind = pd.read_csv(rootdir+rootname+'_trainfeatures.csv')
test_dataset_wind = pd.read_csv(rootdir+rootname+'_testfeatures.csv')
train_labels_wind = pd.read_csv(rootdir+rootname+'_trainlabels.csv')
test_labels_wind = pd.read_csv(rootdir+rootname+'_testlabels.csv')
scaler_in_ism = StandardScaler()
train_dataset_scaled_ism = scaler_in_ism.fit_transform(train_dataset_ism)
test_dataset_scaled_ism = scaler_in_ism.transform(test_dataset_ism)
scaler_out_ism= StandardScaler()
train_labels_scaled_ism = scaler_out_ism.fit_transform(train_labels_ism)
scaler_in_wind = StandardScaler()
train_dataset_scaled_wind = scaler_in_wind.fit_transform(train_dataset_wind)
test_dataset_scaled_wind = scaler_in_wind.transform(test_dataset_wind)
scaler_out_wind= StandardScaler()
train_labels_scaled_wind = scaler_out_wind.fit_transform(train_labels_wind)
X = train_dataset_ism
Xmean = X.mean(axis=0).values
Xstd = X.std(axis=0).values
Y = train_labels_ism
Ymean = Y.mean(axis=0).values
Ystd = Y.std(axis=0).values
scale_facs = np.append(np.c_[Xmean,Xstd],np.c_[Ymean,Ystd])
print(Xstd - scale_facs[:-234][1::2])
np.savetxt(rootdir+'scale_facs_ism_final.csv',scale_facs,delimiter=',')
X = train_dataset_wind
Xmean = X.mean(axis=0).values
Xstd = X.std(axis=0).values
Y = train_labels_wind
Ymean = Y.mean(axis=0).values
Ystd = Y.std(axis=0).values
scale_facs = np.append(np.c_[Xmean,Xstd],np.c_[Ymean,Ystd])
print(Xstd - scale_facs[:-234][1::2])
np.savetxt(rootdir+'scale_facs_wind_final.csv',scale_facs,delimiter=',')
nids = np.arange(20,2020,20)
NNisms = []
NNwinds = []
for n in nids:
NNisms.append('model-stdsc-'+str(n)+'.hdf5')
NNwinds.append('model-wind-stdsc-'+str(n)+'.hdf5')
ism_errs_test = []
wind_errs_test = []
for NN in NNisms:
model_ism = keras.models.load_model(rootdir+'NNs/'+NN,compile=False)
test_predictions_ism = model_ism.predict(test_dataset_scaled_ism)
test_predictions_unscaled_ism = scaler_out_ism.inverse_transform(test_predictions_ism)
test_predictions_ism = 10**test_predictions_unscaled_ism
labels_ism = 10**test_labels_ism
err_ism = (test_predictions_ism-labels_ism)/labels_ism
ism_errs_test.append(np.nanmedian(abs(err_ism)))
print(NN)
np.save('ism_errs_stdsc_test.npy',ism_errs_test)
for NN in NNwinds:
model_wind = keras.models.load_model(rootdir+'NNs/'+NN,compile=False)
test_predictions_wind = model_wind.predict(test_dataset_scaled_wind)
test_predictions_unscaled_wind = scaler_out_wind.inverse_transform(test_predictions_wind)
test_predictions_wind = 10**test_predictions_unscaled_wind
labels_wind = 10**test_labels_wind
err_wind = (test_predictions_wind-labels_wind)/labels_wind
wind_errs_test.append(np.nanmedian(abs(err_wind)))
print(NN)
np.save('wind_errs_stdsc_test.npy',wind_errs_test)
trainingsize_NNs = ['boxfit_ism_stdsc_5623.h5','boxfit_ism_stdsc_11246.h5', 'boxfit_ism_stdsc_22493.h5', 'boxfit_ism_stdsc_44987.h5', 'boxfit_ism_stdsc_89975.h5', 'boxfit_ism_stdsc_179950.hdf5']
trainingsize = [5623,11246,22493,44987,89975,179950]
trainingsize_errs = []
for NN in trainingsize_NNs:
model = keras.models.load_model(rootdir+'NNs/trainingdata_subset/'+NN,compile=False)
test_predictions = model.predict(test_dataset_scaled_ism)
test_predictions_unscaled = scaler_out_ism.inverse_transform(test_predictions)
test_predictions = 10**test_predictions_unscaled
labels = 10**test_labels_ism
err= (test_predictions-labels)/labels
trainingsize_errs.append(np.nanmedian(abs(err)))
np.save('ism_errs_trainingsize.npy',trainingsize_errs)
print(trainingsize_errs)
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
rc('text', usetex=True)
plt.figure(figsize=(7,3.625))
plt.style.use('science')
plt.scatter(trainingsize,trainingsize_errs,c='C1',label='ISM environment')
plt.plot(trainingsize,trainingsize_errs,linestyle='--',c='C1')
matplotlib.rcParams['legend.frameon'] = True
plt.legend(loc='upper right',fontsize=12)
plt.xscale('log')
plt.yscale('log')
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.ylim([0.02,0.3])
plt.xlabel(r'Size of training dataset',fontsize=16)
plt.ylabel(r'Median fractional error',fontsize=16)
plt.savefig('plots/loss_trainingsize.pdf',dpi=300)
plt.show()
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
rc('text', usetex=True)
nids = np.arange(20,2020,20)
ism_errs_test = np.load('ism_errs_stdsc_test.npy')
wind_errs_test = np.load('wind_errs_stdsc_test.npy')
print('Minimum median error for ISM environment: ' +str(ism_errs_test.min()) +' for NN model: '+ 'model-stdsc-'+str(nids[ism_errs_test.argmin()]) +'.hdf5')
print('Minimum median error for wind environment: ' +str(wind_errs_test.min()) +' for NN model: '+ 'model-wind-stdsc-'+str(nids[wind_errs_test.argmin()]) +'.hdf5')
plt.style.use('science')
plt.figure(figsize=(7,3.625))
plt.plot(nids,wind_errs_test,label='wind environment')
plt.plot(nids,ism_errs_test,label='ISM environment')
ax = plt.gca()
plt.xlabel(r'Epoch',fontsize=16)
plt.ylabel(r'Median fractional error',fontsize=16)
matplotlib.rcParams['legend.frameon'] = True
plt.legend(loc='upper right',fontsize=12)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.ylim([0.012,0.05])
plt.margins(0,0)
plt.savefig('plots/loss_per_epoch.pdf',dpi=300)
model_ism = keras.models.load_model(rootdir+'model-ism-final.hdf5', compile=False)
model_wind = keras.models.load_model(rootdir+'model-wind-final.hdf5', compile=False)
test_predictions_ism = model_ism.predict(test_dataset_scaled_ism)
test_predictions_unscaled_ism = scaler_out_ism.inverse_transform(test_predictions_ism)
test_predictions_ism = 10**test_predictions_unscaled_ism
labels_ism = 10**test_labels_ism
test_predictions_wind = model_wind.predict(test_dataset_scaled_wind)
test_predictions_unscaled_wind = scaler_out_wind.inverse_transform(test_predictions_wind)
test_predictions_wind = 10**test_predictions_unscaled_wind
labels_wind = 10**test_labels_wind
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
rc('text', usetex=True)
plt.style.use('science')
zerocount_ism = np.sum(np.isnan(labels_ism),axis=1)
zerocount_wind = np.sum(np.isnan(labels_wind),axis=1)
err_ism = (test_predictions_ism-labels_ism)/labels_ism
err_wind = (test_predictions_wind-labels_wind)/labels_wind
tdata = np.geomspace(0.1,1000,117)
tbegin = 30
tend = 116
print(tdata[tbegin],tdata[tend])
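# Note (interpretation, not in the original notebook): tdata is the geometric
# time grid of the light curves; tbegin/tend select the pruned window from
# tdata[30] (~1.1) up to tdata[116] (=1000) in the grid's time units.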
test_predictions_ism_cut = test_predictions_ism[:,tbegin:tend+1]
test_predictions_wind_cut = test_predictions_wind[:,tbegin:tend+1]
labels_ism_cut = labels_ism.iloc[:,tbegin:tend+1]
labels_wind_cut = labels_wind.iloc[:,tbegin:tend+1]
zerocount_ism_cut = np.sum(np.isnan(labels_ism_cut),axis=1)
zerocount_wind_cut = np.sum(np.isnan(labels_wind_cut),axis=1)
err_ism_cut = (test_predictions_ism_cut-labels_ism_cut)/labels_ism_cut
err_wind_cut = (test_predictions_wind_cut-labels_wind_cut)/labels_wind_cut
print('Median MeanFE pruned ISM')
print(np.nanmedian(np.nanmean(abs(err_ism_cut),axis=1)))
print('Median MeanFE all ISM')
print(np.nanmedian(np.nanmean(abs(err_ism),axis=1)))
plt.figure(figsize=(7,3.625))
plt.hist(np.log10(np.nanmean(abs(err_ism),axis=1)),bins=100,histtype='barstacked',edgecolor='black',linewidth=0.5,alpha=0.8,label=r'ISM environment',density=True)
plt.hist(np.log10(np.nanmean(abs(err_ism_cut),axis=1)),bins=100,histtype='barstacked',edgecolor='black',linewidth=0.5,alpha=0.8,label=r'Pruned data set',density=True)
plt.xlabel(r'$\mathrm{\log_{10}}$ MeanFE',fontsize=16)
plt.ylabel(r'Density',fontsize=16)
plt.xlim([-2.5,1.0])
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
matplotlib.rcParams['legend.frameon'] = True
plt.legend(loc='upper right',fontsize=12)
plt.margins(0,0)
plt.savefig('plots/LC_mean_ism.pdf',dpi=300)
plt.show()
print('Median MeanFE pruned wind')
print(np.nanmedian(np.nanmean(abs(err_wind_cut),axis=1)))
print('Median MeanFE all wind')
print(np.nanmedian(np.nanmean(abs(err_wind),axis=1)))
plt.figure(figsize=(7,3.625))
plt.hist(np.log10(np.nanmean(abs(err_wind),axis=1)),bins=100,color='maroon',histtype='barstacked',edgecolor='black',linewidth=0.5,alpha=0.8,label=r'Wind environment',density=True)
plt.hist(np.log10(np.nanmean(abs(err_wind_cut),axis=1)),bins=100,color='mediumturquoise',histtype='barstacked',edgecolor='black',linewidth=0.5,alpha=0.8,label=r'Pruned data set',density=True)
plt.xlabel(r'$\mathrm{\log_{10}}$ MeanFE',fontsize=16)
plt.ylabel(r'Density',fontsize=16)
plt.xlim([-2.5,1.0])
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
matplotlib.rcParams['legend.frameon'] = True
plt.legend(loc='upper right',fontsize=12)
plt.margins(0,0)
plt.savefig('plots/LC_mean_wind.pdf',dpi=300)
plt.show()
print('Median MaxFE pruned ISM')
print(np.nanmedian(np.nanmax(abs(err_ism_cut),axis=1)))
print('Median MaxFE all ISM')
print(np.nanmedian(np.nanmax(abs(err_ism),axis=1)))
plt.figure(figsize=(7,3.625))
plt.hist(np.log10(np.nanmax(abs(err_ism),axis=1)),bins=100,histtype='barstacked',edgecolor='black',linewidth=0.5,alpha=0.8,label=r'ISM environment',density=True)
plt.hist(np.log10(np.nanmax(abs(err_ism_cut),axis=1)),bins=100,histtype='barstacked',edgecolor='black',linewidth=0.5,alpha=0.7,label=r'Pruned data set',density=True)
plt.xlabel(r'$\mathrm{\log_{10}}$ MaxFE',fontsize=16)
plt.ylabel(r'Density',fontsize=16)
plt.xlim([-2.,1.5])
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
matplotlib.rcParams['legend.frameon'] = True
plt.legend(loc='upper right',fontsize=12)
plt.margins(0,0)
plt.savefig('plots/LC_max_ism.pdf',dpi=300)
plt.show()
print('Median MaxFE pruned wind')
MaxFE_wind_cut = np.nanmax(abs(err_wind_cut),axis=1)
print(np.nanmedian(MaxFE_wind_cut))
MaxFE_wind = np.nanmax(abs(err_wind),axis=1)
print('Median MaxFE all wind')
print(np.nanmedian(MaxFE_wind))
plt.figure(figsize=(7,3.625))
plt.hist(np.log10(np.nanmax(abs(err_wind),axis=1)),bins=100,color = 'maroon',histtype='barstacked',edgecolor='black',linewidth=0.5,alpha=0.8,label=r'Wind environment',density=True)
plt.hist(np.log10(np.nanmax(abs(err_wind_cut),axis=1)),bins=100,color='mediumturquoise',histtype='barstacked',edgecolor='black',linewidth=0.5,alpha=0.7,label=r'Pruned data set',density=True)
plt.xlabel(r'$\mathrm{\log_{10}}$ MaxFE',fontsize=16)
plt.ylabel(r'Density',fontsize=16)
plt.xlim([-2.,1.5])
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
matplotlib.rcParams['legend.frameon'] = True
plt.legend(loc='upper right',fontsize=12)
plt.margins(0,0)
plt.savefig('plots/LC_max_wind.pdf',dpi=300)
plt.show()
notcovered_ism = len(train_labels_ism[np.any(np.isnan(train_labels_ism.iloc[:,:]),axis=1)])/len(train_labels_ism)
notcovered_wind = len(train_labels_wind[np.any(np.isnan(train_labels_wind.iloc[:,:]),axis=1)])/len(train_labels_wind)
print(notcovered_ism,notcovered_wind)
GRB_file = 'grb970508data-cropped.dat'
colnames=['time', 'freq', 'flux', 'error']
GRB_data = pd.read_csv(GRB_file,skiprows=1,names=colnames)
mask = GRB_data['flux']!=0
FE = GRB_data['error'][mask]/abs(GRB_data['flux'][mask])
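# Note (interpretation): FE is the fractional measurement error of the
# GRB 970508 observations, a reference point for the network's fractional
# prediction errors reported above.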
print(np.nanmedian(FE),np.nanmean(FE))
plt.hist(np.log10(FE))
```
/Grid2Op-1.9.3-py3-none-any.whl/grid2op/gym_compat/scaler_attr_converter.py
import copy
import numpy as np
from grid2op.dtypes import dt_float
from grid2op.gym_compat.utils import GYM_AVAILABLE, GYMNASIUM_AVAILABLE
class __AuxScalerAttrConverter:
"""
This is a scaler that transforms a initial gym space `init_space` into its scale version.
It can be use to scale the observation by substracting the mean and dividing by the variance for
example.
TODO work in progress !
Need help if you can :-)
.. warning::
Depending on the presence absence of gymnasium and gym packages this class might behave differently.
In grid2op we tried to maintain compatibility both with gymnasium (newest) and gym (legacy,
no more maintained) RL packages. The behaviour is the following:
- :class:`ScalerAttrConverter` will inherit from gymnasium if it's installed
(in this case it will be :class:`ScalerAttrConverterGymnasium`), otherwise it will
inherit from gym (and will be exactly :class:`ScalerAttrConverterLegacyGym`)
- :class:`ScalerAttrConverterGymnasium` will inherit from gymnasium if it's available and never from
from gym
- :class:`ScalerAttrConverterLegacyGym` will inherit from gym if it's available and never from
from gymnasium
See :ref:`gymnasium_gym` for more information
"""
def __init__(self, substract, divide, dtype=None, init_space=None):
super().__init__(
g2op_to_gym=None, gym_to_g2op=None, space=None
) # super should be from type BaseGymAttrConverter
self._substract = np.array(substract)
self._divide = np.array(divide)
self.dtype = dtype if dtype is not None else dt_float
if init_space is not None:
self.initialize_space(init_space)
def initialize_space(self, init_space):
if self._is_init_space:
return
if not isinstance(init_space, type(self)._BoxType):
raise RuntimeError(
"Impossible to scale a converter if this one is not from type space.Box"
)
tmp_space = copy.deepcopy(init_space)
# properly change the low / high value
low_tmp = self.scale(tmp_space.low)
high_tmp = self.scale(tmp_space.high)
low_ = np.minimum(high_tmp, low_tmp)
high_ = np.maximum(high_tmp, low_tmp)
tmp_space.low[:] = low_
tmp_space.high[:] = high_
if self.dtype is not None:
tmp_space.dtype = np.dtype(self.dtype)
tmp_space.low = tmp_space.low.astype(self.dtype)
tmp_space.high = tmp_space.high.astype(self.dtype)
self.base_initialize(
space=tmp_space, g2op_to_gym=self.scale, gym_to_g2op=self.unscale
)
self.dtype = self.my_space.dtype
self._substract = self._substract.astype(self.dtype)
self._divide = self._divide.astype(self.dtype)
self._is_init_space = True
def scale(self, vect):
tmp = vect.astype(self.dtype)
tmp = (tmp - self._substract) / self._divide
return tmp
def unscale(self, vect):
tmp = vect * self._divide + self._substract
return tmp
def close(self):
pass
if GYM_AVAILABLE:
from gym.spaces import Box as LegacyGymBox
from grid2op.gym_compat.base_gym_attr_converter import BaseLegacyGymAttrConverter
ScalerAttrConverterLegacyGym = type("ScalerAttrConverterLegacyGym",
(__AuxScalerAttrConverter, BaseLegacyGymAttrConverter, ),
{"_gymnasium": False,
"_BoxType": LegacyGymBox,
"__module__": __name__})
ScalerAttrConverterLegacyGym.__doc__ = __AuxScalerAttrConverter.__doc__
ScalerAttrConverter = ScalerAttrConverterLegacyGym
ScalerAttrConverter.__doc__ = __AuxScalerAttrConverter.__doc__
if GYMNASIUM_AVAILABLE:
from gymnasium.spaces import Box
from grid2op.gym_compat.base_gym_attr_converter import BaseGymnasiumAttrConverter
ScalerAttrConverterGymnasium = type("ScalerAttrConverterGymnasium",
(__AuxScalerAttrConverter, BaseGymnasiumAttrConverter, ),
{"_gymnasium": True,
"_BoxType": Box,
"__module__": __name__})
ScalerAttrConverterGymnasium.__doc__ = __AuxScalerAttrConverter.__doc__
ScalerAttrConverter = ScalerAttrConverterGymnasium
ScalerAttrConverter.__doc__ = __AuxScalerAttrConverter.__doc__
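# Hedged usage sketch (illustrative; not from the original module): scale a
# Box observation attribute to roughly zero mean / unit spread. mean_vec,
# std_vec and init_box_space are assumed to be supplied by the user.
#
#   conv = ScalerAttrConverter(substract=mean_vec, divide=std_vec)
#   conv.initialize_space(init_box_space)   # init_box_space: a Box space
#   scaled = conv.scale(raw_vect)
#   raw_vect_again = conv.unscale(scaled)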
/Dero-0.15.0-py3-none-any.whl/dero/reg/lag/remove.py
import pandas as pd
from functools import partial
from statsmodels.api import OLS
class SimplifiedBase:
def _set_attrs(self, attr_dict):
for attr in attr_dict:
setattr(self, attr, attr_dict[attr])
class SimplifiedRegressionResult(SimplifiedBase):
direct_attrs = [
'params', 'pvalues', 'tvalues', 'nobs', 'rsquared_adj', 'bse', 'conf_int',
'normalized_cov_params', 'cov_params_default', 'scale', 'cov_params',
't_test'
]
model_attrs = ['exog_names', 'endog_names']
def __init__(self, **kwargs):
_validate_attrs(kwargs, self.direct_attrs + self.model_attrs)
model_kwargs = SimplifiedRegressionResult.pop_model_attrs(kwargs)
self._set_attrs(kwargs)
self.model = SimplifiedModel(**model_kwargs)
@classmethod
def from_statsmodels_result(cls, result):
# Get direct attributes
result_dict = _extract_attrs_into_dict(result, cls.direct_attrs)
# Get attributes of model
result_dict.update(_extract_attrs_into_dict(result.model, cls.model_attrs))
return cls(**result_dict)
@classmethod
def pop_model_attrs(cls, attr_dict):
"""
Note: pops from attr_dict inplace
"""
outdict = {}
for attr in attr_dict:
if attr in cls.model_attrs:
outdict[attr] = attr_dict[attr]
# Must pop separately as cannot change size of iterating dict
[attr_dict.pop(attr) for attr in outdict]
return outdict
class SimplifiedModel(SimplifiedBase):
def __init__(self, **kwargs):
self._set_attrs(kwargs)
class UnsupportedResultAttributeException(Exception):
pass
def _validate_attrs(attr_dict, valid_attrs):
for attr in attr_dict:
if attr not in valid_attrs:
raise UnsupportedResultAttributeException(f'Attribute {attr} not supported for SimplifiedRegressionResult')
def _extract_attrs_into_dict(obj, attrs):
result_dict = {}
# Get direct attributes
for attr in attrs:
value = getattr(obj, attr)
if isinstance(value, (pd.Series, pd.DataFrame, list, dict)):
value = value.copy()
result_dict[attr] = value
return result_dict
def remove_lag_names_from_reg_results(reg_list, lags=(1,)):
"""
Note: partially inplace
"""
out_reg_list = []
for ambiguous_result in reg_list:
# Determine type of result
if isinstance(ambiguous_result, tuple): # Tuple of result, fe_dict
result = ambiguous_result[0]
else: # just a single result
result = ambiguous_result
# Actually replace names
result = _remove_lag_name_from_reg_result(result, lags=lags)
# Add to output, depending on type of result
if isinstance(ambiguous_result, tuple): # Tuple of result, fe_dict
out_reg_list.append((result, ambiguous_result[1]))
else: # just a single result
out_reg_list.append(result)
return out_reg_list
def _remove_lag_name_from_reg_result(result, lags=(1,)):
"""
Note: partially inplace
"""
result = SimplifiedRegressionResult.from_statsmodels_result(result)
# Modify base properties inplace
[
_remove_lag_names_from_ambiguous_property(getattr(result, item), lags=lags) for item in (
'params', 'pvalues', 'tvalues', 'bse', 'normalized_cov_params'
)
]
# Modify model properties and reassign (functions not inplace)
for attr in ['endog_names', 'exog_names']:
setattr(
result.model,
attr,
_remove_lag_names_from_ambiguous_property(
getattr(result.model, attr),
lags=lags)
)
return result
def _remove_lag_names_from_ambiguous_property(ambiguous, lags=(1,)):
"""
Note: Series and DataFrame operations inplace, str and list operations not inplace
"""
if isinstance(ambiguous, pd.DataFrame):
lag_func = partial(_remove_lag_names_from_df_index_and_columns, ambiguous)
elif isinstance(ambiguous, pd.Series):
lag_func = partial(_remove_lag_names_from_series_index, ambiguous)
elif isinstance(ambiguous, str):
lag_func = partial(_remove_lag_names_from_varname, ambiguous)
elif isinstance(ambiguous, list):
lag_func = partial(_remove_lag_names_from_list, ambiguous)
else:
raise ValueError(f'Must pass DataFrame, Series, str, or list. Got type {type(ambiguous)}')
return lag_func(lags=lags)
def _remove_lag_names_from_df_index_and_columns(df, lags=(1,)):
"""
Note: inplace
"""
[_remove_one_lag_names_from_df_index_and_columns(df, num_lags=num_lags) for num_lags in lags]
def _remove_lag_names_from_series_index(series, lags=(1,)):
"""
Note: inplace
"""
[_remove_one_lag_names_from_series_index(series, num_lags=num_lags) for num_lags in lags]
def _remove_one_lag_names_from_df_index_and_columns(df, num_lags=1):
"""
Note: inplace
"""
rename_dict = {col: lag_varname_to_varname(col, num_lags=num_lags) for col in df.index}
df.index = df.index.to_series().replace(rename_dict)
df.columns = df.columns.to_series().replace(rename_dict)
def _remove_one_lag_names_from_series_index(series, num_lags=1):
"""
Note: inplace
"""
rename_dict = {col: lag_varname_to_varname(col, num_lags=num_lags) for col in series.index}
series.index = series.index.to_series().replace(rename_dict)
def _remove_lag_names_from_list(list_, lags=(1,)):
for lag in lags:
list_ = _remove_one_lag_names_from_list(list_, lag)
return list_
def _remove_one_lag_names_from_list(list_, num_lags=1):
return [lag_varname_to_varname(item, num_lags=num_lags) for item in list_]
def _remove_lag_names_from_varname(varname, lags=(1,)):
for lag in lags:
varname = lag_varname_to_varname(varname, lag)
return varname
def lag_varname_to_varname(varname, num_lags=1):
return varname.replace(rf'$_{{t - {num_lags}}}$', '')
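# Hedged usage sketch (illustrative): strip lag suffixes such as
# "$_{t - 1}$" from the labels of a fitted statsmodels result. y and X are
# assumed to carry lag-suffixed variable names.
#
#   result = OLS(y, X).fit()
#   cleaned = _remove_lag_name_from_reg_result(result, lags=(1,))
#   cleaned.params   # index now shows the base variable names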
/Flask-Bootstrap-Components-0.2.0.tar.gz/Flask-Bootstrap-Components-0.2.0/flask_bootstrap_components/nav.py
from markupsafe import Markup
from fnmatch import fnmatchcase
from flask import request
from .markup import *
from .utils import url_or_url_for
class NavItem:
__slots__ = ["label", "target", "args", "preserve_args",
"subendpoint_pattern"]
def __init__(self, label, target,
args={},
preserve_args=[],
subendpoints=False,
subendpoint_pattern=None):
self.label = label
self.target = target
self.args = args
self.preserve_args = preserve_args
if subendpoints:
subendpoint_pattern = target + "*"
self.subendpoint_pattern = subendpoint_pattern
@property
def is_active(self):
if request.endpoint == self.target:
return True
if self.subendpoint_pattern:
return fnmatchcase(request.endpoint, self.subendpoint_pattern)
return False
@property
def url(self):
params = self.args.copy()
for i in self.preserve_args:
params[i] = request.view_args[i]
return url_or_url_for(self.target, **params)
@property
def a_attrs(self):
klasses = ["nav-link"]
if self.is_active:
klasses.append("active")
return {'href': self.url, "class": " ".join(klasses)}
@property
def li_attrs(self):
return {"class": "nav-item"}
def __html__(self):
link = element('a',
self.a_attrs,
self.label)
return element('li', self.li_attrs, link)
class Nav:
item_class = NavItem
def __init__(self, preserve_args=[]):
self.items = []
self.preserve_args = preserve_args
def add_item(self, item):
self.items.append(item)
def add(self, label, target, preserve_args=None, **kwargs):
if preserve_args is None:
preserve_args = self.preserve_args
self.add_item(self.item_class(label,
target,
preserve_args=preserve_args,
**kwargs))
@property
def ul_attrs(self):
return {"class": "nav"}
def __html__(self):
return element('ul', self.ul_attrs,
Markup("").join(self.items))
class NavTabs(Nav):
@property
def ul_attrs(self):
return {"class": "nav nav-tabs"}
class NavPills(Nav):
@property
def ul_attrs(self):
return {"class": "nav nav-pills"} | PypiClean |
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/dtl/filter/misc.js.uncompressed.js | define("dojox/dtl/filter/misc", [
"dojo/_base/lang",
"dojo/_base/json", // dojo.toJson
"../_base"
], function(lang,json,dd){
/*=====
dd = dojox.dtl;
=====*/
lang.getObject("dojox.dtl.filter.misc", true);
lang.mixin(dd.filter.misc, {
filesizeformat: function(value){
// summary: Format the value like a 'human-readable' file size (i.e. 13 KB, 4.1 MB, 102bytes, etc).
value = parseFloat(value);
if(value < 1024){
return (value == 1) ? value + " byte" : value + " bytes";
}else if(value < 1024 * 1024){
return (value / 1024).toFixed(1) + " KB";
}else if(value < 1024 * 1024 * 1024){
return (value / 1024 / 1024).toFixed(1) + " MB";
}
return (value / 1024 / 1024 / 1024).toFixed(1) + " GB";
},
pluralize: function(value, arg){
// summary:
// Returns a plural suffix if the value is not 1, for '1 vote' vs. '2 votes'
// description:
// By default, 's' is used as a suffix; if an argument is provided, that string
// is used instead. If the provided argument contains a comma, the text before
// the comma is used for the singular case.
arg = arg || 's';
if(arg.indexOf(",") == -1){
arg = "," + arg;
}
var parts = arg.split(",");
if(parts.length > 2){
return "";
}
var singular = parts[0];
var plural = parts[1];
if(parseInt(value, 10) != 1){
return plural;
}
return singular;
},
_phone2numeric: { a: 2, b: 2, c: 2, d: 3, e: 3, f: 3, g: 4, h: 4, i: 4, j: 5, k: 5, l: 5, m: 6, n: 6, o: 6, p: 7, r: 7, s: 7, t: 8, u: 8, v: 8, w: 9, x: 9, y: 9 },
phone2numeric: function(value){
// summary: Takes a phone number and converts it in to its numerical equivalent
var dm = dd.filter.misc;
value = value + "";
var output = "";
for(var i = 0; i < value.length; i++){
var chr = value.charAt(i).toLowerCase();
(dm._phone2numeric[chr]) ? output += dm._phone2numeric[chr] : output += value.charAt(i);
}
return output;
},
pprint: function(value){
// summary: A wrapper around toJson unless something better comes along
return json.toJson(value);
}
});
return dojox.dtl.filter.misc;
}); | PypiClean |
/COMPAS-1.17.5.tar.gz/COMPAS-1.17.5/src/compas/data/encoders.py | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import json
import platform
import uuid
from compas.data.exceptions import DecoderError
# We don't do this from `compas.IPY` to avoid circular imports
if "ironpython" == platform.python_implementation().lower():
try:
from System.Collections.Generic import IDictionary
except: # noqa: E722
IDictionary = None
else:
IDictionary = None
def cls_from_dtype(dtype):
"""Get the class object corresponding to a COMPAS data type specification.
Parameters
----------
dtype : str
The data type of the COMPAS object in the following format:
'{}/{}'.format(o.__class__.__module__, o.__class__.__name__).
Returns
-------
:class:`~compas.base.Base`
Raises
------
ValueError
If the data type is not in the correct format.
ImportError
If the module can't be imported.
AttributeError
If the module doesn't contain the specified data type.
"""
mod_name, attr_name = dtype.split("/")
module = __import__(mod_name, fromlist=[attr_name])
return getattr(module, attr_name)
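# Hedged example (illustrative): a dtype string resolves back to its class,
# assuming the named module exposes the named attribute, e.g.
#
#   cls = cls_from_dtype('compas.geometry/Point')   # -> the Point class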
class DataEncoder(json.JSONEncoder):
"""Data encoder for custom JSON serialization with support for COMPAS data structures and geometric primitives.
The encoder adds the following conversions to the JSON serialisation process:
* Numpy objects to their Python equivalents;
* iterables to lists; and
* :class:`~compas.data.Data` objects,
such as geometric primitives and shapes, data structures, robots, ...,
to a dict with the following structure: ``{'dtype': o.dtype, 'value': o.data}``
See Also
--------
compas.data.Data
compas.data.DataDecoder
Examples
--------
Explicit use case.
>>> import json
>>> import compas
>>> from compas.data import DataEncoder
>>> from compas.geometry import Point
>>> point = Point(0, 0, 0)
>>> with open(compas.get('point.json'), 'w') as f:
... json.dump(point, f, cls=DataEncoder)
...
Implicit use case.
>>> from compas.data import json_dump
>>> from compas.geometry import Point
>>> point = Point(0, 0, 0)
>>> json_dump(point, compas.get('point.json'))
"""
def default(self, o):
"""Return an object in serialized form.
Parameters
----------
o : object
The object to serialize.
Returns
-------
str
The serialized object.
"""
if hasattr(o, "to_data"):
value = o.to_data()
if hasattr(o, "dtype"):
dtype = o.dtype
else:
dtype = "{}/{}".format(
".".join(o.__class__.__module__.split(".")[:-1]),
o.__class__.__name__,
)
return {"dtype": dtype, "value": value, "guid": str(o.guid)}
if hasattr(o, "__next__"):
return list(o)
try:
import numpy as np
except ImportError:
pass
else:
if isinstance(o, np.ndarray):
return o.tolist()
if isinstance(
o,
(
np.int_,
np.intc,
np.intp,
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
),
):
return int(o)
if isinstance(o, (np.float_, np.float16, np.float32, np.float64)):
return float(o)
if isinstance(o, np.bool_):
return bool(o)
if isinstance(o, np.void):
return None
return super(DataEncoder, self).default(o)
class DataDecoder(json.JSONDecoder):
"""Data decoder for custom JSON serialization with support for COMPAS data structures and geometric primitives.
The decoder hooks into the JSON deserialisation process
to reconstruct :class:`~compas.data.Data` objects,
such as geometric primitives and shapes, data structures, robots, ...,
from the serialized data when possible.
The reconstruction is possible if
    * the serialized data has the following structure: ``{'dtype': '...', 'value': {...}}``;
* a class can be imported into the current scope from the info in ``o["dtype"]``; and
* the imported class has a method ``from_data``.
See Also
--------
compas.data.Data
compas.data.DataEncoder
Examples
--------
Explicit use case.
>>> import json
>>> import compas
>>> from compas.data import DataDecoder
>>> with open(compas.get('point.json'), 'r') as f:
... point = json.load(f, cls=DataDecoder)
...
Implicit use case.
>>> from compas.data import json_load
>>> point = json_load(compas.get('point.json'))
"""
def __init__(self, *args, **kwargs):
super(DataDecoder, self).__init__(object_hook=self.object_hook, *args, **kwargs)
def object_hook(self, o):
"""Reconstruct a deserialized object.
Parameters
----------
o : object
Returns
-------
object
            A (reconstructed) deserialized object.
"""
if "dtype" not in o:
return o
try:
cls = cls_from_dtype(o["dtype"])
except ValueError:
            raise DecoderError(
                "The data type of the object is not in the correct format: {}. "
                "It should be formatted as 'module_name/ClassName'.".format(o["dtype"])
            )
except ImportError:
raise DecoderError("The module of the data type can't be found: {}.".format(o["dtype"]))
except AttributeError:
raise DecoderError("The data type can't be found in the specified module: {}.".format(o["dtype"]))
obj_value = o["value"]
# Kick-off from_data from a rebuilt Python dictionary instead of the C# data type
if IDictionary and isinstance(o, IDictionary[str, object]):
obj_value = {key: obj_value[key] for key in obj_value.Keys}
obj = cls.from_data(obj_value)
if "guid" in o:
obj._guid = uuid.UUID(o["guid"])
return obj | PypiClean |
/FlyForms-1.0.0b1.tar.gz/FlyForms-1.0.0b1/docs/source/api/form.rst | .. _Form_api:
.. module:: flyforms.core
Forms
-----
Class :py:class:`.Form` is the root class of FlyForms; it provides the highest-level API for validation
and mapping of data structures. All user-defined forms must inherit this class.
Form class
~~~~~~~~~~
.. autoclass:: Form
**Properties**
.. autoattribute:: is_bound
.. autoattribute:: is_valid
**Attributes**
.. py:attribute:: _raw_data
   A normal Python :code:`dict` that contains all Form data (even unbound fields)
.. py:attribute:: _fields
   A Python :code:`set` that contains all defined field names
.. py:attribute:: _meta
   An instance of :py:class:`.FormMetaOptions` that contains the Form meta information
**Methods**
.. automethod:: to_python
.. automethod:: validate
For more information about the :py:meth:`.validate` method, see :ref:`in_flight_validation`.
Defining Forms
~~~~~~~~~~~~~~
Defining forms is a simple process. All you need to do is subclass :py:class:`.Form`
and define fields as class attributes.
If you need to extend Forms, inheritance is available.
The new Form will contain all fields of the parent form as well as its own.
.. literalinclude:: ../../examples/form_inheritance.py
:language: python
Multiple Form inheritance is also possible; it is resolved in full compliance with the Python MRO_:
.. literalinclude:: ../../examples/form_multiply_inheritance.py
:language: python
.. _MRO: https://www.python.org/download/releases/2.3/mro/
Using Forms
~~~~~~~~~~~
A typical Form usage schema looks like:
.. code-block:: python
class MyForm(Form):
# Your form definition
if __name__ == '__main__':
f = MyForm(**data) # form instantiation
if f.is_valid:
# do something with form
else:
# handle form errors
.. note:: Verifying a form via :py:attr:`.is_valid` is more general than :py:attr:`.is_bound`.
   In general, a Form may be *bound* but *not valid*.
   Therefore, we recommend using the :py:attr:`.is_valid` property to check whether a Form is bound.
For a better understanding of the internal structure of the :py:class:`.Form` class see :ref:`Low_levelAPI` section.
When a Form is instantiated, you can access the bound data through form instance attributes:
.. code-block:: python
class MyForm(Form):
field = StringField()
if __name__ == '__main__':
f = MyForm(**data) # form instantiation
print(f.field) # >> bound value will be printed
The FlyForms :py:class:`.Field` API allows you to set default values for form fields. You can use it together with passing
:code:`required=False` to the field constructor (for more information about the :py:class:`.Field` API see :ref:`Fields_api`).
If you do not pass a value for a non-required field during form instantiation, you'll get the default value:
.. code-block:: python
class MyForm(Form):
field = StringField(required=False, default="Hello!")
if __name__ == '__main__':
f = MyForm() # form instantiation
print(f.field) # >> Hello!
But if you pass :code:`required=False` to the field constructor without passing a default value,
you'll get an unbound field:
.. code-block:: python
class MyForm(Form):
field = StringField(required=False)
if __name__ == '__main__':
f = MyForm() # form instantiation
print(f.field) # >> <UnboundStringField(field, errors: {})>
By default, the representation of unbound fields is provided by :py:class:`.UnboundField`.
If you want to access all bound data, use the :py:meth:`.to_python()` method, which returns a :code:`dict` containing all bound data.
If there are any errors, an :py:class:`UnboundForm` exception will be raised.
.. autoclass:: UnboundForm
FlyForms also allows you to dump Form data into a JSON string via the :py:meth:`.to_json()` method.
.. _in_flight_validation:
In flight data validation
~~~~~~~~~~~~~~~~~~~~~~~~~
If you already have a defined Form and just want to validate a data structure against it, you can use the
:py:meth:`.Form.validate` class method.
**Goal**
The main goal is that no new objects will be created when you call :py:meth:`.Form.validate`.
**Usage**
.. literalinclude:: ../../examples/validate_schema_usage.py
:language: python
Unbound fields rendering
~~~~~~~~~~~~~~~~~~~~~~~~
By default, all unbound fields are replaced with :py:class:`.UnboundField` instances.
You can customize this using :py:attr:`.FormMetaOptions.unbound_field_render` in the :py:class:`.FormMetaOptions`
definition of your Form.
.. autoclass:: UnboundField
Forms customization
~~~~~~~~~~~~~~~~~~~
You can define a :code:`Meta` class in your Form definition to customize its behaviour.
**API**
.. autoclass:: FormMetaOptions
**Usage**
.. literalinclude:: ../../examples/meta_usage.py
:language: python
.. note:: There is no inheritance of the :code:`Meta` class.
   It will only have an effect in the form where it has been defined.
   **But** if you use :py:class:`.EmbeddedFormField` with a form in which a :code:`Meta` class is defined, its
   :py:attr:`.FormMetaOptions.skip_extra` attribute will be used to customize the binding process.
.. _Low_levelAPI:
Low-level API
~~~~~~~~~~~~~
.. warning:: This section provides information about the core class API of FlyForms. You should not normally
   use it directly, but understanding it is necessary for extending and customizing form behavior in some
   specific cases.
.. autoclass:: FormMeta
.. autoclass:: FormField
.. automethod:: __get__
.. automethod:: __set__
Form data manipulations
~~~~~~~~~~~~~~~~~~~~~~~
.. module:: flyforms.form
Since version 1.0.0, FlyForms allows you to load and dump your Form data in JSON format.
We decided to put this functionality into separate functions, collected in the module :py:mod:`flyforms.form`.
For JSON encoding and decoding the standard :code:`json` module is used.
After all, the data cached in a Form is an ordinary Python :code:`dict`, so we decided to avoid overcomplicating things.
.. autofunction:: to_json
.. autofunction:: from_json
.. autofunction:: validate_json | PypiClean |
/Mapp-0.1.0.tar.gz/Mapp-0.1.0/mapp/performance/evaluation.py | from __future__ import division
import sys, os, math, argparse
# without appending the package path to sys.path, the mapp package couldn't be imported
sys.path.append(os.path.dirname(os.path.realpath(__file__ + '/../../')))
from mapp.utils.common import MappFile, parse_mutations
from mapp.utils.common import AminoAcids as aalist
def main():
    ''' This script takes a mapp file and creates a
    csv table of evaluated values. '''
    description = "This script takes a mapp file and mutation files and creates an evaluation of the analysis."
neutralhelp = "File with neutral mutations in format letterNUMBERletter."
deleterhelp = "File with deleterious mutations in format letterNUMBERletter."
outhelp = "Output file in csv format. Delimited by tabs."
filehelp = "Mapp file."
    parser = argparse.ArgumentParser(description=description)
parser.add_argument('-n', required=True, help=neutralhelp, dest='neut', type=argparse.FileType('r'))
parser.add_argument('-d', required=True, help=deleterhelp, dest='dele', type=argparse.FileType('r'))
parser.add_argument('-o', required=True, help=outhelp, dest='out', type=argparse.FileType('w'))
parser.add_argument('file', metavar='F', help=filehelp)
args = parser.parse_args()
mutations = list()
for mutation in parse_mutations(args.neut.read()):
mutations.append((mutation, "neu"))
args.neut.close()
for mutation in parse_mutations(args.dele.read()):
mutations.append((mutation, "del"))
args.dele.close()
evaluate = evaluate_file(mutations, args.file)
for line in evaluate:
args.out.write("\t".join(map(str, line)) + "\n")
args.out.close()
def evaluate_file(mutations, mappfile):
'''
    It evaluates the mapp file given in the argument. Results are saved in a list,
    one list line for each file. Each list line is a list of values.
:param mutations: list of doubles - [(mutation, type), ...]
- mutation is tuple (letter, number, letter)
- type is "del" or "neu"
:param mappfile: file that will be analyzed
:returns: list of evaluation lines. Every line is list of values.
First line is header.
'''
#first row is header
outlines = [["mappfile", "tp", "tn", "fp", "fn", "mutations", "accuracy",
"mcc", "coverage", "sensitivity", "specificity", "ppv", "npv"]]
f = open(mappfile, "r")
evaluation = evaluate_mapp(f, mutations)
f.close()
outlines.append([os.path.basename(mappfile),
evaluation["tp"],
evaluation["tn"],
evaluation["fp"],
evaluation["fn"],
evaluation["mutations"],
evaluation["accuracy"],
evaluation["mcc"],
evaluation["coverage"],
evaluation["sensitivity"],
evaluation["specificity"],
evaluation["ppv"],
evaluation["npv"]])
return outlines
def evaluate_mapp(mapp, mutations, pvalue=0.1):
'''
    Takes a mapp file and counts true positives, false positives and other
    measures according to the list of mutations.
:param mapp: itereable with mapp lines (mapp header included)
:param mutations: list of doubles - [(mutation, type), ...]
- mutation is tuple (letter, number, letter)
- type is "del" or "neu"
:param pvalue: threshold for decision about protein mutation
:returns: dictionary with keys:
tp (true positive)
tn (true negative)
fp (false positive)
fn (false negative)
mutations (number of mutations)
accuracy
mcc
        coverage (0.75 means that 3 of 4 mutations were predicted by mapp,
            the others weren't predicted at all)
sensitivity
specificity
ppv (positive predictive value)
npv (negative predictive value)
'''
result = {"tp": 0, "tn":0, "fp":0, "fn":0, "mutations":0}
noteval = 0
mapplines = list(mapp)
for mutation in mutations:
mutnumber = mutation[0][1]
muttype = mutation[1]
mutletter = mutation[0][2]
result["mutations"] += 1
mappline = mapplines[mutnumber]
mappline = mappline.split("\t")
align_column = mappline[MappFile.alignmentOffset]
if mutation[0][0] not in align_column:
print("Mutation %s doesn't appear in alignment column %s!" % \
(str(mutation), align_column))
continue
if (mappline[MappFile.naOffset] == "N"): # values are predicted by mapp
mut_pvalue = float(mappline[MappFile.pvalueOffset
+ aalist.list.index(mutletter)])
if (mut_pvalue > pvalue): #mutation is predicted neutral
if (muttype == "neu"):
result["tn"] += 1
else:
result["fn"] += 1
else: #mutation is predicted deleterious
if (muttype == "del"):
result["tp"] += 1
else:
result["fp"] += 1
else: #prediction is not defined for this mutation
noteval += 1
result["accuracy"] = accuracy(result["tp"], result["fp"], result["tn"], result["fn"])
result["sensitivity"] = sensitivity(result["tp"], result["fn"])
result["mcc"] = mcc(result["tp"], result["fp"], result["tn"], result["fn"])
result["coverage"] = (result["mutations"] - noteval) / result["mutations"]
result["sensitivity"] = sensitivity(result["tp"], result["fn"])
result["specificity"] = specifity(result["tn"], result["fp"])
result["ppv"] = ppv(result["tp"], result["fp"])
result["npv"] = npv(result["tn"], result["fn"])
return result
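# Usage sketch (hypothetical data): with mutations such as
#   [(("A", 12, "G"), "neu"), (("R", 7, "W"), "del")]
# and an open mapp file handle f, evaluate_mapp(f, mutations) returns the
# confusion-matrix counts plus the measures computed by the helpers below.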
def accuracy(tp, fp, tn, fn):
try:
return (tp / (tp + fn) + tn / (tn + fp)) / 2
except ZeroDivisionError:
return float('nan')
def sensitivity(tp, fn):
try:
return tp / (tp + fn)
except ZeroDivisionError:
return float('nan')
def specificity(tn, fp):
try:
return tn / (fp + tn)
except ZeroDivisionError:
return float('nan')
def ppv(tp, fp):
try:
return tp / (tp + fp)
except ZeroDivisionError:
return float('nan')
def npv(tn, fn):
try:
return tn / (fn + tn)
except ZeroDivisionError:
return float('nan')
def mcc(tp, fp, tn, fn):
try:
return ((tp * tn) - (fp * fn)) / math.sqrt((tp + fn) * (tn + fp) * (tp + fp) * (tn + fn))
except ZeroDivisionError:
return float('nan')
if __name__ == "__main__":
main() | PypiClean |
/ISYlib-0.1.20150912c.tar.gz/ISYlib-0.1.20150912c/bin/isy_audit_pgm_x10.py |
__author__ = "Peter Shipley"
#
# quick hack to check which X10 devices are in use by which programs
#
# then report :
# program name and X10 ids used
#
# summary list of all X10 ids
#
# TODO : add options
#
# print program's full path, including parent folder
opt_fullpath = True
import xml.etree.ElementTree as ET
import ISY
def list_prog_x10(isy) :
    # a set for all referenced X10 ids
x10_used_all = set()
x10_use_count = dict()
if opt_fullpath :
name_width = 45
else :
name_width = 24
    # iterate through all programs and program folders
for p in isy.prog_iter():
x10_used = [ ]
# skip root folder.
if p.id == '0001' :
continue
# get D2D src for program
src_xml = isy.prog_get_src(p.id)
# print "src_xml", src_xml
# parse the D2D code ( XML format )
src_info = ET.fromstring(src_xml)
        # find all the references to x10 in the program
# and store them in an array
for v in src_info.iter("x10") :
try :
hc = v.find('hc').text
uc = v.find('uc')
if uc is not None :
uc = uc.text
else :
uc = ""
xid = hc + ":" + uc
x10_used.append(xid)
x10_use_count[xid] = x10_use_count.get(xid, 0) + 1
except :
print "Error : ", src_xml
raise
# print "iter x10 : P ", p, "x10_used", x10_used
for v in src_info.iter("device") :
try :
no = v.find('node')
if no is None :
continue
if hasattr(no, 'text') and no.text is not None :
no_addr = no.text.split()
else :
continue
if no_addr[0] == "FF" :
xid = "{:s}:{:d}".format(
( unichr(64 + int(no_addr[1], 16))),
int(no_addr[2], 16)
)
x10_used.append(xid)
x10_use_count[xid] = x10_use_count.get(xid, 0) + 1
except :
print "Error : ", src_xml
raise
# print "iter node : PID ", p, "x10_used", x10_used
# convert the array into a set
x10_used_set = set(x10_used)
x10_list = sorted(x10_used_set)
# add this set to total used set
x10_used_all.update(x10_used_set)
        # reference the program by name or by full path
if p.parentId == '0001' or opt_fullpath == False :
pname = p.name
else :
pname = p.path
        # if the program uses X10, print its name and the X10 ids it contains.
if len(x10_list) > 0 :
print "{:<5}{:<{namew}} {!s}".format(p.id, pname, ", ".join(x10_list), namew=name_width)
# print all x10 that are used.
print "\nUsed X10 Ids (", len(x10_used_all), "): ",
print str(", ").join(sorted(x10_used_all))
if __name__ == '__main__' :
# open connection to ISY
    # don't preload nodes, don't subscribe to updates
    #
    # get login / pass from Env.
myisy = ISY.Isy(faststart=2,eventupdates=0,parsearg=1)
# preload programs
myisy.load_prog()
list_prog_x10(myisy)
exit(0) | PypiClean |
/Kr0nOs_Bot-3.3.11-py3-none-any.whl/redbot/cogs/trivia/trivia.py | import asyncio
import math
import pathlib
from collections import Counter
from typing import List
import io
import yaml
import discord
from redbot.core import Config, commands, checks
from redbot.cogs.bank import is_owner_if_bank_global
from redbot.core.data_manager import cog_data_path
from redbot.core.i18n import Translator, cog_i18n
from redbot.core.utils.chat_formatting import box, pagify, bold
from redbot.core.utils.menus import start_adding_reactions
from redbot.core.utils.predicates import MessagePredicate, ReactionPredicate
from .checks import trivia_stop_check
from .converters import finite_float
from .log import LOG
from .session import TriviaSession
__all__ = ["Trivia", "UNIQUE_ID", "get_core_lists"]
UNIQUE_ID = 0xB3C0E453
_ = Translator("Trivia", __file__)
class InvalidListError(Exception):
"""A Trivia list file is in invalid format."""
pass
@cog_i18n(_)
class Trivia(commands.Cog):
"""Play trivia with friends!"""
def __init__(self):
super().__init__()
self.trivia_sessions = []
self.config = Config.get_conf(self, identifier=UNIQUE_ID, force_registration=True)
self.config.register_guild(
max_score=10,
timeout=120.0,
delay=15.0,
bot_plays=False,
reveal_answer=True,
payout_multiplier=0.0,
allow_override=True,
)
self.config.register_member(wins=0, games=0, total_score=0)
@commands.group()
@commands.guild_only()
@checks.mod_or_permissions(administrator=True)
async def triviaset(self, ctx: commands.Context):
"""Manage Trivia settings."""
if ctx.invoked_subcommand is None:
settings = self.config.guild(ctx.guild)
settings_dict = await settings.all()
msg = box(
_(
"Current settings\n"
"Bot gains points: {bot_plays}\n"
"Answer time limit: {delay} seconds\n"
"Lack of response timeout: {timeout} seconds\n"
"Points to win: {max_score}\n"
"Reveal answer on timeout: {reveal_answer}\n"
"Payout multiplier: {payout_multiplier}\n"
"Allow lists to override settings: {allow_override}"
).format(**settings_dict),
lang="py",
)
await ctx.send(msg)
@triviaset.command(name="maxscore")
async def triviaset_max_score(self, ctx: commands.Context, score: int):
"""Set the total points required to win."""
if score < 0:
await ctx.send(_("Score must be greater than 0."))
return
settings = self.config.guild(ctx.guild)
await settings.max_score.set(score)
await ctx.send(_("Done. Points required to win set to {num}.").format(num=score))
@triviaset.command(name="timelimit")
async def triviaset_timelimit(self, ctx: commands.Context, seconds: finite_float):
"""Set the maximum seconds permitted to answer a question."""
if seconds < 4.0:
await ctx.send(_("Must be at least 4 seconds."))
return
settings = self.config.guild(ctx.guild)
await settings.delay.set(seconds)
await ctx.send(_("Done. Maximum seconds to answer set to {num}.").format(num=seconds))
@triviaset.command(name="stopafter")
async def triviaset_stopafter(self, ctx: commands.Context, seconds: finite_float):
"""Set how long until trivia stops due to no response."""
settings = self.config.guild(ctx.guild)
if seconds < await settings.delay():
await ctx.send(_("Must be larger than the answer time limit."))
return
await settings.timeout.set(seconds)
await ctx.send(
_(
"Done. Trivia sessions will now time out after {num} seconds of no responses."
).format(num=seconds)
)
@triviaset.command(name="override")
async def triviaset_allowoverride(self, ctx: commands.Context, enabled: bool):
"""Allow/disallow trivia lists to override settings."""
settings = self.config.guild(ctx.guild)
await settings.allow_override.set(enabled)
if enabled:
await ctx.send(
_("Done. Trivia lists can now override the trivia settings for this server.")
)
else:
await ctx.send(
_(
"Done. Trivia lists can no longer override the trivia settings for this "
"server."
)
)
@triviaset.command(name="botplays", usage="<true_or_false>")
async def trivaset_bot_plays(self, ctx: commands.Context, enabled: bool):
"""Set whether or not the bot gains points.
If enabled, the bot will gain a point if no one guesses correctly.
"""
settings = self.config.guild(ctx.guild)
await settings.bot_plays.set(enabled)
if enabled:
await ctx.send(_("Done. I'll now gain a point if users don't answer in time."))
else:
await ctx.send(_("Alright, I won't embarass you at trivia anymore."))
@triviaset.command(name="revealanswer", usage="<true_or_false>")
async def trivaset_reveal_answer(self, ctx: commands.Context, enabled: bool):
"""Set whether or not the answer is revealed.
If enabled, the bot will reveal the answer if no one guesses correctly
in time.
"""
settings = self.config.guild(ctx.guild)
await settings.reveal_answer.set(enabled)
if enabled:
await ctx.send(_("Done. I'll reveal the answer if no one knows it."))
else:
await ctx.send(_("Alright, I won't reveal the answer to the questions anymore."))
@is_owner_if_bank_global()
@checks.admin_or_permissions(manage_guild=True)
@triviaset.command(name="payout")
async def triviaset_payout_multiplier(self, ctx: commands.Context, multiplier: finite_float):
"""Set the payout multiplier.
This can be any positive decimal number. If a user wins trivia when at
least 3 members are playing, they will receive credits. Set to 0 to
disable.
The number of credits is determined by multiplying their total score by
this multiplier.
"""
settings = self.config.guild(ctx.guild)
if multiplier < 0:
await ctx.send(_("Multiplier must be at least 0."))
return
await settings.payout_multiplier.set(multiplier)
if multiplier:
await ctx.send(_("Done. Payout multiplier set to {num}.").format(num=multiplier))
else:
await ctx.send(_("Done. I will no longer reward the winner with a payout."))
@triviaset.group(name="custom")
@commands.is_owner()
async def triviaset_custom(self, ctx: commands.Context):
"""Manage Custom Trivia lists."""
pass
@triviaset_custom.command(name="list")
async def custom_trivia_list(self, ctx: commands.Context):
"""List uploaded custom trivia."""
personal_lists = sorted([p.resolve().stem for p in cog_data_path(self).glob("*.yaml")])
no_lists_uploaded = _("No custom Trivia lists uploaded.")
if not personal_lists:
if await ctx.embed_requested():
await ctx.send(
embed=discord.Embed(
colour=await ctx.embed_colour(), description=no_lists_uploaded
)
)
else:
await ctx.send(no_lists_uploaded)
return
if await ctx.embed_requested():
await ctx.send(
embed=discord.Embed(
title=_("Uploaded trivia lists"),
colour=await ctx.embed_colour(),
description=", ".join(sorted(personal_lists)),
)
)
else:
msg = box(
bold(_("Uploaded trivia lists")) + "\n\n" + ", ".join(sorted(personal_lists))
)
if len(msg) > 1000:
await ctx.author.send(msg)
else:
await ctx.send(msg)
@commands.is_owner()
@triviaset_custom.command(name="upload", aliases=["add"])
async def trivia_upload(self, ctx: commands.Context):
"""Upload a trivia file."""
if not ctx.message.attachments:
await ctx.send(_("Supply a file with next message or type anything to cancel."))
try:
message = await ctx.bot.wait_for(
"message", check=MessagePredicate.same_context(ctx), timeout=30
)
except asyncio.TimeoutError:
await ctx.send(_("You took too long to upload a list."))
return
if not message.attachments:
await ctx.send(_("You have cancelled the upload process."))
return
parsedfile = message.attachments[0]
else:
parsedfile = ctx.message.attachments[0]
try:
await self._save_trivia_list(ctx=ctx, attachment=parsedfile)
except yaml.error.MarkedYAMLError as exc:
await ctx.send(_("Invalid syntax: ") + str(exc))
except yaml.error.YAMLError:
await ctx.send(
_("There was an error parsing the trivia list. See logs for more info.")
)
LOG.exception("Custom Trivia file %s failed to upload", parsedfile.filename)
@commands.is_owner()
@triviaset_custom.command(name="delete", aliases=["remove"])
async def trivia_delete(self, ctx: commands.Context, name: str):
"""Delete a trivia file."""
filepath = cog_data_path(self) / f"{name}.yaml"
if filepath.exists():
filepath.unlink()
await ctx.send(_("Trivia {filename} was deleted.").format(filename=filepath.stem))
else:
await ctx.send(_("Trivia file was not found."))
@commands.group(invoke_without_command=True)
@commands.guild_only()
async def trivia(self, ctx: commands.Context, *categories: str):
"""Start trivia session on the specified category.
You may list multiple categories, in which case the trivia will involve
questions from all of them.
"""
if not categories:
await ctx.send_help()
return
categories = [c.lower() for c in categories]
session = self._get_trivia_session(ctx.channel)
if session is not None:
await ctx.send(_("There is already an ongoing trivia session in this channel."))
return
trivia_dict = {}
authors = []
for category in reversed(categories):
# We reverse the categories so that the first list's config takes
# priority over the others.
try:
dict_ = self.get_trivia_list(category)
except FileNotFoundError:
await ctx.send(
_(
"Invalid category `{name}`. See `{prefix}trivia list` for a list of "
"trivia categories."
).format(name=category, prefix=ctx.clean_prefix)
)
except InvalidListError:
await ctx.send(
_(
"There was an error parsing the trivia list for the `{name}` category. It "
"may be formatted incorrectly."
).format(name=category)
)
else:
trivia_dict.update(dict_)
authors.append(trivia_dict.pop("AUTHOR", None))
continue
return
if not trivia_dict:
await ctx.send(
_("The trivia list was parsed successfully, however it appears to be empty!")
)
return
settings = await self.config.guild(ctx.guild).all()
config = trivia_dict.pop("CONFIG", None)
if config and settings["allow_override"]:
settings.update(config)
settings["lists"] = dict(zip(categories, reversed(authors)))
session = TriviaSession.start(ctx, trivia_dict, settings)
self.trivia_sessions.append(session)
LOG.debug("New trivia session; #%s in %d", ctx.channel, ctx.guild.id)
@trivia_stop_check()
@trivia.command(name="stop")
async def trivia_stop(self, ctx: commands.Context):
"""Stop an ongoing trivia session."""
session = self._get_trivia_session(ctx.channel)
if session is None:
await ctx.send(_("There is no ongoing trivia session in this channel."))
return
await session.end_game()
session.force_stop()
await ctx.send(_("Trivia stopped."))
@trivia.command(name="list")
async def trivia_list(self, ctx: commands.Context):
"""List available trivia categories."""
lists = set(p.stem for p in self._all_lists())
if await ctx.embed_requested():
await ctx.send(
embed=discord.Embed(
title=_("Available trivia lists"),
colour=await ctx.embed_colour(),
description=", ".join(sorted(lists)),
)
)
else:
msg = box(bold(_("Available trivia lists")) + "\n\n" + ", ".join(sorted(lists)))
if len(msg) > 1000:
await ctx.author.send(msg)
else:
await ctx.send(msg)
@trivia.group(
name="leaderboard", aliases=["lboard"], autohelp=False, invoke_without_command=True
)
async def trivia_leaderboard(self, ctx: commands.Context):
"""Leaderboard for trivia.
Defaults to the top 10 of this server, sorted by total wins. Use
subcommands for a more customised leaderboard.
"""
cmd = self.trivia_leaderboard_server
if isinstance(ctx.channel, discord.abc.PrivateChannel):
cmd = self.trivia_leaderboard_global
await ctx.invoke(cmd, "wins", 10)
@trivia_leaderboard.command(name="server")
@commands.guild_only()
async def trivia_leaderboard_server(
self, ctx: commands.Context, sort_by: str = "wins", top: int = 10
):
"""Leaderboard for this server.
`<sort_by>` can be any of the following fields:
- `wins` : total wins
- `avg` : average score
- `total` : total correct answers
- `games` : total games played
`<top>` is the number of ranks to show on the leaderboard.
"""
key = self._get_sort_key(sort_by)
if key is None:
await ctx.send(
_(
"Unknown field `{field_name}`, see `{prefix}help trivia leaderboard server` "
"for valid fields to sort by."
).format(field_name=sort_by, prefix=ctx.clean_prefix)
)
return
guild = ctx.guild
data = await self.config.all_members(guild)
data = {guild.get_member(u): d for u, d in data.items()}
data.pop(None, None) # remove any members which aren't in the guild
await self.send_leaderboard(ctx, data, key, top)
@trivia_leaderboard.command(name="global")
async def trivia_leaderboard_global(
self, ctx: commands.Context, sort_by: str = "wins", top: int = 10
):
"""Global trivia leaderboard.
`<sort_by>` can be any of the following fields:
- `wins` : total wins
- `avg` : average score
- `total` : total correct answers from all sessions
- `games` : total games played
`<top>` is the number of ranks to show on the leaderboard.
"""
key = self._get_sort_key(sort_by)
if key is None:
await ctx.send(
_(
"Unknown field `{field_name}`, see `{prefix}help trivia leaderboard server` "
"for valid fields to sort by."
).format(field_name=sort_by, prefix=ctx.clean_prefix)
)
return
data = await self.config.all_members()
collated_data = {}
for guild_id, guild_data in data.items():
guild = ctx.bot.get_guild(guild_id)
if guild is None:
continue
for member_id, member_data in guild_data.items():
member = guild.get_member(member_id)
if member is None:
continue
collated_member_data = collated_data.get(member, Counter())
for v_key, value in member_data.items():
collated_member_data[v_key] += value
collated_data[member] = collated_member_data
await self.send_leaderboard(ctx, collated_data, key, top)
@staticmethod
def _get_sort_key(key: str):
key = key.lower()
if key in ("wins", "average_score", "total_score", "games"):
return key
elif key in ("avg", "average"):
return "average_score"
elif key in ("total", "score", "answers", "correct"):
return "total_score"
async def send_leaderboard(self, ctx: commands.Context, data: dict, key: str, top: int):
"""Send the leaderboard from the given data.
Parameters
----------
ctx : commands.Context
The context to send the leaderboard to.
data : dict
The data for the leaderboard. This must map `discord.Member` ->
`dict`.
key : str
The field to sort the data by. Can be ``wins``, ``total_score``,
``games`` or ``average_score``.
top : int
The number of members to display on the leaderboard.
Returns
-------
`list` of `discord.Message`
The sent leaderboard messages.
"""
if not data:
await ctx.send(_("There are no scores on record!"))
return
leaderboard = self._get_leaderboard(data, key, top)
ret = []
for page in pagify(leaderboard, shorten_by=10):
ret.append(await ctx.send(box(page, lang="py")))
return ret
@staticmethod
def _get_leaderboard(data: dict, key: str, top: int):
# Mix in average score
for member, stats in data.items():
if stats["games"] != 0:
stats["average_score"] = stats["total_score"] / stats["games"]
else:
stats["average_score"] = 0.0
# Sort by reverse order of priority
priority = ["average_score", "total_score", "wins", "games"]
try:
priority.remove(key)
except ValueError:
raise ValueError(f"{key} is not a valid key.")
# Put key last in reverse priority
priority.append(key)
items = data.items()
for key in priority:
items = sorted(items, key=lambda t: t[1][key], reverse=True)
max_name_len = max(map(lambda m: len(str(m)), data.keys()))
# Headers
headers = (
_("Rank"),
_("Member") + " " * (max_name_len - 6),
_("Wins"),
_("Games Played"),
_("Total Score"),
_("Average Score"),
)
lines = [" | ".join(headers), " | ".join(("-" * len(h) for h in headers))]
# Header underlines
for rank, tup in enumerate(items, 1):
member, m_data = tup
# Align fields to header width
fields = tuple(
map(
str,
(
rank,
member,
m_data["wins"],
m_data["games"],
m_data["total_score"],
round(m_data["average_score"], 2),
),
)
)
padding = [" " * (len(h) - len(f)) for h, f in zip(headers, fields)]
fields = tuple(f + padding[i] for i, f in enumerate(fields))
lines.append(" | ".join(fields).format(member=member, **m_data))
if rank == top:
break
return "\n".join(lines)
@commands.Cog.listener()
async def on_trivia_end(self, session: TriviaSession):
"""Event for a trivia session ending.
This method removes the session from this cog's sessions, and
cancels any tasks which it was running.
Parameters
----------
session : TriviaSession
The session which has just ended.
"""
channel = session.ctx.channel
LOG.debug("Ending trivia session; #%s in %s", channel, channel.guild.id)
if session in self.trivia_sessions:
self.trivia_sessions.remove(session)
if session.scores:
await self.update_leaderboard(session)
async def update_leaderboard(self, session):
"""Update the leaderboard with the given scores.
Parameters
----------
session : TriviaSession
The trivia session to update scores from.
"""
max_score = session.settings["max_score"]
for member, score in session.scores.items():
if member.id == session.ctx.bot.user.id:
continue
stats = await self.config.member(member).all()
if score == max_score:
stats["wins"] += 1
stats["total_score"] += score
stats["games"] += 1
await self.config.member(member).set(stats)
def get_trivia_list(self, category: str) -> dict:
"""Get the trivia list corresponding to the given category.
Parameters
----------
category : str
The desired category. Case sensitive.
Returns
-------
`dict`
A dict mapping questions (`str`) to answers (`list` of `str`).
"""
try:
path = next(p for p in self._all_lists() if p.stem == category)
except StopIteration:
raise FileNotFoundError("Could not find the `{}` category.".format(category))
with path.open(encoding="utf-8") as file:
try:
dict_ = yaml.safe_load(file)
except yaml.error.YAMLError as exc:
raise InvalidListError("YAML parsing failed.") from exc
else:
return dict_
async def _save_trivia_list(
self, ctx: commands.Context, attachment: discord.Attachment
) -> None:
"""Checks and saves a trivia list to data folder.
Parameters
----------
file : discord.Attachment
A discord message attachment.
Returns
-------
None
"""
filename = attachment.filename.rsplit(".", 1)[0]
# Check if trivia filename exists in core files or if it is a command
if filename in self.trivia.all_commands or any(
filename == item.stem for item in get_core_lists()
):
await ctx.send(
_(
"{filename} is a reserved trivia name and cannot be replaced.\n"
"Choose another name."
).format(filename=filename)
)
return
file = cog_data_path(self) / f"{filename}.yaml"
if file.exists():
overwrite_message = _("{filename} already exists. Do you wish to overwrite?").format(
filename=filename
)
can_react = ctx.channel.permissions_for(ctx.me).add_reactions
if not can_react:
overwrite_message += " (y/n)"
overwrite_message_object: discord.Message = await ctx.send(overwrite_message)
if can_react:
# noinspection PyAsyncCall
start_adding_reactions(
overwrite_message_object, ReactionPredicate.YES_OR_NO_EMOJIS
)
pred = ReactionPredicate.yes_or_no(overwrite_message_object, ctx.author)
event = "reaction_add"
else:
pred = MessagePredicate.yes_or_no(ctx=ctx)
event = "message"
try:
await ctx.bot.wait_for(event, check=pred, timeout=30)
except asyncio.TimeoutError:
await ctx.send(_("You took too long answering."))
return
if pred.result is False:
await ctx.send(_("I am not replacing the existing file."))
return
buffer = io.BytesIO(await attachment.read())
yaml.safe_load(buffer)
buffer.seek(0)
with file.open("wb") as fp:
fp.write(buffer.read())
await ctx.send(_("Saved Trivia list as {filename}.").format(filename=filename))
def _get_trivia_session(self, channel: discord.TextChannel) -> TriviaSession:
return next(
(session for session in self.trivia_sessions if session.ctx.channel == channel), None
)
def _all_lists(self) -> List[pathlib.Path]:
personal_lists = [p.resolve() for p in cog_data_path(self).glob("*.yaml")]
return personal_lists + get_core_lists()
def cog_unload(self):
for session in self.trivia_sessions:
session.force_stop()
def get_core_lists() -> List[pathlib.Path]:
"""Return a list of paths for all trivia lists packaged with the bot."""
core_lists_path = pathlib.Path(__file__).parent.resolve() / "data/lists"
return list(core_lists_path.glob("*.yaml")) | PypiClean |
/DeepPhysX.Sofa-22.12.1.tar.gz/DeepPhysX.Sofa-22.12.1/examples/tutorial/T3_configuration.py | from DeepPhysX.Sofa.Environment.SofaEnvironmentConfig import SofaEnvironmentConfig
from DeepPhysX.Core.Network.BaseNetworkConfig import BaseNetworkConfig
from DeepPhysX.Core.Database.BaseDatabaseConfig import BaseDatabaseConfig
# Tutorial related imports
from T1_environment import DummyEnvironment
from T2_network import DummyNetwork, DummyOptimization, DummyTransformation
# Create the Environment config
env_config = SofaEnvironmentConfig(environment_class=DummyEnvironment, # The Environment class to create
as_tcp_ip_client=True, # Create a Client / Server architecture
number_of_thread=3, # Number of Clients connected to Server
ip_address='localhost', # IP address to use for communication
port=10001, # Port number to use for communication
                                   simulations_per_step=1,  # The number of sub-steps to run
load_samples=False, # Load samples from Database to Environment
only_first_epoch=True, # Use the Environment on the first epoch only
always_produce=False) # Environment is always producing data
# Create the Network config
net_config = BaseNetworkConfig(network_class=DummyNetwork, # The Network class to create
optimization_class=DummyOptimization, # The Optimization class to create
data_transformation_class=DummyTransformation, # The DataTransformation class to create
network_dir=None, # Path to an existing Network repository
network_name='DummyNetwork', # Nickname of the Network
network_type='Dummy', # Type of the Network
which_network=-1, # The index of Network to load
save_each_epoch=False, # Do not save the network at each epoch
data_type='float32', # Training data type
require_training_stuff=False, # loss and optimizer can remain at None
lr=None, # Learning rate
loss=None, # Loss class
optimizer=None) # Optimizer class
# Create the Dataset config
database_config = BaseDatabaseConfig(existing_dir=None, # Path to an existing Database
mode='training', # Database mode
max_file_size=1, # Max size of the Dataset (Gb)
shuffle=False, # Dataset should be shuffled
normalize=False, # Database should be normalized
recompute_normalization=False) # Normalization should be recomputed at loading | PypiClean |
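# Note: nothing runs when this module is executed on its own; the three config
# objects above are typically handed to a DeepPhysX pipeline (e.g. a training
# or prediction pipeline) in later tutorial steps.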
/Dero-0.15.0-py3-none-any.whl/dero/reg/lag/plot.py | import pandas as pd
import matplotlib.pyplot as plt
from dero.reg.interact import _interaction_tuple_to_var_name
from dero.reg.iter import _set_interaction_tuples
from dero.reg.hypothesis.lincom import hypothesis_test
# TODO: make module flexible to not having interaction
def interacted_lag_plot_from_reg_result_list(results, lag_tuple, main_iv, interaction_tuples, yvar,
interaction_var_value=13.27, date_var='Year',
outpath=None, clear_figure=True):
plot_df = produce_simplified_result_df(
results, lag_tuple, main_iv, interaction_tuples, interaction_var_value=interaction_var_value
)
return interacted_lag_plot_from_lag_result_df(
plot_df, yvar, main_iv, date_var=date_var, outpath=outpath, clear_figure=clear_figure
)
def interacted_lag_plot_from_lag_result_df(result_df, yvar, main_iv, date_var='Year', outpath=None, clear_figure=True):
"""
    Creates a plot of the effect of main_iv on yvar at different lags. To be used after
    produce_simplified_result_df
    :param result_df: pd.DataFrame, result from produce_simplified_result_df
    :param yvar: str, label of dependent variable
    :param main_iv: str, label of independent variable of interest
    :param date_var: str, label of the time variable, used in the axis and title captions
    :param outpath: str, filepath to output figure. must include matplotlib supported extension such as .pdf or .png
:param clear_figure: bool, True wipe memory of matplotlib figure after running function
:return:
"""
p1 = plt.plot(result_df.q, result_df.b, color='black', label='Lag Regression Slope')
p2 = plt.fill_between(result_df.q, result_df.ub, result_df.lb, color='gray',
label='95% Confidence Interval Bound')
plt.title(f'Effect of {main_iv} on {yvar} over Time')
plt.ylabel(r'Lag Coefficient')
plt.xlabel(f'Number of {date_var}s Lagged')
plt.legend()
if outpath:
plt.savefig(outpath)
else:
plt.show()
if clear_figure:
plt.clf()
def produce_simplified_result_df(results, lag_tuple, main_iv, interaction_tuples, interaction_var_value=13.27):
simple_results = _produce_simplified_results(
results, lag_tuple, main_iv, interaction_tuples, interaction_var_value=interaction_var_value
)
return pd.DataFrame(simple_results, columns=['q', 'a', 'b', 'lb', 'ub'])
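# The resulting DataFrame has one row per lag: q is the number of periods
# lagged, a the regression constant, b the estimated combined effect of the
# main iv at interaction_var_value, and lb/ub the 95% confidence bounds.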
def _produce_simplified_results(results, lag_tuple, main_iv, interaction_tuples, interaction_var_value=13.27):
interaction_tuples = _set_interaction_tuples(interaction_tuples, len(results))
return [
_produce_simplified_result_list_from_one_result(
result, lag_tuple[i], main_iv, interaction_tuples[i], interaction_var_value=interaction_var_value
) for i, result in enumerate(results)
]
def _produce_simplified_result_list_from_one_result(res, lag, main_iv, interaction_tuple, interaction_var_value=13.27):
# Handle possibility of dummy cols dict coming through with result as tuple
if isinstance(res, tuple):
res = res[0]
interaction_name = _interaction_tuple_to_var_name(interaction_tuple)
    # 95% confidence interval via the standard error of the linear combination
col_coef_dict = {
main_iv: 1,
interaction_name: interaction_var_value
}
hypothesis_result = hypothesis_test(res, col_coef_dict=col_coef_dict)
return [
lag,
res.params['const'],
hypothesis_result.effect,
hypothesis_result.conf_int()[0][0], # lower 95% confidence interval
hypothesis_result.conf_int()[0][1], # upper 95% confidence interval
] | PypiClean |
/Emotion_recognition-1.0.2.tar.gz/Emotion_recognition-1.0.2/Emotion_recognition/utils/utils.py | from pickle import load, dump
stopWords={'tenidos', 'serias', 'éramos', 'algunos', 'nuestras', 'habrias', 'somos', 'estamos', 'como', 'es', 'tuvimos', 'suyo', 'tuyas', 'seras', 'tendremos', 'estaré', 'hubieramos', 'seran', 'hubo', 'estadas', 'estuvimos', 'esten', 'mucho', 'habría', 'que', 'hubisteis', 'todo', 'tendré', 'han', 'tengo', 'estuvieron', 'contra', 'estáis', 'eras', 'tengan', 'sí', 'habían', 'estén', 'tendrías', 'tus', 'estariais', 'hubieseis', 'sere', 'seriamos', 'tendreis', 'ellas', 'habiendo', 'sintiendo', 'hubiéramos', 'esta', 'ellos', 'sea', 'erais', 'pero', 'tendras', 'hemos', 'hubiesemos', 'tenemos', 'un', 'serian', 'ya', 'tuviésemos', 'si', 'estarian', 'estaríamos', 'tuvieramos', 'tendrás', 'seria', 'sentidas', 'tendrias', 'unos', 'has', 'habras', 'estas', 'fuimos', 'muchos', 'vosostras', 'estuviese', 'estarias', 'tuve', 'sin', 'tendría', 'tenga', 'mio', 'fueras', 'fueron', 'tengas', 'estuvo', 'estaran', 'seríais', 'tendrán', 'habías', 'estaba', 'estuviesen', 'habrás', 'tengamos', 'quien', 'fueses', 'estuviesemos', 'tendre', 'sereis', 'fuesen', 'estemos', 'habran', 'cuando', 'estar', 'seas', 'quienes', 'esa', 'estéis', 'otras', 'algunas', 'tuviesen', 'habremos', 'estariamos', 'sois', 'serían', 'habre', 'tendriamos', 'tú', 'fuésemos', 'mí', 'habiais', 'tened', 'de', 'teniais', 'estuvierais', 'seremos', 'tuvieses', 'seríamos', 'no', 'tuviste', 'habrías', 'estad', 'vuestro', 'estuviésemos', 'habra', 'esas', 'eramos', 'estuviéramos', 'hubieran', 'tuviese', 'suya', 'nuestros', 'estuvisteis', 'estados', 'habidos', 'serán', 'he', 'habrán', 'estada', 'serás', 'hubiesen', 'nos', 'ha', 'muy', 'tendréis', 'sean', 'yo', 'estaria', 'más', 'se', 'lo', 'habíamos', 'estado', 'habéis', 'estaremos', 'ti', 'hubieras', 'tuvo', 'los', 'esto', 'habré', 'hayas', 'tiene', 'seáis', 'tenia', 'vuestras', 'tenéis', 'eran', 'son', 'estarán', 'estuviera', 'tengáis', 'habriamos', 'eso', 'tenían', 'estarían', 'esté', 'haya', 'estais', 'fuisteis', 'estarías', 'nuestro', 'fuera', 'suyos', 'estuvieses', 'sera', 'hubimos', 'él', 'tuyo', 'otros', 'tuvieseis', 'fuese', 'hubiera', 'habeis', 'cual', 'estes', 'estara', 'tenida', 'qué', 'mas', 'tendrian', 'ella', 'estuvieras', 'tenía', 'habrian', 'teneis', 'hayáis', 'tenian', 'fueran', 'del', 'estaras', 'estabas', 'tuviesemos', 'hubierais', 'vosostros', 'tenido', 'habriais', 'mias', 'tendrían', 'tuvisteis', 'suyas', 'habíais', 'vuestros', 'asi', 'otro', 'estés', 'tuvieran', 'seais', 'hube', 'estos', 'tuyos', 'estan', 'estás', 'al', 'mías', 'por', 'nosotras', 'las', 'seré', 'sentido', 'habrá', 'será', 'tendriais', 'habríamos', 'fuiste', 'a', 'estaban', 'habríais', 'me', 'tendríamos', 'estabais', 'también', 'nada', 'mios', 'hayais', 'donde', 'hubieses', 'hubiésemos', 'tienen', 'o', 'mi', 'había', 'habidas', 'tienes', 'tuviera', 'habreis', 'estareis', 'fui', 'hayan', 'con', 'estuvieseis', 'tuvieron', 'seamos', 'nosotros', 'siente', 'habian', 'esteis', 'habiamos', 'mis', 'habia', 'están', 'tendríais', 'entre', 'estabamos', 'otra', 'míos', 'hay', 'estando', 'mío', 'tenías', 'habias', 'hasta', 'tuya', 'nuestra', 'os', 'soy', 'estaríais', 'tu', 'la', 'fueramos', 'está', 'desde', 'estarás', 'seriais', 'habido', 'fue', 'era', 'fueseis', 'habida', 'para', 'tendran', 'tenidas', 'tendria', 'sería', 'estaría', 'habria', 'tenias', 'le', 'tambien', 'tuvierais', 'teniendo', 'su', 'estará', 'mia', 'estuvieramos', 'todos', 'vuestra', 'poco', 'uno', 'estare', 'hayamos', 'esos', 'el', 'ese', 'sentidos', 'teniamos', 'este', 'e', 'ni', 'hubiste', 'seréis', 'sentida', 'teníais', 'tuviéramos', 'ante', 'sobre', 'y', 
'mía', 'hubieron', 'tengais', 'durante', 'fuerais', 'en', 'te', 'estoy', 'estuve', 'fuesemos', 'tuvieras', 'estábamos', 'tendra', 'una', 'hubiese', 'tendrá', 'tanto', 'algo', 'sus', 'fuéramos', 'antes', 'sentid', 'les', 'estuviste', 'estaréis', 'habrían', 'serías', 'teníamos', 'eres', 'habréis', 'porque', 'estuvieran'}
abc={'q','w','e','r','t','y','u','i','o','p','a','s','d','f','g','h','j','k','l','z','x','c','v','b','n','ñ','m',' ','.','á','é','í','ó','ú'}
def save_data(data, filename):
    with open(filename, 'wb') as outfile:
        dump(data, outfile)
def load_data(filename):
    with open(filename, 'rb') as infile:
        return load(infile)
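# Usage sketch: save_data(obj, "model.pkl") pickles obj to disk;
# load_data("model.pkl") returns the unpickled object.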
/OctoBot-0.4.54.tar.gz/OctoBot-0.4.54/octobot/community/identifiers_provider.py | import octobot.constants as constants
import octobot.enums as enums
import octobot_commons.logging as logging
import octobot_commons.configuration as configuration
class IdentifiersProvider:
ENABLED_ENVIRONMENT: str = None
COMMUNITY_URL: str = None
FEED_URL: str = None
BACKEND_API_URL: str = None
FRONTEND_PASSWORD_RECOVER_URL: str = None
BACKEND_ACCOUNT_URL: str = None
BACKEND_AUTH_URL: str = None
BACKEND_PUBLIC_TOKEN: str = None
MONGO_REALM_URL: str = None
MONGO_APP_ID: str = None
GQL_AUTH_URL: str = None
GQL_BACKEND_API_URL: str = None
@staticmethod
def use_production():
IdentifiersProvider.COMMUNITY_URL = constants.OCTOBOT_COMMUNITY_URL
IdentifiersProvider.FEED_URL = constants.OCTOBOT_COMMUNITY_FEED_URL
IdentifiersProvider.BACKEND_API_URL = constants.COMMUNITY_BACKEND_API_URL
IdentifiersProvider.FRONTEND_PASSWORD_RECOVER_URL = constants.OCTOBOT_COMMUNITY_RECOVER_PASSWORD_URL
IdentifiersProvider.BACKEND_ACCOUNT_URL = constants.COMMUNITY_BACKEND_ACCOUNT_URL
IdentifiersProvider.BACKEND_AUTH_URL = constants.COMMUNITY_BACKEND_AUTH_URL
IdentifiersProvider.BACKEND_PUBLIC_TOKEN = constants.COMMUNITY_BACKEND_PUBLIC_TOKEN
IdentifiersProvider.MONGO_REALM_URL = constants.COMMUNITY_MONGO_REALM_URL
IdentifiersProvider.MONGO_APP_ID = constants.COMMUNITY_MONGO_APP_ID
IdentifiersProvider.GQL_AUTH_URL = constants.COMMUNITY_GQL_AUTH_URL
IdentifiersProvider.GQL_BACKEND_API_URL = constants.COMMUNITY_GQL_BACKEND_API_URL
IdentifiersProvider._register_environment(enums.CommunityEnvironments.Production)
@staticmethod
def use_staging():
IdentifiersProvider.COMMUNITY_URL = constants.STAGING_OCTOBOT_COMMUNITY_URL
IdentifiersProvider.FEED_URL = constants.STAGING_OCTOBOT_COMMUNITY_FEED_URL
IdentifiersProvider.BACKEND_API_URL = constants.STAGING_COMMUNITY_BACKEND_API_URL
IdentifiersProvider.FRONTEND_PASSWORD_RECOVER_URL = constants.STAGING_COMMUNITY_RECOVER_PASSWORD_URL
IdentifiersProvider.BACKEND_ACCOUNT_URL = constants.STAGING_COMMUNITY_BACKEND_ACCOUNT_URL
IdentifiersProvider.BACKEND_AUTH_URL = constants.STAGING_COMMUNITY_BACKEND_AUTH_URL
IdentifiersProvider.BACKEND_PUBLIC_TOKEN = constants.STAGING_COMMUNITY_BACKEND_PUBLIC_TOKEN
IdentifiersProvider.MONGO_REALM_URL = constants.STAGING_COMMUNITY_MONGO_REALM_URL
IdentifiersProvider.MONGO_APP_ID = constants.STAGING_COMMUNITY_MONGO_APP_ID
IdentifiersProvider.GQL_AUTH_URL = constants.STAGING_COMMUNITY_GQL_AUTH_URL
IdentifiersProvider.GQL_BACKEND_API_URL = constants.STAGING_COMMUNITY_GQL_BACKEND_API_URL
IdentifiersProvider._register_environment(enums.CommunityEnvironments.Staging)
@staticmethod
def _register_environment(env):
if IdentifiersProvider.ENABLED_ENVIRONMENT != env:
logging.get_logger(IdentifiersProvider.__name__).debug(f"Using {env.value} Community environment.")
IdentifiersProvider.ENABLED_ENVIRONMENT = env
@staticmethod
def use_default():
if constants.USE_BETA_EARLY_ACCESS:
IdentifiersProvider.use_staging()
else:
IdentifiersProvider.use_production()
@staticmethod
def is_staging_environment_enabled(config: dict):
try:
env = config[constants.CONFIG_COMMUNITY][constants.CONFIG_COMMUNITY_ENVIRONMENT]
return enums.CommunityEnvironments(env) is enums.CommunityEnvironments.Staging
except (KeyError, ValueError):
return False
@staticmethod
def use_environment_from_config(config: configuration.Configuration):
if IdentifiersProvider.is_staging_environment_enabled(config.config):
IdentifiersProvider.use_staging()
else:
IdentifiersProvider.use_default() | PypiClean |
/Nuitka-1.8.tar.gz/Nuitka-1.8/nuitka/build/inline_copy/glob2/glob2/impl.py |
from __future__ import absolute_import
import sys
import os
import re
from os.path import join
from . import fnmatch
try:
from itertools import imap
except ImportError:
imap = map
class Globber(object):
listdir = staticmethod(os.listdir)
isdir = staticmethod(os.path.isdir)
islink = staticmethod(os.path.islink)
exists = staticmethod(os.path.lexists)
def walk(self, top, followlinks=False, sep=None):
"""A simplified version of os.walk (code copied) that uses
``self.listdir``, and the other local filesystem methods.
Because we don't care about file/directory distinctions, only
a single list is returned.
"""
try:
names = self.listdir(top)
        except os.error:
return
items = []
for name in names:
items.append(name)
yield top, items
for name in items:
new_path = _join_paths([top, name], sep=sep)
if followlinks or not self.islink(new_path):
for x in self.walk(new_path, followlinks):
yield x
def glob(self, pathname, with_matches=False, include_hidden=False, recursive=True,
norm_paths=True, case_sensitive=True, sep=None):
"""Return a list of paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards a la
fnmatch. However, unlike fnmatch, filenames starting with a
dot are special cases that are not matched by '*' and '?'
patterns.
If ``include_hidden`` is True, then files and folders starting with
a dot are also returned.
"""
return list(self.iglob(pathname, with_matches, include_hidden,
norm_paths, case_sensitive, sep))
def iglob(self, pathname, with_matches=False, include_hidden=False, recursive=True,
norm_paths=True, case_sensitive=True, sep=None):
"""Return an iterator which yields the paths matching a pathname
pattern.
The pattern may contain simple shell-style wildcards a la
fnmatch. However, unlike fnmatch, filenames starting with a
dot are special cases that are not matched by '*' and '?'
patterns.
If ``with_matches`` is True, then for each matching path
a 2-tuple will be returned; the second element if the tuple
will be a list of the parts of the path that matched the individual
wildcards.
If ``include_hidden`` is True, then files and folders starting with
a dot are also returned.
"""
result = self._iglob(pathname, True, include_hidden,
norm_paths, case_sensitive, sep)
if with_matches:
return result
return imap(lambda s: s[0], result)
def _iglob(self, pathname, rootcall, include_hidden,
norm_paths, case_sensitive, sep):
"""Internal implementation that backs :meth:`iglob`.
``rootcall`` is required to differentiate between the user's call to
iglob(), and subsequent recursive calls, for the purposes of resolving
certain special cases of ** wildcards. Specifically, "**" is supposed
to include the current directory for purposes of globbing, but the
directory itself should never be returned. So if ** is the lastmost
        part of the ``pathname`` given by the user to the root call, we want to
        ignore the current directory. For this, we need to know which call is
        the root call.
"""
# Short-circuit if no glob magic
if not has_magic(pathname):
if self.exists(pathname):
yield pathname, ()
return
# If no directory part is left, assume the working directory
dirname, basename = os.path.split(pathname)
# If the directory is globbed, recurse to resolve.
# If at this point there is no directory part left, we simply
# continue with dirname="", which will search the current dir.
# `os.path.split()` returns the argument itself as a dirname if it is a
# drive or UNC path. Prevent an infinite recursion if a drive or UNC path
# contains magic characters (i.e. r'\\?\C:').
if dirname != pathname and has_magic(dirname):
# Note that this may return files, which will be ignored
# later when we try to use them as directories.
# Prefiltering them here would only require more IO ops.
dirs = self._iglob(dirname, False, include_hidden,
norm_paths, case_sensitive, sep)
else:
dirs = [(dirname, ())]
# Resolve ``basename`` expr for every directory found
for dirname, dir_groups in dirs:
for name, groups in self.resolve_pattern(dirname, basename,
not rootcall, include_hidden,
norm_paths, case_sensitive, sep):
yield _join_paths([dirname, name], sep=sep), dir_groups + groups
def resolve_pattern(self, dirname, pattern, globstar_with_root, include_hidden,
norm_paths, case_sensitive, sep):
"""Apply ``pattern`` (contains no path elements) to the
literal directory in ``dirname``.
If pattern=='', this will filter for directories. This is
a special case that happens when the user's glob expression ends
        with a slash (in which case we only want directories). It is simpler
and faster to filter here than in :meth:`_iglob`.
"""
if sys.version_info[0] == 3:
if isinstance(pattern, bytes):
dirname = bytes(os.curdir, 'ASCII')
else:
if isinstance(pattern, unicode) and not isinstance(dirname, unicode):
dirname = unicode(dirname, sys.getfilesystemencoding() or
sys.getdefaultencoding())
# If no magic, short-circuit, only check for existence
if not has_magic(pattern):
if pattern == '':
if self.isdir(dirname):
return [(pattern, ())]
else:
if self.exists(_join_paths([dirname, pattern], sep=sep)):
return [(pattern, ())]
return []
if not dirname:
dirname = os.curdir
try:
if pattern == '**':
# Include the current directory in **, if asked; by adding
# an empty string as opposed to '.', we spare ourselves
# having to deal with os.path.normpath() later.
names = [''] if globstar_with_root else []
for top, entries in self.walk(dirname, sep=sep):
_mkabs = lambda s: _join_paths([top[len(dirname) + 1:], s], sep=sep)
names.extend(map(_mkabs, entries))
# Reset pattern so that fnmatch(), which does not understand
# ** specifically, will only return a single group match.
pattern = '*'
else:
names = self.listdir(dirname)
except os.error:
return []
if not include_hidden and not _ishidden(pattern):
            # Remove hidden files, but take care not to filter out the
            # empty string we may have added earlier.
names = filter(lambda x: not x or not _ishidden(x), names)
return fnmatch.filter(names, pattern, norm_paths, case_sensitive, sep)
default_globber = Globber()
glob = default_globber.glob
iglob = default_globber.iglob
del default_globber
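# Usage sketch: glob('src/**/*.py') returns all matching paths eagerly, while
# iglob(...) yields them lazily; with with_matches=True each result is a
# (path, wildcard_groups) tuple, as described in Globber.iglob above.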
magic_check = re.compile('[*?[]')
magic_check_bytes = re.compile(b'[*?[]')
def has_magic(s):
if isinstance(s, bytes):
match = magic_check_bytes.search(s)
else:
match = magic_check.search(s)
return match is not None
def _ishidden(path):
return path[0] in ('.', b'.'[0])
def _join_paths(paths, sep=None):
path = join(*paths)
if sep:
        path = path.replace('/', sep)
return path | PypiClean |
/Nuitka_fixed-1.1.2-cp310-cp310-win_amd64.whl/nuitka/build/inline_copy/lib/scons-4.4.0/SCons/Variables/EnumVariable.py | from typing import Tuple, Callable
import SCons.Errors
__all__ = ['EnumVariable',]
def _validator(key, val, env, vals) -> None:
if val not in vals:
raise SCons.Errors.UserError(
'Invalid value for option %s: %s. Valid values are: %s' % (key, val, vals))
def EnumVariable(key, help, default, allowed_values, map={}, ignorecase=0) -> Tuple[str, str, str, Callable, Callable]:
"""Return a tuple describing an enumaration SCons Variable.
The input parameters describe an option with only certain values
allowed. Returns A tuple including an appropriate converter and
validator. The result is usable as input to :meth:`Add`.
*key* and *default* are passed directly on to :meth:`Add`.
*help* is the descriptive part of the help text,
and will have the allowed values automatically appended.
*allowed_values* is a list of strings, which are the allowed values
for this option.
The *map*-dictionary may be used for converting the input value
into canonical values (e.g. for aliases).
The value of *ignorecase* defines the behaviour of the validator:
* 0: the validator/converter are case-sensitive.
* 1: the validator/converter are case-insensitive.
* 2: the validator/converter is case-insensitive and the
converted value will always be lower-case.
The *validator* tests whether the value is in the list of allowed values.
The *converter* converts input values according to the given
*map*-dictionary (unmapped input values are returned unchanged).
"""
help = '%s (%s)' % (help, '|'.join(allowed_values))
# define validator
if ignorecase:
validator = lambda key, val, env: \
_validator(key, val.lower(), env, allowed_values)
else:
validator = lambda key, val, env: \
_validator(key, val, env, allowed_values)
# define converter
if ignorecase == 2:
converter = lambda val: map.get(val.lower(), val).lower()
elif ignorecase == 1:
converter = lambda val: map.get(val.lower(), val)
else:
converter = lambda val: map.get(val, val)
return (key, help, default, validator, converter)
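# Usage sketch (names illustrative): the returned tuple plugs straight into a
# Variables collection via ``vars.Add(...)``:
#
#     vars = Variables()
#     vars.Add(EnumVariable('debug', 'debug output and symbols', 'no',
#                           allowed_values=('yes', 'no', 'full'),
#                           map={}, ignorecase=0))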
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | PypiClean |
/IdracRedfishSupportTest-0.0.7.tar.gz/IdracRedfishSupportTest-0.0.7/EnableDisableBiosBootOrderDevicesREDFISH.py |
import argparse
import getpass
import json
import logging
import re
import requests
import sys
import time
import warnings
from datetime import datetime
from pprint import pprint
warnings.filterwarnings("ignore")
parser=argparse.ArgumentParser(description="Python script using Redfish API to either get current BIOS boot order device details or enable/disable BIOS boot order devices.")
parser.add_argument('-ip',help='iDRAC IP address', required=False)
parser.add_argument('-u', help='iDRAC username', required=False)
parser.add_argument('-p', help='iDRAC password. If you do not pass in argument -p, script will prompt to enter user password which will not be echoed to the screen.', required=False)
parser.add_argument('-x', help='Pass in X-Auth session token for executing Redfish calls. All Redfish calls will use X-Auth token instead of username/password', required=False)
parser.add_argument('--ssl', help='SSL cert verification for all Redfish calls, pass in value \"true\" or \"false\". By default, this argument is not required and script ignores validating SSL cert for all Redfish calls.', required=False)
parser.add_argument('--script-examples', help='Get executing script examples', action="store_true", dest="script_examples", required=False)
parser.add_argument('--get', help='Get current boot order device details.', action="store_true", required=False)
parser.add_argument('--enable', help='Enable one boot order device, pass in the ID of the boot device. Example: Boot0002', required=False)
parser.add_argument('--disable', help='Disable one boot order device, pass in the ID of the boot device. Example: Boot0002', required=False)
parser.add_argument('--noreboot', help='Pass in this argument to not reboot the server now to execute BIOS config job. BIOS config job will still be scheduled and execute on next server reboot.', action="store_true", required=False)
args=vars(parser.parse_args())
logging.basicConfig(format='%(message)s', stream=sys.stdout, level=logging.INFO)
def script_examples():
print("""\n- EnableDisableBiosBootOrderDevicesREDFISH.py -ip 192.168.0.120 -u root -p calvin --get, this example will get the current boot order device details.
\n- EnableDisableBiosBootOrderDevicesREDFISH.py -ip 192.168.0.120 -u root -p calvin --enable Boot0002, this example will enable BIOS boot order device Boot0002.
\n- EnableDisableBiosBootOrderDevicesREDFISH.py -ip 192.168.0.120 -x 7041fd6528bc5d9d88a34cdc14bf111b --disable Boot0008, this example using iDRAC X-auth token session will disable BIOS boot order device Boot0008.""")
sys.exit(0)
def check_supported_idrac_version():
if args["x"]:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/BootOptions' % idrac_ip, verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/BootOptions' % idrac_ip, verify=verify_cert,auth=(idrac_username, idrac_password))
data = response.json()
if response.status_code == 401:
logging.warning("\n- WARNING, status code %s returned. Incorrect iDRAC username/password or invalid privilege detected." % response.status_code)
sys.exit(0)
if response.status_code != 200:
logging.warning("\n- WARNING, iDRAC version installed does not support this feature using Redfish API")
sys.exit(0)
def get_current_boot_order():
if args["x"]:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Bios' % idrac_ip, verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/Bios' % idrac_ip, verify=verify_cert,auth=(idrac_username, idrac_password))
data = response.json()
if response.status_code != 200:
logging.error("- FAIL, GET command failed to GET BIOS details, status code %s returned" % response.status_code)
sys.exit(0)
else:
data = response.json()
try:
current_boot_mode = data['Attributes']['BootMode']
except:
logging.error("- FAIL, unable to locate BootMode attribute in JSON output")
sys.exit(0)
if args["x"]:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/BootOptions?$expand=*($levels=1)' % idrac_ip, verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1/BootOptions?$expand=*($levels=1)' % idrac_ip, verify=verify_cert,auth=(idrac_username, idrac_password))
data = response.json()
logging.info("\n- Current boot order detected for BIOS boot mode \"%s\" -\n" % current_boot_mode)
if data["Members"] == []:
logging.warning("- WARNING, no boot devices detected for BIOS boot mode %s" % current_boot_mode)
sys.exit(0)
for i in data['Members']:
pprint(i)
print("\n")
def enable_disable_boot_devices():
global job_id
if args["enable"]:
url = 'https://%s/redfish/v1/Systems/System.Embedded.1/BootOptions/%s' % (idrac_ip, args["enable"])
payload = {"BootOptionEnabled":True}
change_boot_device = "enable"
if args["disable"]:
url = 'https://%s/redfish/v1/Systems/System.Embedded.1/BootOptions/%s' % (idrac_ip, args["disable"])
payload = {"BootOptionEnabled":False}
change_boot_device = "disable"
if args["x"]:
headers = {'content-type': 'application/json', 'X-Auth-Token': args["x"]}
response = requests.patch(url, data=json.dumps(payload), headers=headers, verify=verify_cert)
else:
headers = {'content-type': 'application/json'}
response = requests.patch(url, data=json.dumps(payload), headers=headers, verify=verify_cert,auth=(idrac_username,idrac_password))
data = response.json()
if response.status_code == 200 or response.status_code == 202:
logging.info("\n- PASS: PATCH command passed to %s BIOS boot order device" % change_boot_device)
else:
logging.error("\n- FAIL, PATCH command failed to change BIOS boot order device, status code %s" % response.status_code)
detail_message = str(response.__dict__)
logging.error(detail_message)
sys.exit(0)
try:
job_id = response.headers['Location'].split("/")[-1]
except:
logging.error("- FAIL, unable to find job ID in headers PATCH response, headers output is:\n%s" % response.headers)
sys.exit(0)
logging.info("- PASS, job ID \"%s\" successfully created to %s BIOS boot order device" % (job_id, change_boot_device))
def get_job_status_scheduled():
count = 0
while True:
if count == 5:
logging.error("- FAIL, GET job status retry count of 5 has been reached, script will exit")
sys.exit(0)
try:
if args["x"]:
response = requests.get('https://%s/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/%s' % (idrac_ip, job_id), verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/%s' % (idrac_ip, job_id), verify=verify_cert,auth=(idrac_username, idrac_password))
except requests.ConnectionError as error_message:
logging.error(error_message)
logging.info("\n- INFO, GET request will try again to poll job status")
time.sleep(5)
count += 1
continue
if response.status_code == 200:
time.sleep(5)
else:
logging.error("\n- FAIL, Command failed to check job status, return code is %s" % response.status_code)
logging.error("Extended Info Message: {0}".format(response.json()))
sys.exit(0)
data = response.json()
if data['Message'] == "Task successfully scheduled.":
logging.info("- INFO, staged config job marked as scheduled")
break
else:
logging.info("- INFO: job status not scheduled, current status: %s\n" % data['Message'])
def reboot_server():
if args["x"]:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1' % idrac_ip, verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1' % idrac_ip, verify=verify_cert,auth=(idrac_username, idrac_password))
data = response.json()
logging.info("\n- INFO, Current server power state is: %s" % data['PowerState'])
if data['PowerState'] == "On":
url = 'https://%s/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset' % idrac_ip
payload = {'ResetType': 'GracefulShutdown'}
if args["x"]:
headers = {'content-type': 'application/json', 'X-Auth-Token': args["x"]}
response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert)
else:
headers = {'content-type': 'application/json'}
response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert,auth=(idrac_username,idrac_password))
if response.status_code == 204:
logging.info("- PASS, POST command passed to gracefully power OFF server, status code return is %s" % response.status_code)
logging.info("- INFO, script will now verify the server was able to perform a graceful shutdown. If the server was unable to perform a graceful shutdown, forced shutdown will be invoked in 5 minutes")
time.sleep(15)
start_time = datetime.now()
else:
logging.error("\n- FAIL, Command failed to gracefully power OFF server, status code is: %s\n" % response.status_code)
logging.error("Extended Info Message: {0}".format(response.json()))
sys.exit(0)
while True:
if args["x"]:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1' % idrac_ip, verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1' % idrac_ip, verify=verify_cert,auth=(idrac_username, idrac_password))
data = response.json()
current_time = str(datetime.now() - start_time)[0:7]
if data['PowerState'] == "Off":
logging.info("- PASS, GET command passed to verify graceful shutdown was successful and server is in OFF state")
break
elif current_time >= "0:05:00":
logging.info("- INFO, unable to perform graceful shutdown, server will now perform forced shutdown")
payload = {'ResetType': 'ForceOff'}
if args["x"]:
headers = {'content-type': 'application/json', 'X-Auth-Token': args["x"]}
response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert)
else:
headers = {'content-type': 'application/json'}
response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert,auth=(idrac_username,idrac_password))
if response.status_code == 204:
logging.info("- PASS, POST command passed to perform forced shutdown, status code return is %s" % response.status_code)
time.sleep(15)
if args["x"]:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1' % idrac_ip, verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Systems/System.Embedded.1' % idrac_ip, verify=verify_cert,auth=(idrac_username, idrac_password))
data = response.json()
if data['PowerState'] == "Off":
logging.info("- PASS, GET command passed to verify forced shutdown was successful and server is in OFF state")
break
else:
logging.error("- FAIL, server not in OFF state, current power status is %s" % data['PowerState'])
sys.exit(0)
else:
continue
payload = {'ResetType': 'On'}
if args["x"]:
headers = {'content-type': 'application/json', 'X-Auth-Token': args["x"]}
response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert)
else:
headers = {'content-type': 'application/json'}
response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert,auth=(idrac_username,idrac_password))
if response.status_code == 204:
logging.info("- PASS, Command passed to power ON server, status code return is %s" % response.status_code)
else:
logging.error("\n- FAIL, Command failed to power ON server, status code is: %s\n" % response.status_code)
logging.error("Extended Info Message: {0}".format(response.json()))
sys.exit(0)
elif data['PowerState'] == "Off":
url = 'https://%s/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset' % idrac_ip
payload = {'ResetType': 'On'}
if args["x"]:
headers = {'content-type': 'application/json', 'X-Auth-Token': args["x"]}
response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert)
else:
headers = {'content-type': 'application/json'}
response = requests.post(url, data=json.dumps(payload), headers=headers, verify=verify_cert,auth=(idrac_username,idrac_password))
if response.status_code == 204:
logging.info("- PASS, Command passed to power ON server, code return is %s" % response.status_code)
else:
logging.error("\n- FAIL, Command failed to power ON server, status code is: %s\n" % response.status_code)
logging.error("Extended Info Message: {0}".format(response.json()))
sys.exit(0)
else:
logging.error("- FAIL, unable to get current server power state to perform either reboot or power on")
sys.exit(0)
def loop_job_status_final():
start_time = datetime.now()
if args["x"]:
response = requests.get('https://%s/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/%s' % (idrac_ip, job_id), verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/%s' % (idrac_ip, job_id), verify=verify_cert,auth=(idrac_username, idrac_password))
data = response.json()
while True:
if args["x"]:
response = requests.get('https://%s/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/%s' % (idrac_ip, job_id), verify=verify_cert, headers={'X-Auth-Token': args["x"]})
else:
response = requests.get('https://%s/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/%s' % (idrac_ip, job_id), verify=verify_cert,auth=(idrac_username, idrac_password))
current_time = (datetime.now()-start_time)
if response.status_code != 200:
logging.error("\n- FAIL, GET command failed to check job status, return code is %s" % statusCode)
logging.error("Extended Info Message: {0}".format(req.json()))
sys.exit(0)
data = response.json()
if str(current_time)[0:7] >= "2:00:00":
logging.error("\n- FAIL: Timeout of 2 hours has been hit, script stopped\n")
sys.exit(0)
elif "Fail" in data['Message'] or "fail" in data['Message'] or data['JobState'] == "Failed":
logging.error("- FAIL: job ID %s failed, failed message is: %s" % (job_id, data['Message']))
sys.exit(0)
elif data['JobState'] == "Completed":
logging.info("\n--- PASS, Final Detailed Job Status Results ---\n")
for i in data.items():
pprint(i)
break
else:
logging.info("- INFO, job status not completed, current status: \"%s\"" % data['Message'])
time.sleep(30)
if __name__ == "__main__":
if args["script_examples"]:
script_examples()
if args["ip"] and args["ssl"] or args["u"] or args["p"] or args["x"]:
idrac_ip=args["ip"]
idrac_username=args["u"]
if args["p"]:
idrac_password=args["p"]
if not args["p"] and not args["x"] and args["u"]:
idrac_password = getpass.getpass("\n- Argument -p not detected, pass in iDRAC user %s password: " % args["u"])
if args["ssl"]:
if args["ssl"].lower() == "true":
verify_cert = True
elif args["ssl"].lower() == "false":
verify_cert = False
else:
verify_cert = False
else:
verify_cert = False
check_supported_idrac_version()
else:
logging.error("\n- FAIL, invalid argument values or not all required parameters passed in. See help text or argument --script-examples for more details.")
sys.exit(0)
if args["get"]:
get_current_boot_order()
elif args["enable"] or args["disable"]:
enable_disable_boot_devices()
get_job_status_scheduled()
if args["noreboot"]:
logging.info("- INFO, --noreboot argument detected. BIOS config job is still scheduled and will execute on next server manual reboot")
else:
logging.info("- INFO, rebooting server to execute BIOS config job")
reboot_server()
loop_job_status_final()
else:
logging.error("\n- FAIL, invalid argument values or not all required parameters passed in. See help text or argument --script-examples for more details.") | PypiClean |
/BlueWhale3_Text-1.6.0-py3-none-any.whl/orangecontrib/text/widgets/utils/widgets.py | import os
from AnyQt.QtWidgets import (QComboBox, QWidget, QHBoxLayout,
QSizePolicy, QLineEdit, QDoubleSpinBox,
QSpinBox, QTextEdit, QDateEdit, QGroupBox,
QPushButton, QStyle, QFileDialog, QLabel,
QGridLayout, QCheckBox, QStackedLayout)
from AnyQt.QtGui import QColor
from AnyQt.QtCore import QDate, pyqtSignal, Qt, QSize
from Orange.data import DiscreteVariable, ContinuousVariable, TimeVariable, \
StringVariable
from Orange.widgets.gui import OWComponent, hBox
from Orange.widgets import settings
from orangecontrib.text.corpus import get_sample_corpora_dir
class ListEdit(QTextEdit):
PLACEHOLDER_COLOR = QColor(128, 128, 128)
USER_TEXT_COLOR = QColor(0, 0, 0)
def __init__(self, master=None, attr=None, placeholder_text=None,
fixed_height=None, *args):
super().__init__(*args)
self.master = master
self.attr = attr
self.placeholder_text = placeholder_text
if self.master and self.attr:
self.setText('\n'.join(getattr(self.master, self.attr, [])))
self.set_placeholder()
self.textChanged.connect(self.synchronize)
if fixed_height:
self.setFixedHeight(fixed_height)
def set_placeholder(self):
""" Set placeholder if there is no user input. """
if self.toPlainText() == '':
self.setFontItalic(True)
self.setTextColor(self.PLACEHOLDER_COLOR)
self.setText(self.placeholder_text)
def toPlainText(self):
""" Return only text input from user. """
text = super().toPlainText()
if self.placeholder_text is not None and text == self.placeholder_text:
text = ''
return text
def focusInEvent(self, event):
super().focusInEvent(event)
if self.toPlainText() == '':
self.clear()
self.setFontItalic(False)
self.setTextColor(self.USER_TEXT_COLOR)
def focusOutEvent(self, event):
self.set_placeholder()
QTextEdit.focusOutEvent(self, event)
def synchronize(self):
if self.master and self.attr:
setattr(self.master, self.attr, self.value())
def value(self):
return self.text.split('\n') if self.text else []
@property
def text(self):
return self.toPlainText().strip()
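# Usage sketch (attribute name illustrative): a ListEdit keeps a list-valued
# attribute on its master widget in sync, one entry per line:
#
#     self.queries = []  # attribute on the master widget
#     edit = ListEdit(self, 'queries', placeholder_text='one query per line')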
class QueryBox(QComboBox):
def __init__(self, widget, master, history, callback, min_width=150):
super().__init__()
self.master = master
self.history = history
self.callback = callback
self.setMinimumWidth(min_width)
self.setEditable(True)
self.activated[int].connect(self.synchronize) # triggered for enter and drop-down
widget.layout().addWidget(self)
self.refresh()
def synchronize(self, n=None, silent=False):
if n is not None and n < len(self.history): # selecting from drop-down
name = self.history[n]
del self.history[n]
self.history.insert(0, name)
else: # enter pressed
query = self.currentText()
if query != '':
if query in self.history:
self.history.remove(query)
self.history.insert(0, self.currentText())
self.refresh()
if callable(self.callback) and not silent:
self.callback()
def refresh(self):
self.clear()
for query in self.history:
self.addItem(query)
class CheckListLayout(QGroupBox):
def __init__(self, title, master, attr, items, cols=1, callback=None):
super().__init__(title=title)
self.master = master
self.attr = attr
self.items = items
self.callback = callback
self.current_values = getattr(self.master, self.attr)
layout = QGridLayout()
self.setLayout(layout)
nrows = len(items) // cols + bool(len(items) % cols)
self.boxes = []
for i, value in enumerate(self.items):
box = QCheckBox(value)
box.setChecked(value in self.current_values)
box.stateChanged.connect(self.synchronize)
self.boxes.append(box)
layout.addWidget(box, i % nrows, i // nrows)
def synchronize(self):
values = []
for item, check_box in zip(self.items, self.boxes):
if check_box.isChecked():
values.append(item)
setattr(self.master, self.attr, values)
if self.callback:
self.callback()
class ComboBox(QComboBox):
def __init__(self, master, attr, items):
super().__init__()
self.attr = attr
self.master = master
if not isinstance(items[0], tuple):
self.items = [(str(item), item) for item in items]
else:
self.items = items
for i, (key, value) in enumerate(self.items):
self.addItem(key)
if value == getattr(master, attr, None):
self.setCurrentIndex(i)
self.currentIndexChanged[int].connect(self.synchronize)
def synchronize(self, i):
setattr(self.master, self.attr, self.items[i][1])
class DatePicker(QDateEdit):
QT_DATE_FORMAT = 'yyyy-MM-dd'
PY_DATE_FORMAT = '%Y-%m-%d'
def __init__(self, widget, master, attribute, label, margin=(0, 0, 0, 0),
display_format=QT_DATE_FORMAT, min_date=None, max_date=None, calendar_popup=True):
super().__init__()
self.master = master
self.attribute = attribute
hb = hBox(widget)
hb.layout().setContentsMargins(*margin)
hb.layout().addWidget(QLabel(label))
hb.layout().addWidget(self)
self.setCalendarPopup(calendar_popup)
self.setDisplayFormat(display_format)
self.setDate(self.to_qdate(getattr(master, attribute)))
if min_date:
self.setMinimumDate(self.to_qdate(min_date))
if max_date:
self.setMaximumDate(self.to_qdate(max_date))
self.dateChanged.connect(self.synchronize)
@classmethod
def to_qdate(cls, date):
return QDate.fromString(date.strftime(cls.PY_DATE_FORMAT),
cls.QT_DATE_FORMAT)
def synchronize(self):
setattr(self.master, self.attribute, self.date().toPyDate())
class DatePickerInterval(QWidget):
def __init__(self, widget, master, attribute_from, attribute_to, min_date=None, max_date=None,
label_from='From:', label_to='To:', margin=(0, 0, 0, 0)):
super().__init__()
self.setParent(widget)
hb = hBox(widget)
self.picker_from = DatePicker(hb, master, attribute_from, label_from,
min_date=min_date, max_date=max_date, margin=margin)
self.picker_to = DatePicker(hb, master, attribute_to, label_to,
min_date=min_date, max_date=max_date, margin=margin)
self.picker_from.dateChanged.connect(self.synchronize)
self.picker_to.dateChanged.connect(self.synchronize)
self.synchronize()
def synchronize(self):
self.picker_from.setMaximumDate(self.picker_to.date())
self.picker_to.setMinimumDate(self.picker_from.date())
class FileWidget(QWidget):
on_open = pyqtSignal(str)
# TODO consider removing directory_aliases since it is not used any more
def __init__(self, dialog_title='', dialog_format='',
start_dir=os.path.expanduser('~/'),
icon_size=(12, 20), minimal_width=200,
browse_label='Browse', on_open=None,
reload_button=True, reload_label='Reload',
recent_files=None, directory_aliases=None,
allow_empty=True, empty_file_label='(none)'):
""" Creates a widget with a button for file loading and
an optional combo box for recent files and reload buttons.
Args:
dialog_title (str): The title of the dialog.
dialog_format (str): Formats for the dialog.
start_dir (str): A directory to start from.
icon_size (int, int): The size of buttons' icons.
on_open (callable): A callback function that accepts filepath as the only argument.
reload_button (bool): Whether to show reload button.
reload_label (str): The text displayed on the reload button.
recent_files (List[str]): List of recent files.
            directory_aliases (dict): An {alias: dir} dictionary for fast access to directories.
            allow_empty (bool): Whether an empty path is allowed.
            empty_file_label (str): The label shown for the empty ("no file") entry.
"""
super().__init__()
self.dialog_title = dialog_title
self.dialog_format = dialog_format
self.start_dir = start_dir
# Recent files should also contain `empty_file_label` so
# when (none) is selected this is stored in settings.
self.recent_files = recent_files if recent_files is not None else []
self.directory_aliases = directory_aliases or {}
self.allow_empty = allow_empty
self.empty_file_label = empty_file_label
if self.empty_file_label not in self.recent_files \
and (self.allow_empty or not self.recent_files):
self.recent_files.append(self.empty_file_label)
self.check_existence()
self.on_open.connect(on_open)
layout = QHBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
if recent_files is not None:
self.file_combo = QComboBox()
self.file_combo.setMinimumWidth(minimal_width)
self.file_combo.activated[int].connect(self.select)
self.update_combo()
layout.addWidget(self.file_combo)
self.browse_button = QPushButton(browse_label)
self.browse_button.setFocusPolicy(Qt.NoFocus)
self.browse_button.clicked.connect(self.browse)
self.browse_button.setIcon(self.style()
.standardIcon(QStyle.SP_DirOpenIcon))
self.browse_button.setIconSize(QSize(*icon_size))
self.browse_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
layout.addWidget(self.browse_button)
if reload_button:
self.reload_button = QPushButton(reload_label)
self.reload_button.setFocusPolicy(Qt.NoFocus)
self.reload_button.clicked.connect(self.reload)
self.reload_button.setIcon(self.style()
.standardIcon(QStyle.SP_BrowserReload))
self.reload_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.reload_button.setIconSize(QSize(*icon_size))
layout.addWidget(self.reload_button)
def browse(self, start_dir=None):
start_dir = start_dir or self.start_dir
path, _ = QFileDialog().getOpenFileName(self, self.dialog_title,
start_dir, self.dialog_format)
if path and self.recent_files is not None:
if path in self.recent_files:
self.recent_files.remove(path)
self.recent_files.insert(0, path)
self.update_combo()
if path:
self.open_file(path)
def select(self, n):
name = self.file_combo.currentText()
if name == self.empty_file_label:
del self.recent_files[n]
self.recent_files.insert(0, self.empty_file_label)
self.update_combo()
self.open_file(self.empty_file_label)
elif name in self.directory_aliases:
self.browse(self.directory_aliases[name])
elif n < len(self.recent_files):
name = self.recent_files[n]
del self.recent_files[n]
self.recent_files.insert(0, name)
self.update_combo()
self.open_file(self.recent_files[0])
def update_combo(self):
""" Sync combo values to the changes in self.recent_files. """
if self.recent_files is not None:
self.file_combo.clear()
for i, file in enumerate(self.recent_files):
# remove (none) when we have some files and allow_empty=False
if file == self.empty_file_label and \
not self.allow_empty and len(self.recent_files) > 1:
del self.recent_files[i]
else:
self.file_combo.addItem(os.path.split(file)[1])
for alias in self.directory_aliases.keys():
self.file_combo.addItem(alias)
def reload(self):
if self.recent_files:
self.select(0)
def check_existence(self):
if self.recent_files:
to_remove = []
for file in self.recent_files:
doc_path = os.path.join(get_sample_corpora_dir(), file)
exists = any(os.path.exists(f) for f in [file, doc_path])
if file != self.empty_file_label and not exists:
to_remove.append(file)
for file in to_remove:
self.recent_files.remove(file)
def open_file(self, path):
self.on_open.emit(path if path != self.empty_file_label else '')
def get_selected_filename(self):
if self.recent_files:
return self.recent_files[0]
else:
return self.empty_file_label
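# Usage sketch (labels and callback illustrative): a FileWidget wired to a
# loader callback, remembering recently opened files:
#
#     file_widget = FileWidget(dialog_title='Open Corpus',
#                              dialog_format='Text files (*.txt)',
#                              recent_files=self.recent_files,
#                              on_open=self.open_file)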
class ValidatedLineEdit(QLineEdit):
invalid_input_signal = pyqtSignal(str)
def __init__(self, master, attr, validator, *args):
super().__init__(*args)
self.master = master
self.attr = attr
self.validator = validator
self.setText(getattr(master, attr))
self.on_change()
self.textChanged.connect(self.on_change)
def on_change(self):
if self.validator(self.text()):
self.setStyleSheet("QLineEdit { border : 1px solid gray;}")
self.synchronize()
else:
self.setStyleSheet("QLineEdit { border : 2px solid red;}")
self.invalid_input_signal.emit("Invalid '{}' value.".format(self.attr))
def synchronize(self):
setattr(self.master, self.attr, self.text())
class AbsoluteRelativeSpinBox(QWidget):
editingFinished = pyqtSignal()
valueChanged = pyqtSignal()
def __init__(self, *args, **kwargs):
super().__init__(*args)
layout = QStackedLayout(self)
self.double_spin = QDoubleSpinBox()
self.double_spin.valueChanged.connect(self.double_value_changed)
self.double_spin.editingFinished.connect(self.double_editing_finished)
layout.addWidget(self.double_spin)
self.int_spin = QSpinBox()
self.int_spin.setMaximum(10 ** 4)
self.int_spin.valueChanged.connect(self.int_value_changed)
self.int_spin.editingFinished.connect(self.int_editing_finished)
layout.addWidget(self.int_spin)
self.setValue(kwargs.get('value', 0.))
def double_value_changed(self):
if self.double_spin.value() > 1:
self.layout().setCurrentIndex(1)
self.int_spin.setValue(self.double_spin.value())
self.valueChanged.emit()
def double_editing_finished(self):
if self.double_spin.value() <= 1.:
self.editingFinished.emit()
def int_value_changed(self):
if self.int_spin.value() == 0:
self.layout().setCurrentIndex(0)
self.double_spin.setValue(1. - self.double_spin.singleStep())
# There is no need to emit valueChanged signal.
def int_editing_finished(self):
if self.int_spin.value() > 0:
self.editingFinished.emit()
def value(self):
return self.int_spin.value() or self.double_spin.value()
def setValue(self, value):
if isinstance(value, int):
self.layout().setCurrentIndex(1)
self.int_spin.setValue(value)
else:
self.layout().setCurrentIndex(0)
self.double_spin.setValue(value)
def setSingleStep(self, step):
if isinstance(step, float):
self.double_spin.setSingleStep(step)
else:
self.int_spin.setSingleStep(step)
class RangeWidget(QWidget):
valueChanged = pyqtSignal()
editingFinished = pyqtSignal()
def __init__(self, widget, master, attribute, minimum=0., maximum=1., step=.05,
min_label=None, max_label=None, allow_absolute=False, dtype=float,
callback=None, *args):
super().__init__(*args)
if widget:
widget.layout().addWidget(self)
self.allow_absolute_values = allow_absolute
self.master = master
self.attribute = attribute
self.min = minimum
self.max = maximum
self.step = step
self.min_label = min_label
self.max_label = max_label
a, b = self.master_value()
layout = QHBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
if self.allow_absolute_values:
SpinBox = AbsoluteRelativeSpinBox
else:
if dtype == float:
SpinBox = QDoubleSpinBox
else:
SpinBox = QSpinBox
if self.min_label:
layout.addWidget(QLabel(self.min_label))
self.min_spin = SpinBox(value=a)
self.min_spin.setSingleStep(self.step)
layout.addWidget(self.min_spin)
if self.max_label:
layout.addWidget(QLabel(self.max_label))
self.max_spin = SpinBox(value=b)
self.max_spin.setSingleStep(self.step)
layout.addWidget(self.max_spin)
self.set_range()
self.min_spin.editingFinished.connect(self._editing_finished)
self.max_spin.editingFinished.connect(self._editing_finished)
if callback:
self.valueChanged.connect(callback)
def synchronize(self):
a, b = self.value()
if isinstance(self.attribute, str):
setattr(self.master, self.attribute, (a, b))
else:
setattr(self.master, self.attribute[0], a)
setattr(self.master, self.attribute[1], b)
self.set_range()
def _editing_finished(self):
value_before = self.master_value()
self.synchronize()
if value_before != self.master_value():
self.editingFinished.emit()
def master_value(self):
if isinstance(self.attribute, str):
return getattr(self.master, self.attribute)
return (getattr(self.master, self.attribute[0]),
getattr(self.master, self.attribute[1]))
def value(self):
return self.min_spin.value(), self.max_spin.value()
def set_range(self):
if not self.allow_absolute_values:
a, b = self.value()
self.min_spin.setRange(self.min, b)
self.max_spin.setRange(a, self.max)
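# Usage sketch (attribute name illustrative): a RangeWidget bound to a
# (min, max) tuple setting on the master widget:
#
#     self.df_range = (0.1, 0.9)  # settings attribute on the master
#     RangeWidget(box, self, 'df_range', minimum=0., maximum=1., step=0.05,
#                 min_label='From:', max_label='To:', callback=self.commit)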
class ResourceLoader(QWidget, OWComponent):
valueChanged = pyqtSignal(str, str)
recent_files = settings.Setting([])
recent_provider = settings.Setting([])
resource_path = settings.Setting('')
def __init__(self, widget, model_format, provider_format,
model_button_label='Model', provider_button_label='Provider'):
QWidget.__init__(self)
OWComponent.__init__(self, widget)
self.model_path = None
layout = QHBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
self.model_widget = FileWidget(recent_files=self.recent_files, dialog_title='Load model',
dialog_format=model_format, start_dir=None,
on_open=self.load_model, allow_empty=False,
reload_button=False, browse_label=model_button_label)
self.model_path = self.recent_files[0] if self.recent_files else None
layout.addWidget(self.model_widget)
self.provider_widget = FileWidget(recent_files=self.recent_provider, dialog_title='Load provider',
dialog_format=provider_format, start_dir=None,
on_open=self.load_provider, allow_empty=False,
reload_button=False, browse_label=provider_button_label)
layout.addWidget(self.provider_widget)
def load_model(self, path_to_file):
self.model_path = path_to_file
self.valueChanged.emit(self.model_path, self.resource_path)
def load_provider(self, path_to_file):
self.resource_path = path_to_file
self.valueChanged.emit(self.model_path, self.resource_path) | PypiClean |
/Fanery-0.2.5.tar.gz/Fanery-0.2.5/fanery/_term.py | from inspect import ( # noqa
ismodule as is_module,
isfunction as is_function,
isgenerator as is_generator,
isbuiltin as is_builtin,
isclass as is_class
)
from os.path import isfile as _isfile, isdir as _isdir, basename as _basename
from datetime import datetime, date
from collections import Iterable
from decimal import Decimal
from uuid import UUID
class Hict(dict):
"""
Hierarchical dotted dictionary.
"""
def __missing__(self, key):
term = self[key] = Hict()
return term
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
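# Example (illustrative): nested keys spring into existence on first access,
# so deep assignment works without intermediate setup:
#
#     cfg = Hict()
#     cfg.db.host = 'localhost'
#     assert cfg['db']['host'] == 'localhost'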
def is_str(term):
return isinstance(term, basestring)
def is_number(term):
return isinstance(term, (int, long, float, Decimal))
def is_date(term):
return isinstance(term, (date, datetime))
def is_uuid(term):
return isinstance(term, UUID)
def is_sequence(term):
return hasattr(term, '__iter__') \
and not isinstance(term, (basestring, dict)) \
and not is_generator(term)
def is_dict(term):
return isinstance(term, dict) or type(term) is dict
def is_inet_address(term):
raise NotImplementedError
def is_inet6_address(term):
raise NotImplementedError
def is_file_path(term):
try:
return _isfile(term)
except:
return False
def is_dir_path(term):
try:
return _isdir(term)
except:
return False
try:
from libuuid import uuid4 as gen_uuid, uuid4_bytes as gen_uuid_bytes # noqa
except ImportError:
from uuid import uuid4 as gen_uuid # noqa
def gen_uuid_bytes():
return gen_uuid().bytes
from re import compile as regex
try:
from validate_email_address import validate_email as is_email
except ImportError:
try:
from validate_email import validate_email as is_email
except ImportError:
from email.utils import parseaddr as _parse_email_addr
# borrowed from http://www.regular-expressions.info/email.html
_email_regex = regex(r'''[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?''') # noqa
def is_email(term, check_mx=False, verify=False):
try:
name, email = _parse_email_addr(term)
return _email_regex.match(email)
except:
return False
def to_str(term):
#FIXME: handle any term
return '%s' % term
def to_simple(term):
if not term or isinstance(term, (int, float, basestring)):
return term
elif isinstance(term, dict) or type(term) is dict:
return dict((to_simple(k), to_simple(v))
for k, v in term.iteritems())
elif isinstance(term, Iterable):
return [to_simple(t) for t in term]
else:
return str(term)
try:
from ciso8601 import parse_datetime
except ImportError:
try:
import arrow
def parse_datetime(term):
return arrow.get(term).datetime
except ImportError:
def parse_datetime(term):
return datetime.strptime(term, "%Y-%m-%dT%H:%M:%S.%f")
def parse_term(term, dict_type=dict, parse_dict_key=False):
if term and isinstance(term, basestring):
try:
f = float(term)
i = int(f)
if i == f:
return i
elif str(f) == term:
return f
else:
return Decimal(term)
except:
pass
try:
return UUID(term)
except:
pass
try:
dt = parse_datetime(term)
return dt if dt.time() else dt.date()
except:
return term
elif isinstance(term, dict) or type(term) is dict:
return dict_type((k if parse_dict_key is not True else
parse_term(k, dict_type, parse_dict_key),
parse_term(v, dict_type, parse_dict_key))
for k, v in term.iteritems())
elif is_sequence(term):
return type(term)(parse_term(t, dict_type, parse_dict_key) for t in term) # noqa
else:
return term
try:
from msgpack import packb as to_msgpack, unpackb as parse_msgpack
except ImportError:
from umsgpack import packb as to_msgpack, unpackb as parse_msgpack
finally:
from base64 import b64encode, b64decode
def dump_term(term, **argd):
argd.setdefault('use_bin_type', True)
return b64encode(to_msgpack(to_simple(term), **argd))
def load_term(encoded, **argd):
encoding = argd.pop('encoding', 'utf-8')
term = parse_msgpack(b64decode(encoded), encoding=encoding)
return parse_term(term, **argd)
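# Round-trip sketch (values illustrative): encode a term into a base64-wrapped
# msgpack payload, then decode it back into rich Python types via parse_term:
#
#     payload = dump_term({'id': gen_uuid(), 'count': 42})
#     term = load_term(payload)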
try:
from ujson import dumps as to_json, loads as parse_json
except ImportError:
try:
from yajl import Encoder, Decoder
parse_json = Decoder().decode
to_json = Encoder().encode
except ImportError:
try:
from jsonlib import write as to_json, read as parse_json
except ImportError:
try:
from cjson import encode as to_json, decode as parse_json
except ImportError:
try:
from simplejson import dumps as to_json, loads as parse_json # noqa
except ImportError:
from json import dumps as to_json, loads as parse_json # noqa
from unicodedata import normalize as _normalize
_punct_regex = regex(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.:]+')
_file_punct_regex = regex(r'[\t !"#$%&\'*/<=>?@\[\\\]^`{|}:]+')
def normalize_filename(term, mode='NFKD'):
assert isinstance(term, basestring), 'bad-type: %r' % term
text = ' '.join(_file_punct_regex.split(_basename(term)))
return _normalize(mode, text).encode('ascii', 'ignore')
def normalize(term, mode='NFKD'):
assert isinstance(term, basestring), 'bad-type: %r' % term
text = ' '.join(_punct_regex.split(term))
return _normalize(mode, text).encode('ascii', 'ignore')
def slugify(term, delim='-'):
return normalize(term).lower().replace(' ', delim) | PypiClean |
/ClueDojo-1.4.3-1.tar.gz/ClueDojo-1.4.3-1/src/cluedojo/static/dojox/xmpp/README | -------------------------------------------------------------------------------
DojoX XMPP (Jabber Client)
-------------------------------------------------------------------------------
Version .9
Release date: 07/05/2008
-------------------------------------------------------------------------------
Project state: experimental
-------------------------------------------------------------------------------
[ ] l18n support?
[ ] a11y support?
-------------------------------------------------------------------------------
Credits
Dustin Machi
Jason Cline
Revin Guillen
Mike Wilcox - updates
-------------------------------------------------------------------------------
Project description
XMPP Service implementation in pure javascript. Uses BOSH and works cross
domain.
-------------------------------------------------------------------------------
Dependencies:
Dojo Core
-------------------------------------------------------------------------------
Documentation
FIXME
-------------------------------------------------------------------------------
Installation instructions
To use the XMPP test, you should have the appropriate server installed on your
machine. We recommend Openfire, a real-time collaboration server licensed under
the open source GPL:
http://www.igniterealtime.org/projects/openfire/index.jsp
It's very easy to install. Download the version for your machine and launch the installer.
After installation is complete, server settings are made at:
http://127.0.0.1:9090/index.jsp
The settings for the most part are those that suggest the easiest for setup. The main setting
you need to notice is HTTP Binding. This needs to be enabled at port 7070. Also enable
Script Syntax for BOSH clients.
Next go to the top tabs to Users/Groups and create a user or two. It gives you the option to make
a user the Admin - this will overwrite the current Admin.
Now you can launch test_xmppService.html. In the login, use the user ID and password from one of
the users you just created. I used my computer name for Domain, but I'm not sure what this does.
Finally, in HTTP-Bind URL, use the address for Openfire, with the HTTP Bind port of 7070:
http://127.0.0.1:7070/http-bind/
You can open another tab in Firefox and log in as a second user. | PypiClean |
/Booktype-1.5.tar.gz/Booktype-1.5/lib/booki/site_static/js/tiny_mce/classes/tinymce.js | (function(win) {
var whiteSpaceRe = /^\s*|\s*$/g,
undefined, isRegExpBroken = 'B'.replace(/A(.)|B/, '$1') === '$1';
/**
* Core namespace with core functionality for the TinyMCE API all sub classes will be added to this namespace/object.
*
* @static
* @class tinymce
* @example
* // Using each method
* tinymce.each([1, 2, 3], function(v, i) {
* console.log(i + '=' + v);
* });
*
* // Checking for a specific browser
* if (tinymce.isIE)
* console.log("IE");
*/
var tinymce = {
/**
* Major version of TinyMCE build.
*
* @property majorVersion
* @type String
*/
majorVersion : '@@tinymce_major_version@@',
/**
	 * Minor version of TinyMCE build.
*
* @property minorVersion
* @type String
*/
minorVersion : '@@tinymce_minor_version@@',
/**
* Release date of TinyMCE build.
*
* @property releaseDate
* @type String
*/
releaseDate : '@@tinymce_release_date@@',
/**
	 * Initializes the TinyMCE global namespace; this will set up browser detection and figure out where TinyMCE is running from.
*/
_init : function() {
var t = this, d = document, na = navigator, ua = na.userAgent, i, nl, n, base, p, v;
/**
* Constant that is true if the browser is Opera.
*
* @property isOpera
* @type Boolean
* @final
*/
t.isOpera = win.opera && opera.buildNumber;
/**
* Constant that is true if the browser is WebKit (Safari/Chrome).
*
* @property isWebKit
* @type Boolean
* @final
*/
t.isWebKit = /WebKit/.test(ua);
/**
* Constant that is true if the browser is IE.
*
* @property isIE
* @type Boolean
* @final
*/
t.isIE = !t.isWebKit && !t.isOpera && (/MSIE/gi).test(ua) && (/Explorer/gi).test(na.appName);
/**
* Constant that is true if the browser is IE 6 or older.
*
* @property isIE6
* @type Boolean
* @final
*/
t.isIE6 = t.isIE && /MSIE [56]/.test(ua);
/**
* Constant that is true if the browser is IE 7.
*
* @property isIE7
* @type Boolean
* @final
*/
t.isIE7 = t.isIE && /MSIE [7]/.test(ua);
/**
* Constant that is true if the browser is IE 8.
*
* @property isIE8
* @type Boolean
* @final
*/
t.isIE8 = t.isIE && /MSIE [8]/.test(ua);
/**
* Constant that is true if the browser is IE 9.
*
* @property isIE9
* @type Boolean
* @final
*/
t.isIE9 = t.isIE && /MSIE [9]/.test(ua);
/**
* Constant that is true if the browser is Gecko.
*
* @property isGecko
* @type Boolean
* @final
*/
t.isGecko = !t.isWebKit && /Gecko/.test(ua);
/**
* Constant that is true if the os is Mac OS.
*
* @property isMac
* @type Boolean
* @final
*/
t.isMac = ua.indexOf('Mac') != -1;
/**
* Constant that is true if the runtime is Adobe Air.
*
* @property isAir
* @type Boolean
* @final
*/
t.isAir = /adobeair/i.test(ua);
/**
* Constant that tells if the current browser is an iPhone or iPad.
*
* @property isIDevice
* @type Boolean
* @final
*/
t.isIDevice = /(iPad|iPhone)/.test(ua);
/**
* Constant that is true if the current browser is running on iOS 5 or greater.
*
* @property isIOS5
* @type Boolean
* @final
*/
t.isIOS5 = t.isIDevice && ua.match(/AppleWebKit\/(\d*)/)[1]>=534;
// TinyMCE .NET webcontrol might be setting the values for TinyMCE
if (win.tinyMCEPreInit) {
t.suffix = tinyMCEPreInit.suffix;
t.baseURL = tinyMCEPreInit.base;
t.query = tinyMCEPreInit.query;
return;
}
// Get suffix and base
t.suffix = '';
		// If a base element is found, add it in front of baseURL
nl = d.getElementsByTagName('base');
for (i=0; i<nl.length; i++) {
if (v = nl[i].href) {
// Host only value like http://site.com or http://site.com:8008
if (/^https?:\/\/[^\/]+$/.test(v))
v += '/';
base = v ? v.match(/.*\//)[0] : ''; // Get only directory
}
}
function getBase(n) {
			if (n.src && /tiny_mce(|_gzip|_jquery|_prototype|_full)(_dev|_src)?\.js/.test(n.src)) {
if (/_(src|dev)\.js/g.test(n.src))
t.suffix = '_src';
if ((p = n.src.indexOf('?')) != -1)
t.query = n.src.substring(p + 1);
t.baseURL = n.src.substring(0, n.src.lastIndexOf('/'));
				// If the path to the script is relative and a base href was found, add it in front;
				// the src property will always be an absolute one on non-IE browsers and IE 8,
				// so this logic will basically only be executed on older IE versions.
if (base && t.baseURL.indexOf('://') == -1 && t.baseURL.indexOf('/') !== 0)
t.baseURL = base + t.baseURL;
return t.baseURL;
}
return null;
};
// Check document
nl = d.getElementsByTagName('script');
for (i=0; i<nl.length; i++) {
if (getBase(nl[i]))
return;
}
// Check head
n = d.getElementsByTagName('head')[0];
if (n) {
nl = n.getElementsByTagName('script');
for (i=0; i<nl.length; i++) {
if (getBase(nl[i]))
return;
}
}
return;
},
/**
	 * Checks if an object is of a specific type, for example an array.
*
* @method is
* @param {Object} o Object to check type of.
* @param {string} t Optional type to check for.
* @return {Boolean} true/false if the object is of the specified type.
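	 * @example
	 * // Check whether a value is an array
	 * if (tinymce.is(v, 'array'))
	 *     console.log("value is an array");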
*/
is : function(o, t) {
if (!t)
return o !== undefined;
if (t == 'array' && (o.hasOwnProperty && o instanceof Array))
return true;
return typeof(o) == t;
},
/**
* Makes a name/object map out of an array with names.
*
* @method makeMap
* @param {Array/String} items Items to make map out of.
* @param {String} delim Optional delimiter to split string by.
* @param {Object} map Optional map to add items to.
* @return {Object} Name/value map of items.
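	 * @example
	 * // Produces a map: {a : {}, b : {}, c : {}}
	 * var map = tinymce.makeMap('a,b,c');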
*/
makeMap : function(items, delim, map) {
var i;
items = items || [];
delim = delim || ',';
if (typeof(items) == "string")
items = items.split(delim);
map = map || {};
i = items.length;
while (i--)
map[items[i]] = {};
return map;
},
/**
	 * Performs an iteration of all items in a collection such as an object or array. This method will execute the
	 * callback function for each item in the collection; if the callback returns false the iteration will terminate.
* The callback has the following format: cb(value, key_or_index).
*
* @method each
* @param {Object} o Collection to iterate.
* @param {function} cb Callback function to execute for each item.
* @param {Object} s Optional scope to execute the callback in.
* @example
* // Iterate an array
* tinymce.each([1,2,3], function(v, i) {
* console.debug("Value: " + v + ", Index: " + i);
* });
*
* // Iterate an object
	 * tinymce.each({a : 1, b : 2, c : 3}, function(v, k) {
* console.debug("Value: " + v + ", Key: " + k);
* });
*/
each : function(o, cb, s) {
var n, l;
if (!o)
return 0;
s = s || o;
if (o.length !== undefined) {
// Indexed arrays, needed for Safari
for (n=0, l = o.length; n < l; n++) {
if (cb.call(s, o[n], n, o) === false)
return 0;
}
} else {
// Hashtables
for (n in o) {
if (o.hasOwnProperty(n)) {
if (cb.call(s, o[n], n, o) === false)
return 0;
}
}
}
return 1;
},
// #ifndef jquery
/**
* Creates a new array by the return value of each iteration function call. This enables you to convert
	 * one array into another.
*
* @method map
* @param {Array} a Array of items to iterate.
* @param {function} f Function to call for each item. It's return value will be the new value.
* @return {Array} Array with new values based on function return values.
*/
map : function(a, f) {
var o = [];
tinymce.each(a, function(v) {
o.push(f(v));
});
return o;
},
/**
* Filters out items from the input array by calling the specified function for each item.
* If the function returns false the item will be excluded if it returns true it will be included.
*
* @method grep
* @param {Array} a Array of items to loop though.
	 * @param {function} f Function to call for each item. Include/exclude depends on its return value.
* @return {Array} New array with values imported and filtered based in input.
* @example
* // Filter out some items, this will return an array with 4 and 5
* var items = tinymce.grep([1,2,3,4,5], function(v) {return v > 3;});
*/
grep : function(a, f) {
var o = [];
tinymce.each(a, function(v) {
if (!f || f(v))
o.push(v);
});
return o;
},
/**
	 * Returns the index of a value in an array; this method will return -1 if the item wasn't found.
*
* @method inArray
* @param {Array} a Array/Object to search for value in.
* @param {Object} v Value to check for inside the array.
	 * @return {Number/String} Index of the item inside the array/object, or -1 if it wasn't found.
* @example
* // Get index of value in array this will alert 1 since 2 is at that index
* alert(tinymce.inArray([1,2,3], 2));
*/
inArray : function(a, v) {
var i, l;
if (a) {
for (i = 0, l = a.length; i < l; i++) {
if (a[i] === v)
return i;
}
}
return -1;
},
/**
* Extends an object with the specified other object(s).
*
* @method extend
* @param {Object} o Object to extend with new items.
* @param {Object} e..n Object(s) to extend the specified object with.
* @return {Object} o New extended object, same reference as the input object.
* @example
* // Extends obj1 with two new fields
* var obj = tinymce.extend(obj1, {
* somefield1 : 'a',
* somefield2 : 'a'
* });
*
* // Extends obj with obj2 and obj3
* tinymce.extend(obj, obj2, obj3);
*/
extend : function(o, e) {
var i, l, a = arguments;
for (i = 1, l = a.length; i < l; i++) {
e = a[i];
tinymce.each(e, function(v, n) {
if (v !== undefined)
o[n] = v;
});
}
return o;
},
// #endif
/**
* Removes whitespace from the beginning and end of a string.
*
* @method trim
* @param {String} s String to remove whitespace from.
* @return {String} New string with removed whitespace.
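	 * @example
	 * // Alerts "abc" without the surrounding whitespace
	 * alert(tinymce.trim('  abc  '));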
*/
trim : function(s) {
return (s ? '' + s : '').replace(whiteSpaceRe, '');
},
/**
* Creates a class, subclass or static singleton.
* More details on this method can be found in the Wiki.
*
* @method create
	 * @param {String} s Class name, inheritance and prefix.
* @param {Object} p Collection of methods to add to the class.
* @param {Object} root Optional root object defaults to the global window object.
* @example
* // Creates a basic class
* tinymce.create('tinymce.somepackage.SomeClass', {
* SomeClass : function() {
* // Class constructor
* },
*
* method : function() {
* // Some method
* }
* });
*
* // Creates a basic subclass class
* tinymce.create('tinymce.somepackage.SomeSubClass:tinymce.somepackage.SomeClass', {
* SomeSubClass: function() {
* // Class constructor
* this.parent(); // Call parent constructor
* },
*
* method : function() {
* // Some method
* this.parent(); // Call parent method
* },
*
* 'static' : {
* staticMethod : function() {
* // Static method
* }
* }
* });
*
* // Creates a singleton/static class
* tinymce.create('static tinymce.somepackage.SomeSingletonClass', {
* method : function() {
* // Some method
* }
* });
*/
create : function(s, p, root) {
var t = this, sp, ns, cn, scn, c, de = 0;
// Parse : <prefix> <class>:<super class>
s = /^((static) )?([\w.]+)(:([\w.]+))?/.exec(s);
cn = s[3].match(/(^|\.)(\w+)$/i)[2]; // Class name
// Create namespace for new class
ns = t.createNS(s[3].replace(/\.\w+$/, ''), root);
// Class already exists
if (ns[cn])
return;
// Make pure static class
if (s[2] == 'static') {
ns[cn] = p;
if (this.onCreate)
this.onCreate(s[2], s[3], ns[cn]);
return;
}
// Create default constructor
if (!p[cn]) {
p[cn] = function() {};
de = 1;
}
// Add constructor and methods
ns[cn] = p[cn];
t.extend(ns[cn].prototype, p);
// Extend
if (s[5]) {
sp = t.resolve(s[5]).prototype;
scn = s[5].match(/\.(\w+)$/i)[1]; // Class name
// Extend constructor
c = ns[cn];
if (de) {
// Add passthrough constructor
ns[cn] = function() {
return sp[scn].apply(this, arguments);
};
} else {
// Add inherit constructor
ns[cn] = function() {
this.parent = sp[scn];
return c.apply(this, arguments);
};
}
ns[cn].prototype[cn] = ns[cn];
// Add super methods
t.each(sp, function(f, n) {
ns[cn].prototype[n] = sp[n];
});
// Add overridden methods
t.each(p, function(f, n) {
// Extend methods if needed
if (sp[n]) {
ns[cn].prototype[n] = function() {
this.parent = sp[n];
return f.apply(this, arguments);
};
} else {
if (n != cn)
ns[cn].prototype[n] = f;
}
});
}
// Add static methods
t.each(p['static'], function(f, n) {
ns[cn][n] = f;
});
if (this.onCreate)
this.onCreate(s[2], s[3], ns[cn].prototype);
},
/**
	 * Executes the specified function for each item in an object tree.
*
* @method walk
	 * @param {Object} o Object tree to walk through.
* @param {function} f Function to call for each item.
* @param {String} n Optional name of collection inside the objects to walk for example childNodes.
* @param {String} s Optional scope to execute the function in.
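	 * @example
	 * // Walk a node tree (someNode is any tree-like object) through its childNodes collections
	 * tinymce.walk(someNode, function(node) {
	 *     console.log(node.nodeName);
	 * }, 'childNodes');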
*/
walk : function(o, f, n, s) {
s = s || this;
if (o) {
if (n)
o = o[n];
tinymce.each(o, function(o, i) {
if (f.call(s, o, i, n) === false)
return false;
tinymce.walk(o, f, n, s);
});
}
},
/**
* Creates a namespace on a specific object.
*
* @method createNS
* @param {String} n Namespace to create for example a.b.c.d.
* @param {Object} o Optional object to add namespace to, defaults to window.
* @return {Object} New namespace object the last item in path.
* @example
* // Create some namespace
* tinymce.createNS('tinymce.somepackage.subpackage');
*
* // Add a singleton
* var tinymce.somepackage.subpackage.SomeSingleton = {
* method : function() {
* // Some method
* }
* };
*/
createNS : function(n, o) {
var i, v;
o = o || win;
n = n.split('.');
for (i=0; i<n.length; i++) {
v = n[i];
if (!o[v])
o[v] = {};
o = o[v];
}
return o;
},
/**
* Resolves a string and returns the object from a specific structure.
*
* @method resolve
* @param {String} n Path to resolve for example a.b.c.d.
	 * @param {Object} o Optional object to search through, defaults to window.
* @return {Object} Last object in path or null if it couldn't be resolved.
* @example
* // Resolve a path into an object reference
* var obj = tinymce.resolve('a.b.c.d');
*/
resolve : function(n, o) {
var i, l;
o = o || win;
n = n.split('.');
for (i = 0, l = n.length; i < l; i++) {
o = o[n[i]];
if (!o)
break;
}
return o;
},
/**
* Adds an unload handler to the document. This handler will be executed when the document gets unloaded.
* This method is useful for dealing with browser memory leaks where it might be vital to remove DOM references etc.
*
* @method addUnload
* @param {function} f Function to execute before the document gets unloaded.
* @param {Object} s Optional scope to execute the function in.
* @return {function} Returns the specified unload handler function.
* @example
	 * // Fixes a leak with a DOM element that was placed in someObject
* tinymce.addUnload(function() {
* // Null DOM element to reduce IE memory leak
* someObject.someElement = null;
* });
*/
addUnload : function(f, s) {
var t = this;
f = {func : f, scope : s || this};
if (!t.unloads) {
function unload() {
var li = t.unloads, o, n;
if (li) {
// Call unload handlers
for (n in li) {
o = li[n];
if (o && o.func)
o.func.call(o.scope, 1); // Send in one arg to distinct unload and user destroy
}
// Detach unload function
if (win.detachEvent) {
win.detachEvent('onbeforeunload', fakeUnload);
win.detachEvent('onunload', unload);
} else if (win.removeEventListener)
win.removeEventListener('unload', unload, false);
// Destroy references
					t.unloads = o = li = unload = 0;
// Run garbarge collector on IE
if (win.CollectGarbage)
CollectGarbage();
}
};
function fakeUnload() {
var d = document;
			// If things are still loading, do some magic
if (d.readyState == 'interactive') {
function stop() {
// Prevent memory leak
d.detachEvent('onstop', stop);
// Call unload handler
if (unload)
unload();
d = 0;
};
// Fire unload when the currently loading page is stopped
if (d)
d.attachEvent('onstop', stop);
// Remove onstop listener after a while to prevent the unload function
// to execute if the user presses cancel in an onbeforeunload
// confirm dialog and then presses the browser stop button
win.setTimeout(function() {
if (d)
d.detachEvent('onstop', stop);
}, 0);
}
};
// Attach unload handler
if (win.attachEvent) {
win.attachEvent('onunload', unload);
win.attachEvent('onbeforeunload', fakeUnload);
} else if (win.addEventListener)
win.addEventListener('unload', unload, false);
// Setup initial unload handler array
t.unloads = [f];
} else
t.unloads.push(f);
return f;
},
/**
* Removes the specified function form the unload handler list.
*
* @method removeUnload
* @param {function} f Function to remove from unload handler list.
* @return {function} Removed function name or null if it wasn't found.
*/
removeUnload : function(f) {
var u = this.unloads, r = null;
tinymce.each(u, function(o, i) {
if (o && o.func == f) {
u.splice(i, 1);
r = f;
return false;
}
});
return r;
},
/**
* Splits a string but removes the whitespace before and after each value.
*
* @method explode
* @param {string} s String to split.
* @param {string} d Delimiter to split by.
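	 * @return {Array} Array of split values with surrounding whitespace trimmed.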
* @example
* // Split a string into an array with a,b,c
* var arr = tinymce.explode('a, b, c');
*/
explode : function(s, d) {
return s ? tinymce.map(s.split(d || ','), tinymce.trim) : s;
},
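	// Appends the cache-busting query string (this.query) to a URL,
	// keeping any existing #fragment at the end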
_addVer : function(u) {
var v;
if (!this.query)
return u;
v = (u.indexOf('?') == -1 ? '?' : '&') + this.query;
if (u.indexOf('#') == -1)
return u + v;
return u.replace('#', v + '#');
},
	// Fix function for IE 9 where regexps aren't working correctly
// Todo: remove me once MS fixes the bug
_replace : function(find, replace, str) {
// On IE9 we have to fake $x replacement
if (isRegExpBroken) {
return str.replace(find, function() {
var val = replace, args = arguments, i;
for (i = 0; i < args.length - 2; i++) {
if (args[i] === undefined) {
val = val.replace(new RegExp('\\$' + i, 'g'), '');
} else {
val = val.replace(new RegExp('\\$' + i, 'g'), args[i]);
}
}
return val;
});
}
return str.replace(find, replace);
}
/**#@-*/
};
// Initialize the API
tinymce._init();
// Expose tinymce namespace to the global namespace (window)
win.tinymce = win.tinyMCE = tinymce;
// Describe the different namespaces
/**
 * Root level namespace; this contains classes directly related to the TinyMCE editor.
*
* @namespace tinymce
*/
/**
 * Contains classes for handling the browser's DOM.
*
* @namespace tinymce.dom
*/
/**
* Contains html parser and serializer logic.
*
* @namespace tinymce.html
*/
/**
* Contains the different UI types such as buttons, listboxes etc.
*
* @namespace tinymce.ui
*/
/**
* Contains various utility classes such as json parser, cookies etc.
*
* @namespace tinymce.util
*/
/**
* Contains plugin classes.
*
* @namespace tinymce.plugins
*/
})(window); | PypiClean |
/Godot-0.1.1.zip/Godot-0.1.1/godot/component/polygon.py | #------------------------------------------------------------------------------
# Imports:
#------------------------------------------------------------------------------
from numpy import array
from enthought.traits.api import \
Instance, Float, Bool, List, Trait, Tuple, on_trait_change
from enthought.traits.ui.api import View, Item, Group
from enthought.enable.api import Component
from enthought.kiva import EOF_FILL_STROKE, FILL_STROKE
from enthought.kiva.agg import points_in_polygon
from pen import Pen
#------------------------------------------------------------------------------
# "Polygon" class:
#------------------------------------------------------------------------------
class Polygon(Component):
""" Component with Polygon traits """
#--------------------------------------------------------------------------
# "Polygon" interface:
#--------------------------------------------------------------------------
# Pen used to draw the polygon
pen = Instance(Pen, desc="the pen with which to draw the polygon")
# Points defining the vertices of the polygon
points = List(
Tuple(Float, Float, labels=["x", "y"], cols=2),
desc="points defining the vertices of the polygon"
)
# Is the polygon filled?
filled = Bool(False, desc="Should the component be filled")
# Rule to use to determine the inside of the polygon
inside_rule = Trait(
"winding", {"winding":FILL_STROKE, "oddeven":EOF_FILL_STROKE},
desc="the rule to use to determine the inside of the polygon"
)
# Background colour of the component
bgcolor = "transparent"#(1.0, 0.5, 0.5, 0.33)
#--------------------------------------------------------------------------
# Views:
#--------------------------------------------------------------------------
traits_view = View(
Group(
Item("pen", style="custom", show_label=False),
label="Pen", show_border=True
),
Group(
Item("points", height=250, show_label=False),
label="Points", show_border=True
),
Item("filled"), Item("inside_rule")
)
#--------------------------------------------------------------------------
# Draw component on the graphics context:
#--------------------------------------------------------------------------
def _draw_mainlayer(self, gc, view_bounds=None, mode="default"):
""" Draws a closed polygon """
gc.save_state()
try:
# self._draw_bounds(gc)
if len(self.points) >= 2:
# Set the drawing parameters.
gc.set_fill_color(self.pen.fill_color_)
gc.set_stroke_color(self.pen.color_)
gc.set_line_width(self.pen.line_width)
# Draw the path.
gc.begin_path()
# x0 = self.points[0][0] - self.x
# y0 = self.points[0][1] + self.y
# gc.move_to(x0, y0)
# offset_points = [(x-self.x, y+self.y) for x, y in self.points]
gc.lines(self.points)
gc.close_path()
if self.filled:
gc.draw_path(self.inside_rule_)
else:
gc.stroke_path()
finally:
gc.restore_state()
def is_in(self, point_x, point_y):
""" Test if a point is within this polygonal region """
point_array = array(((point_x, point_y),))
vertices = array(self.points)
winding = self.inside_rule == "winding"
result = points_in_polygon(point_array, vertices, winding)
return result[0]
def _draw_bounds(self, gc):
""" Draws the component bounds for testing purposes """
dx, dy = self.bounds
x, y = self.position
gc.rect(x, y, dx, dy)
gc.stroke_path()
def normal_left_down(self, event):
""" Handles left mouse button clicks in 'normal' mode """
print "Polygon selected at (%d, %d)" % (event.x, event.y)
@on_trait_change("pen.+,points,filled")
def _update(self):
if not self.points: return
x_points = [x for x, y in self.points]
y_points = [y for x, y in self.points]
x = min(x_points)
x2 = max(x_points)
y = min(y_points)
y2 = max(y_points)
self.position = [x, y]
# Don't let bounds be set to 0, otherwise, horizontal and vertical
# lines will not render because enable skips rendering items with
# bounds=[0,0]
self.bounds = [max(x2-x,1), max(y2-y,1)]
self.request_redraw()
#------------------------------------------------------------------------------
# Stand-alone call:
#------------------------------------------------------------------------------
if __name__ == "__main__":
from pylon.ui.graph.component_viewer import ComponentViewer
pen = Pen()
polygon = Polygon(
pen=pen, points=[(50, 50), (50, 100), (100, 100)],
# bounds=[50, 50], position=[50, 50]
)
viewer = ComponentViewer(component=polygon)
from enthought.enable.primitives.api import Box
box = Box(
color="red", border_color="blue", border_size=1,
bounds=[50, 50], position=[100, 100]
)
# viewer.canvas.add(box)
viewer.configure_traits()
# EOF ------------------------------------------------------------------------- | PypiClean |
/HAFFET-0.1.2.tar.gz/HAFFET-0.1.2/sdapy/model_fitters.py |
import os
import numpy as np
from scipy.optimize import minimize, curve_fit
import matplotlib.pyplot as plt
import emcee, time
import h5py
from multiprocessing import Pool, cpu_count
from sdapy.functions import *
from sdapy.corner_hack import quantile, corner_hack
from sdapy.filters import central_wavelengths
from sdapy import constants
def get_engine():
mod = __import__('sdapy', fromlist=['engines'])
enginelist = dict()
for _engine in mod.engines.__all__:
if not _engine in mod.engines.__dict__:
print ('Warning: skip engine %s since not registered in engines'%\
_engine)
continue
fit_engine = mod.engines.__dict__[_engine].engine
enginelist[_engine] = fit_engine
return enginelist
def get_model(which_engine=None, with_alias=False):
enginelist = get_engine()
mod = __import__('sdapy', fromlist=['models'])
# accumulate all alias and engine
allwhichs = dict()
for _which in mod.models.__all__:
if not _which in mod.models.__dict__:
print ('Warning: skip model %s since not registered in models'%_which)
continue
fit_pars = mod.models.__dict__[_which].parameters.modelmeta
for _subwhich in fit_pars:
assert 'engine' in fit_pars[_subwhich],\
'Error: define an engine for %s.%s'%(_which, _subwhich)
fit_engine = fit_pars[_subwhich]['engine']
assert fit_engine in enginelist.keys(), \
'Error: wrong engine found for %s, select from %s'%\
(fit_engine, enginelist)
if which_engine is not None and which_engine != fit_engine: continue
if fit_engine not in allwhichs: allwhichs[fit_engine] = []
allwhichs[fit_engine].append(_subwhich.lower())
if 'alias' in fit_pars[_subwhich] and with_alias:
for __ in fit_pars[_subwhich]['alias']: allwhichs[fit_engine].append(__.lower())
return allwhichs
def get_pars(which, with_alias=True):
enginelist = get_engine()
which = which.lower()
mod = __import__('sdapy', fromlist=['models'])
# accumulate all alias and engine
allwhichs = dict()
for _which in mod.models.__all__:
if not _which in mod.models.__dict__:
print ('Warning: skip model %s since not registered in models'%\
_which)
continue
fit_pars = mod.models.__dict__[_which].parameters.modelmeta
for _subwhich in fit_pars:
assert 'engine' in fit_pars[_subwhich],\
'Error: define an engine for %s.%s'%(_which, _subwhich)
fit_engine = fit_pars[_subwhich]['engine']
assert fit_engine in enginelist.keys(), \
'Error: wrong engine found for %s, select from %s'%\
(fit_engine, enginelist)
allwhichs[_subwhich.lower()] = (_which, _subwhich, fit_engine)
if 'alias' in fit_pars[_subwhich] and with_alias:
for __ in fit_pars[_subwhich]['alias']:
allwhichs[__.lower()] = (_which, _subwhich, fit_engine)
# make sure which is in alias
if not which in allwhichs.keys():
        print('Error: model %s does not exist, choose from %s'%\
              (which, allwhichs.keys()))
return
lv1, lv2, engine = allwhichs[which]
_mod = mod.models.__dict__[lv1].parameters.modelmeta[lv2]
# func
func = None
for k in _mod['func'].split('.'):
if func is None: func = mod.models.__dict__[lv1].__dict__[k]
else: func = func.__dict__[k]
# output dict
output = dict()
output['func'] = func
output['engine'] = enginelist[engine]
output['enginename'] = engine
output['parname'] = _mod['parname']
output['par'] = _mod['par']
# if fit together?
output['same_parameter'] = []
if not 'same_parameter' in _mod: _mod['same_parameter']=[]
for p in _mod['same_parameter']:
assert p in _mod['par'], \
'common parameter %s not in parlist %s?'%(p, _mod['par'])
output['same_parameter'].append(p)
# if fit error?
output['fit_error'] = False
if 'fit_error' in _mod:
output['fit_error'] = _mod['fit_error']
# guess and bounds
bestv, bounds1, bounds2 = [], [], []
for _ in output['par']:
bestv.append( _mod['bestv'][_] )
bounds1.append( _mod['bounds'][_][0] )
bounds2.append( _mod['bounds'][_][1] )
output['bestv'] = bestv
output['bounds'] = (bounds1, bounds2)
return output
class fit_model:
"""Fits data with power law.
power law parts were from:
Miller et al, https://ui.adsabs.harvard.edu/abs/2020ApJ...902...47M/abstract
Parameters
----------
x_data : array
Independent values, e.g. (rest frame) phase relative to peak
y_data : array
Dependent values, e.g. fluxes
yerr_data : array, int
Dependent value errors, e.g. flux errors
filters : array
If filters available, will fit for each band simultaneously.
opt_routine : str,
        Which technique to use for the optimization.
        Possible choices are: 'mcmc', 'minimize', 'leastsq'.
    nwalkers : int
        if mcmc is adopted, number of walkers
    ncores : int
        number of cores to use for multiprocessing
    nsteps: int
        if mcmc is adopted, number of steps
    clobber: bool
        whether to redo the fit if it has already been done
verbose: bool
show progress or not
Returns
-------
Returns the interpolated independent and dependent values with the 1-sigma standard deviation.
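
    Examples
    --------
    A minimal sketch (array names and settings below are illustrative)::

        fitter = fit_model(x, y, yerr, filters=None)
        fitter.train(opt_routine='mcmc', fit_mean='powerlaw', nsteps=2000)
        fitter.predict()
        fitter.set_peak()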
"""
def __init__(self, x_data, y_data, yerr_data, filters=None):
assert len(x_data) >= 2 and len(x_data) == len(y_data), 'Error: check input data...'
try: # if pandas.dataframe
self.x = x_data.to_numpy()
self.y = y_data.to_numpy()
except: # otherwise, numpy array
self.x = x_data
self.y = y_data
if yerr_data is not None:
assert len(y_data) == len(yerr_data), 'Error: check input data...'
try: # if pandas.dataframe
self.yerr = yerr_data.to_numpy()
except: # otherwise, numpy array
self.yerr = yerr_data
else:
self.yerr = self.y/10.
self.filters = filters
def train(self, opt_routine='curvefit', fit_mean='powerlaw', nwalkers=30,
nsteps=20000, nsteps_burnin=50, ncores=27, thin_by=1, maxfev=20000,
mcmc_h5_file='tmp', emcee_burnin=True, use_emcee_backend=True,
clobber=False, verbose=True, datadir='./', sigma=3, scipysamples=100,
t0=0, timedilation=1, xpredict=None, bestv=None, bounds=None):
assert opt_routine in ['mcmc', 'minimize', 'leastsq']
self.opt_routine = opt_routine
self.nwalkers = nwalkers
self.nsteps = nsteps
self.nsteps_burnin = nsteps_burnin
if ncores is None: self.ncores = cpu_count() - 1
else: self.ncores = ncores
self.thin_by = thin_by
self.maxfev = maxfev
self.emcee_burnin = emcee_burnin
self.use_emcee_backend = use_emcee_backend
self.clobber = clobber
self.verbose = verbose
self.sigma = sigma
self.scipysamples = scipysamples
self.datadir = datadir
self.t0 = t0
self.timedilation = timedilation
self.xpredict = xpredict
_which = get_pars(fit_mean)
self.func = _which['func']
if bestv is None: self.bestv = _which['bestv']
else: self.bestv = bestv
if bounds is None: self.bounds = _which['bounds']
else: self.bounds = bounds
self.par = _which['par']
self.parname = _which['parname']
self.fit_error = _which['fit_error']
assert self.func is not None, 'define func correctly'
assert self.bestv is not None, 'define bestv correctly'
assert self.bounds is not None, 'define bounds correctly'
self.cachefile = '%s/%s'%(self.datadir, mcmc_h5_file)
if self.filters is not None:
self.suffix = '_'
for f in central_wavelengths:
if f in self.filters: self.suffix += f
else:
self.suffix = ''
self.samples, self.lnprob, self.cl = dict(), dict(), []
if self.filters is None:
# bolometric lc
self.lnposterior = self.lnposterior1
# guess best fit
try:
p0, _ = self.run_scipy(self.func, self.bestv, self.bounds,
self.x, self.y, self.yerr, sigma=self.sigma, maxfev=self.maxfev)
except:
p0 = self.bestv
self.samples, self.lnprob = self.run(p0, self.bounds)
elif len(_which['same_parameter'])>0 and opt_routine=='mcmc':
# fit mcmc with all bands simultaneously, with some free parameters fitted in all bands
self.lnposterior = self.lnposterior2
# rearange parameters
for p in _which['same_parameter']:
assert p in self.par
for _n, _p in enumerate(self.par):
if _p == p: self.cl.append(_n)
assert len(self.cl) > 0
# run
pos = []
for f in np.unique(self.filters):
_ = np.where(self.filters == f)
try:
p0, _ = self.run_scipy(self.func, self.bestv, self.bounds,
self.x[_], self.y[_], self.yerr[_], sigma=self.sigma, maxfev=self.maxfev)
except:
p0 = self.bestv
_bounds1, _bounds2 = self.bounds[0], self.bounds[1]
if self.fit_error: p0 = np.append( p0, constants.fsigma_p0 )
pos = np.append(pos, p0)
if not self.fit_error:
bounds = [self.bounds[0] * len(np.unique(self.filters)),
self.bounds[1] * len(np.unique(self.filters))]
nn = len(self.par)
else:
bounds = [(self.bounds[0]+[constants.fsigma_bounds[0]]) * len(np.unique(self.filters)),
(self.bounds[1]+[constants.fsigma_bounds[1]]) * len(np.unique(self.filters))]
nn = len(self.par) + 1
samples, lnprob = self.run(pos, bounds)
# assign samples to each bands
nf = 0
for f in central_wavelengths:
if f in self.filters:
samp = np.ndarray(shape=(len(samples),nn), dtype=float)
for nrow, row in enumerate(samples):
for col in range(nf*nn, (nf+1)*nn):
if col%nn in self.cl: samp[nrow, col%nn] = row[col%nn]
else: samp[nrow, col%nn] = row[col]
self.samples[f] = samp
self.lnprob[f] = lnprob
nf += 1
else:
# fit mcmc or scipy in each band successively
#self.cl, self.nll, self.lnposterior = [], self.nll1, self.lnposterior1
self.cl, self.lnposterior = [], self.lnposterior1
for f in np.unique(self.filters):
_ = np.where(self.filters == f)
try:
p0, _ = self.run_scipy(self.func, self.bestv, self.bounds,
self.x[_], self.y[_], self.yerr[_], sigma=self.sigma, maxfev=self.maxfev)
except:
p0 = self.bestv
self.samples[f], self.lnprob[f] = self.run(p0, self.bounds, filt=f)
def run(self, bestv, bounds, filt=None):
if self.filters is None:
cachefile = '%s%s'%(self.cachefile, self.suffix)
x, y, yerr = self.x, self.y, self.yerr
filters=None
elif len(self.cl)>0:
cachefile = '%s%s'%(self.cachefile, self.suffix)
x, y, yerr = self.x, self.y, self.yerr
filters=self.filters
else:
assert filt is not None
cachefile = '%s_%s'%(self.cachefile, filt)
_ = np.where(self.filters == filt)
x = self.x[_]
y = self.y[_]
yerr = self.yerr[_]
filters = self.filters[_]
if not '.h5' in cachefile: cachefile = '%s.h5'%cachefile
# check if cache exists or not
if os.path.exists(cachefile) and self.clobber: os.remove( cachefile )
elif os.path.exists(cachefile) and not self.clobber:
if self.opt_routine=='mcmc':
# check steps, if sample steps less than demanded, continue the chain
sample_nsteps = get_samples_nstep(cachefile, self.thin_by)
if self.nsteps <= sample_nsteps:
                    print ('Warning: cached file includes %i steps, more than the requested %i; reloading'%
                           (sample_nsteps,self.nsteps))
samples, lnprob = get_samples_mc(cachefile)
return samples, lnprob
else:
                    print ('Warning: cached file includes %i steps, fewer than the requested %i; continuing for a further %i steps'%
                           (sample_nsteps,self.nsteps,self.nsteps-sample_nsteps))
self.nsteps -= sample_nsteps
else:
samples, lnprob = get_samples_scipy(cachefile)
return samples, lnprob
if self.opt_routine in ['minimize', 'leastsq']: # run scipy fit
samples, lnprob = self.run_scipy(self.func, bestv, bounds, x, y, yerr,
sigma=self.sigma, maxfev=self.maxfev, nsamp=self.scipysamples)
            # store samples
hf = h5py.File(cachefile, 'w')
hf.create_dataset('samples', data=samples)
hf.create_dataset('lnprob', data=lnprob)
hf.close()
return samples, lnprob
elif self.opt_routine == 'mcmc': # emcee fit
args = (x, y, yerr, bestv, bounds, filters, cachefile)
if not os.path.exists(cachefile): self.run_mcmc(*args)
else: self.continue_chains(*args)
samples, lnprob = get_samples_mc(cachefile)
return samples, lnprob
# optimization routine for hyperparameters
def run_mcmc(self, t_data, f_data, f_unc_data, bestv, bounds, filters=None, cachefile='tmp.h5'):
''' initial fit '''
t_mcmc_start = time.time()
if len(self.cl) > 0:
args = (f_data, t_data, f_unc_data, filters, self.cl, self.func, bounds, self.fit_error)
else:
args = (f_data, t_data, f_unc_data, self.func, bounds, self.fit_error)
ndim = len(bestv)
pos = bestv + 1e-4 * np.random.randn(self.nwalkers, ndim)
with Pool(self.ncores) as pool:
if self.emcee_burnin:
burn_sampler = emcee.EnsembleSampler(self.nwalkers, ndim,
self.lnposterior, args=args, pool=pool)
pos, _, _ = burn_sampler.run_mcmc(pos, nsteps=self.nsteps_burnin,
thin_by=self.thin_by, progress=self.verbose)
if self.use_emcee_backend:
# file to save samples
filename = cachefile
backend = emcee.backends.HDFBackend(filename)
backend.reset(self.nwalkers, ndim)
sampler = emcee.EnsembleSampler(self.nwalkers, ndim,
self.lnposterior, args=args, backend=backend, pool=pool)
else:
sampler = emcee.EnsembleSampler(self.nwalkers, ndim,
self.lnposterior, args=args, pool=pool)
max_samples = self.nsteps
old_tau = np.inf
steps_so_far, tau = 0, None
for sample in sampler.sample(pos, iterations=max_samples,
thin_by=self.thin_by, progress=False):
if sampler.iteration <= int(1e3/self.thin_by):
continue
elif ((int(1e3/self.thin_by) < sampler.iteration <= int(1e4/self.thin_by))
and sampler.iteration % int(1e3/self.thin_by)):
continue
elif ((int(1e4/self.thin_by) < sampler.iteration <= int(1e5/self.thin_by))
and sampler.iteration % int(1e4/self.thin_by)):
continue
elif ((int(1e5/self.thin_by) < sampler.iteration) and
sampler.iteration % int(2e4/self.thin_by)):
continue
tstart = time.time()
tau = sampler.get_autocorr_time(tol=0)
tend = time.time()
steps_so_far = sampler.iteration
if self.verbose:
print('''After {:d} steps,
autocorrelation takes {:.3f} s ({} total FFTs)
acceptance fraction = {:.4f}, and
tau = {}'''.format(steps_so_far,
tend-tstart, self.nwalkers*ndim,
np.mean(sampler.acceptance_fraction),
tau))
# Check convergence
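                # Converged once the chain is longer than 100 autocorrelation
                # times and tau has changed by less than 1% since the last
                # estimate (the criterion suggested in the emcee documentation)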
converged = np.all(tau * 100 < sampler.iteration)
converged &= np.all(np.abs(old_tau - tau) / tau < 0.01)
if converged:
break
old_tau = tau
if self.verbose:
print("Ran {} steps; final tau= {}".format(steps_so_far*self.thin_by, tau))
t_mcmc_end = time.time()
print("All in = {:.2f} s on {} cores".format(t_mcmc_end - t_mcmc_start, self.ncores))
def continue_chains(self, t_data, f_data, f_unc_data, bestv, bounds, filters=None, cachefile='tmp.h5'):
'''Run MCMC for longer than initial fit'''
t_mcmc_start = time.time()
if len(self.cl) > 0:
args = (f_data, t_data, f_unc_data, filters, self.cl, self.func, bounds, self.fit_error)
else:
args = (f_data, t_data, f_unc_data, self.func, bounds, self.fit_error)
with Pool(self.ncores) as pool:
# file to save samples
filename = cachefile
new_backend = emcee.backends.HDFBackend(filename)
_, nwalkers, ndim = np.shape(new_backend.get_chain())
new_sampler = emcee.EnsembleSampler(nwalkers, ndim, self.lnposterior,
args=args, pool=pool, backend=new_backend)
max_samples = self.nsteps
if max_samples <= 2e3:
print ('Warning: nsteps should be larger than 2000')
return
steps_so_far = new_sampler.iteration
old_tau = new_sampler.get_autocorr_time(tol=0)
for i in range(int(max_samples/(2e3/self.thin_by))):
new_sampler.run_mcmc(None, int(2e3/self.thin_by),
thin_by=self.thin_by, progress=False)
tstart = time.time()
tau = new_sampler.get_autocorr_time(tol=0)
tend = time.time()
steps_so_far = new_sampler.iteration
print('''After {:d} steps,
autocorrelation takes {:.3f} s ({} total FFTs)
acceptance fraction = {:.4f}, and
tau = {}'''.format(steps_so_far,
tend-tstart, nwalkers*ndim,
np.mean(new_sampler.acceptance_fraction),
tau))
# Check convergence
converged = np.all(tau * 100 < new_sampler.iteration)
converged &= np.all(np.abs(old_tau - tau) / tau < 0.01)
if converged:
break
old_tau = tau
print("Ran {} steps; final tau= {}".format(steps_so_far*self.thin_by, tau))
t_mcmc_end = time.time()
print("All in = {:.2f} s on {} cores".format(t_mcmc_end - t_mcmc_start, self.ncores))
def save_corner(self, figpath, datadir=None, filts=None, limit=0, quantiles=[.05,.95], clobber=False):
''' generate corner plots '''
assert 'samples' in self.__dict__
assert 'lnprob' in self.__dict__
parname = self.parname
if self.fit_error and not r'$f_\mathrm{\sigma}$' in parname:
parname += [r'$f_\mathrm{\sigma}$']
if datadir is None: datadir = self.datadir
fignames = []
if self.filters is None:
_fign = '{}/{}.png'.format(datadir, figpath)
if os.path.exists(_fign) and not clobber: pass
else:
if os.path.exists(_fign): os.remove(_fign)
samples = self.filter_samples(self.samples, self.lnprob, limit=limit)
cfig = corner_hack(samples, labels=parname,
label_kwargs={'fontsize':16}, ticklabelsize=13,
show_titles=True, quantiles=quantiles,
title_fmt=".2f", title_kwargs={'fontsize':16},
plot_datapoints=True, plot_contours=True)
cfig.savefig(_fign, bbox_inches='tight')
fignames.append(_fign)
else:
if len(self.cl) == 0:
                # make corner plots for different bands separately
for f in np.unique(self.filters):
if filts is not None and f not in filts: continue
_fign = '{}/{}_{}.png'.format(datadir, figpath, f)
if os.path.exists(_fign) and not clobber: pass
else:
if os.path.exists(_fign): os.remove(_fign)
samples = self.filter_samples(self.samples[f],
self.lnprob[f], limit=limit)
cfig = corner_hack(samples, labels=parname,
label_kwargs={'fontsize':16}, ticklabelsize=13,
show_titles=True, quantiles=quantiles,
title_fmt=".2f", title_kwargs={'fontsize':16},
plot_datapoints=True, plot_contours=True)
cfig.savefig(_fign, bbox_inches='tight')
fignames.append(_fign)
else:
# make corner plots for different bands together
npar = (len(parname)-len(self.cl)) * len(np.unique(self.filters)) + len(self.cl)
for f in central_wavelengths:
if f in self.filters:
fc = f
break
samples = np.zeros((np.shape(self.samples[fc])[0],npar))
paramsNames, nc = [], 0
for n in self.cl:
samples[:,nc] = self.samples[fc][:,n]
paramsNames += [parname[n]]
nc += 1
for f in np.unique(self.filters):
for nn, n in enumerate(parname):
if nn % len(parname) in self.cl: continue
samples[:,nc] = self.samples[f][:,nn]
paramsNames += ['%s_%s'%(n, f)]
nc += 1
_fign = '{}/{}.png'.format(datadir, figpath)
if os.path.exists(_fign) and not clobber: pass
else:
if os.path.exists(_fign): os.remove(_fign)
cfig = corner_hack(samples, labels=paramsNames,
label_kwargs={'fontsize':16}, ticklabelsize=13,
show_titles=True, quantiles=quantiles,
title_fmt=".2f", title_kwargs={'fontsize':16},
plot_datapoints=True, plot_contours=True)
cfig.savefig(_fign, bbox_inches='tight')
fignames.append(_fign)
return fignames
def get_par(self, filt=None, quant=[.05,.5,.95], parname=None):
assert 'samples' in self.__dict__
if self.samples is None: return None, None, None
mean, p1, p2 = [], [], []
par = self.par
for _ in range(len(par)):
if self.filters is None:
xs = self.samples[:,_]
else:
assert filt is not None
xs = self.samples[filt][:,_]
ql,q,qu = quantile(np.atleast_1d(xs), quant, weights=None)
mean.append(q)
p1.append(ql)
p2.append(qu)
if parname is None: return p1, mean, p2
assert parname in self.par
__ = np.where(np.array(self.par) == parname)
return np.array(p1)[__][0], np.array(mean)[__][0], np.array(p2)[__][0]
def predict(self, x_pred=None, step = 1, returnv=False, quant=[.05,.5,.95]):
''' output fitting products '''
if x_pred is None:
if self.xpredict is not None:
x_pred = np.arange(min(self.xpredict), max(self.xpredict), step)
else:
x_pred = np.arange(self.x.min(), self.x.max(), step)
x_predl, y_predl, y_pred_1, y_pred_2, _ws = [], [], [], [], []
if self.filters is not None:
for _f in np.unique(self.filters):
q1,q,q2 = self.get_par(filt=_f, quant=quant)
if q1 is None: continue
y = self.func( x_pred, *q )
y1 = self.func( x_pred, *q1 )
y2 = self.func( x_pred, *q2 )
x_predl.append( x_pred )
y_predl.append( y )
y_pred_1.append( y1 )
y_pred_2.append( y2 )
_ws.append( np.array( len(x_pred) * [_f]) )
else:
q1,q,q2 = self.get_par(filt=None, quant=quant)
if q1 is not None:
y = self.func( x_pred, *q )
y1 = self.func( x_pred, *q1 )
y2 = self.func( x_pred, *q2 )
x_predl, y_predl, y_pred_1, y_pred_2 = x_pred, y, y1, y2
if returnv:
return np.array(x_predl)*self.timedilation+self.t0, np.array(y_predl), \
np.array(y_pred_1), np.array(y_pred_2), np.array(_ws)
self.x_pred = np.array(x_predl)*self.timedilation+self.t0
self.y_pred = np.array(y_predl)
self.y_pred1 = np.array(y_pred_1)
self.y_pred2 = np.array(y_pred_2)
self.f_pred = np.array(_ws)
def predict_random(self, limit=0., plotnsamples=100, x_pred=None, step = 1):
''' output fitting products '''
if x_pred is None:
if self.xpredict is not None:
x_pred = np.arange(min(self.xpredict), max(self.xpredict), step)
else:
x_pred = np.arange(self.x.min(), self.x.max(), step)
if not 'theta_samp' in self.__dict__:
self.get_random_samples(limit=limit, plotnsamples=plotnsamples)
x_predl, y_predl, _ws = [], [], []
if 'theta_samp' in self.__dict__:
if self.filters is not None:
for _f in np.unique(self.filters):
if _f not in self.theta_samp: continue
nsamples = min(len(self.theta_samp[_f]), plotnsamples)
for i in range(nsamples):
par = self.theta_samp[_f][i]
y = self.func( x_pred, *par[0:len(self.bestv)] )
x_predl.append( x_pred )
y_predl.append( y )
_ws.append( _f )
else:
nsamples = min(len(self.theta_samp), plotnsamples)
for i in range(nsamples):
par = self.theta_samp[i]
y = self.func( x_pred, *par[0:len(self.bestv)] )
x_predl.append( x_pred )
y_predl.append( y )
return np.array(x_predl)*self.timedilation+self.t0, np.array(y_predl), np.array(_ws)
def get_random_samples(self, limit=0.2, plotnsamples=100):
assert 'samples' in self.__dict__
assert 'lnprob' in self.__dict__
if self.samples is None: return
if self.filters is None:
# best sample
self.theta = self.samples[np.argmax(self.lnprob)]
# other samples
_samples = self.filter_samples(self.samples, self.lnprob, limit=limit)
nsamples = min(len(_samples), plotnsamples)
self.theta_samp = []
for samp_num in np.random.choice(range(len(_samples)),nsamples, replace=False):
self.theta_samp.append( _samples[samp_num] )
else:
self.theta, self.theta_samp = dict(), dict()
for _f in np.unique(self.filters):
if _f not in self.samples:continue
# best sample
self.theta[_f] = self.samples[_f][np.argmax(self.lnprob[_f])]
# other samples
_samples = self.filter_samples(self.samples[_f], self.lnprob[_f], limit=limit)
nsamples = min(len(_samples), plotnsamples)
self.theta_samp[_f] = []
for samp_num in np.random.choice(range(len(_samples)),nsamples, replace=False):
self.theta_samp[_f].append( _samples[samp_num] )
@staticmethod
def filter_samples(samples, lnprob, limit=0.):
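        ''' discard samples whose lnprob falls within the lowest `limit`
        fraction of the lnprob range '''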
thre = min(lnprob) + (max(lnprob) - min(lnprob)) * limit
theta_samp = []
for nn, samp_num in enumerate(samples):
if lnprob[nn] >= thre: theta_samp.append( samp_num )
return np.array(theta_samp)
def set_peak(self):
if self.filters is None:
xx = self.x_pred
yy = self.y_pred
y_pred1 = self.y_pred1
y_pred2 = self.y_pred2
__ = np.argmax(yy)
yye = penc_to_errors(yy[__], y_pred1[__], y_pred2[__])
self.tpeak = xx[np.argmax(yy)]
self.fpeak = ( max(yy), yye )
else:
self.tpeak, self.fpeak = dict(), dict()
for _f in np.unique(self.filters):
_ = np.where(self.f_pred==_f)
xx = self.x_pred[_]
yy = self.y_pred[_]
y_pred1 = self.y_pred1[_]
y_pred2 = self.y_pred2[_]
__ = np.argmax(yy)
yye = penc_to_errors(yy[__], y_pred1[__], y_pred2[__])
self.tpeak[_f] = xx[np.argmax(yy)]
self.fpeak[_f] = ( max(yy), yye )
@staticmethod
    def run_scipy(func, p0, bounds, x, y, yerr, sigma=3, maxfev=2000, nsamp=None):
        try:
            params, covar = curve_fit(func, x, y, method='trf',
                sigma=yerr, p0=p0, absolute_sigma=True, maxfev=maxfev, bounds=bounds)
        except:
            #import matplotlib.pyplot as plt
            #plt.plot(x,y,'ko')
            #plt.savefig('tmp.png')
            params, covar = curve_fit(func, x, y, method='trf', p0=p0, maxfev=maxfev, bounds=bounds)
        perr = np.sqrt(np.diag(covar))
        if nsamp is None: return params, perr
        samples = np.ndarray(shape=(nsamp,len(p0)), dtype=float)
        lnprob = np.ndarray(shape=(nsamp,1), dtype=float)
        try:
            # draw parameter samples from the multivariate normal defined by covar
            # https://stackoverflow.com/questions/70263573/generate-200-data-points-drawn-from-a-multivariate-normal-distribution-with-mean
            w, v = np.linalg.eig(covar)
            sigmas = np.sqrt(w) * v
            A = sigmas @ np.random.randn(len(params), nsamp) + np.array([params]).T
            for _n in range(len(params)):
                for _nn in range(nsamp):
                    samples[_nn, _n] = A.T[_nn][_n]
        except:
            # fall back to uniform draws, ignoring covariances that are not diagonal
            for _n, (_1,_2) in enumerate(zip(params - perr*sigma, params + perr*sigma)):
                __ = np.random.uniform(low=_1, high=_2, size=(nsamp))
                for _nn in range(nsamp): samples[_nn, _n] = __[_nn]
        for _nn, _theta in enumerate(samples):
            model = func(x, *_theta)
            lnprob[_nn, 0] = -0.5*np.sum((y - model)**2 / ((yerr)**2)) - \
                np.sum(np.log(yerr)) - 0.5*len(model)*np.log(2*np.pi)
        return samples, lnprob
@staticmethod
def lnlikelihood1(theta, f, t, f_err, func, fit_error):
if fit_error:
theta, f_sigma = theta[:-1], theta[-1]
model = func(t, *theta)
#assert np.all(model > -np.inf)
ln_l = -0.5*np.sum((f - model)**2 / ((f_sigma*f_err)**2)) - \
np.sum(np.log(f_sigma*f_err)) - 0.5*len(model)*np.log(2*np.pi)
else:
model = func(t, *theta)
#assert np.all(model > -np.inf)
ln_l = -0.5*np.sum((f - model)**2 / f_err**2) + np.sum(np.log(1/np.sqrt(2*np.pi*f_err**2)))
return ln_l
@staticmethod
def lnprior1(theta, bounds):
for n, t in enumerate(theta):
if t < bounds[0][n] or t > bounds[1][n]:
return -np.inf
return 0
@staticmethod
def lnposterior1(theta, f, t, f_err, func, bounds, fit_error):
lnp = fit_model.lnprior1(theta, bounds)
lnl = fit_model.lnlikelihood1(theta, f, t, f_err, func, fit_error)
if not np.isfinite(lnl): return -np.inf
if not np.isfinite(lnp): return -np.inf
return lnl + lnp
@staticmethod
def lnlikelihood2(theta, f, t, f_err, filters, cl, func, fit_error):
n_filt = len(np.unique(filters))
n_theta = int(len(theta) / n_filt)
ln_l = 0
f_num = 0
for filt in central_wavelengths:
            # !!! arrange filter order depending on central_wavelengths
if not filt in filters: continue
            __theta = theta[f_num*n_theta : (f_num+1)*n_theta].copy()
            # common parameters (copy first so the walker state is not mutated)
            for _ in cl: __theta[_] = theta[_]
f_obs = np.where(filters == filt)
ln_l += fit_model.lnlikelihood1(__theta, f[f_obs], t[f_obs], f_err[f_obs], func, fit_error)
f_num += 1
return ln_l
@staticmethod
def lnprior2(theta, filters, cl, bounds):
n_filt = len(np.unique(filters))
n_theta = int(len(theta) / n_filt)
ln_p = 0
f_num = 0
for filt in central_wavelengths:
if not filt in filters: continue
            __theta = theta[f_num*n_theta : (f_num+1)*n_theta].copy()
            # common parameters (copy first so the walker state is not mutated)
            for _ in cl: __theta[_] = theta[_]
__bounds = [bounds[0][f_num*n_theta : (f_num+1)*n_theta],
bounds[1][f_num*n_theta : (f_num+1)*n_theta]]
ln_p += fit_model.lnprior1(__theta, __bounds)
f_num += 1
return ln_p
@staticmethod
def lnposterior2(theta, f, t, f_err, filters, cl, func, bounds, fit_error):
lnp = fit_model.lnprior2(theta, filters, cl, bounds)
lnl = fit_model.lnlikelihood2(theta, f, t, f_err, filters, cl, func, fit_error)
if not np.isfinite(lnl): return -np.inf
if not np.isfinite(lnp): return -np.inf
return lnl + lnp | PypiClean |
/MACS_virtual_experiment-0.168.tar.gz/MACS_virtual_experiment-0.168/MACS_virtual_experiment/Demonstration/.ipynb_checkpoints/Demonstration of Virtual MACS expt object-checkpoint.ipynb | ```
import sys
import os
import numpy as np
cwd = os.getcwd()
#Add the directory of the module to the path.
sys.path.append('/'.join(cwd.split('/')[0:-1]))
from virtualMACS import virtualMACS
print('/'.join(cwd.split('/')[0:-1]))
```
# Initialize the object with a clear name and a cif file
```
testobj = virtualMACS('test_experiment',cifName='TiO2.cif')
testobj.sample.formula_weight=79.87
#File I/O operations require sudo access. Update to your password below.
testobj.sudo_password='password'
```
## Set the sample parameters
In this example, we use a box of the same dimensions as in the reference experiment: 4.3 mm x 3.3 mm x 1.3 mm (x, y, z).
```
testobj.sample.sample_shape='box'
testobj.sample.sample_widx=4.3e-3
testobj.sample.sample_widy=3.3e-3
testobj.sample.sample_widz=1.3e-3
testobj.sample.sample_tilt=-30
'''
testobj.sample.sample_shape='cylinder'
testobj.sample.sample_length=0.02
testobj.sample.sample_diameter_d=0.01
'''
```
## Assign Monochromator Parameters
In this example, we use Ei = Ef = 5 meV.
```
testobj.monochromator.Ei = 5.0
testobj.monochromator.Ef = 5.0
```
## Assign Kidney Parameters
```
testobj.kidney.Ef=5.0
testobj.kidney.kidney_angle=-10.0
testobj.sample.cif2lau()
```
## Check that the sample orientation and projection into the lab frame are correct
```
testobj.sample.Qmag_HKL(1,1,0)
#Tilt the sample
testobj.sample.crystal_axis_xrot=60.0
testobj.sample.crystal_axis_zrot=-15.0
testobj.sample.crystal_axis_zrot=0.0
print('Sample Lattice vectors')
print('')
print('a='+str(testobj.sample.a))
print('alpha='+str(testobj.sample.alpha))
print('b='+str(testobj.sample.b))
print('beta='+str(testobj.sample.beta))
print('c='+str(testobj.sample.c))
print('gamma='+str(testobj.sample.gamma))
print('')
print('Sample orientation U')
print(testobj.sample.orient_u)
testobj.sample.orient_u=[1,1,0]
testobj.sample.orient_v=[0,0,1]
print('Sample orientation v')
print(testobj.sample.orient_v)
print('')
testobj.sample.project_sample_realspace()
print('Real Space projection of lattice vectors [ax,ay,az; bx,by,bz;cx,cy,cz]')
print(testobj.sample.labframe_mat)
print('')
print('Structure factors:')
print('|F(110)|^2 = '+str(round(testobj.sample.fetch_F_HKL(1,1,0)[3],4))+' barn')
print('|F(100)|^2 = '+str(round(testobj.sample.fetch_F_HKL(1,0,0)[3],4))+' barn')
print('|F(1-10)|^2 = '+str(round(testobj.sample.fetch_F_HKL(1,-1,0)[3],4))+' barn')
print('|F(001)|^2 = '+str(round(testobj.sample.fetch_F_HKL(0,0,1)[3],4))+' barn')
```
## Check that some sample dependent cross sections are calculated correctly
```
print('sigma_abs ='+str(testobj.sample.rho_abs)+' barn/unit cell')
print('sigma_inc ='+str(testobj.sample.sigma_inc)+' barn/unit cell')
```
## If the instrument file has not been prepared and compiled, do so now.
```
useOld=False
if useOld==True:
testobj.useOld=True
testobj.prepare_old_expt_directory()
#testobj.clean_expt_directory()
else:
testobj.prepare_expt_directory()
testobj.edit_instr_file()
testobj.compileInstr()
testobj.compileMonochromator()
testobj.n_mono=1e7
testobj.n_sample=1e6
```
## To clean the directory and prepare a new virtual experiment, try something like the following.
It is particularly important to set the data matrix to False, as this resets all of the results.
```
testobj.clean_expt_directory()
testobj.unmount_ramdisk()
testobj.data.data_matrix=False
```
## Change the parameters slightly and run another scan
```
#Use parameters that Mads used for his test
'''
testobj.monochromator.Ei=9.078
testobj.kidney.Ef=9.078
testobj.A3_angle = 67.37
testobj.kidney.kidney_angle=3.0
testobj.preserve_kidney_scan_files=False
testobj.runMonoScan()
testobj.runKidneyScan()
#This may be immediately converted to ng0 if you wish.
'''
```
## Scripting is simple. Simply specify a set of A3 angles and a list of incident energies and the package will handle the rest.
```
testobj.n_sample=1e6
testobj.kidney_angle_resolution=0.3
testobj.preserve_kidney_scan_files=False
testobj.script_scan(A3_list=np.arange(-70,-20,0.25),Ei_list=[5.0],\
num_threads=6,scan_title='_tio2_a3scan_cube')
testobj.data.load_data_matrix_from_csv('_tio2_a3scan_cube_dataMatrix.csv')
testobj.data.write_data_to_ng0('tio2_a3scan_cube.ng0')
```
## It is also simple to emulate an experimental scan using the same values of A3, kidney angle, Ei, and Ef
```
testobj.data.data_matrix=False
sample_ng0 = 'Example_ng0_files/fpx78891.ng0'
testobj.n_sample=1e6
testobj.simulate_ng0(sample_ng0,n_threads=8)
```
## We can also do this for a directory of ng0 files
```
ngo_dir = 'Example_ng0_files/'
#testobj.simulate_ng0dir(ngo_dir,n_threads=6)
```
## At any point, the files in the kidney scan folder can be converted into MSlice-readable ng0 files. The files may be divided by individual Ei value or combined into a single larger file. If they originate from ng0 files, they may also be written as individual ng0 files corresponding to their origin files.
```
#Here we combine any scans that exist individually and append them to the data holder class
#testobj.data.combine_csv_scans(preserve_old=True,flagstr='_combined_')
testobj.data.load_data_matrix_from_csv()
#The data is now written to a MACS style file for comparison in MSlice.
testobj.data.write_data_to_ng0(filename='_cube_tio2_a3scan.ng0',beta_1=testobj.monochromator.beta_1,\
beta_2=testobj.monochromator.beta_2)
testobj.data.combine_all_csv()
testobj.data.load_data_matrix_from_csv('_total.csv')
testobj.data.write_data_to_ng0(filename='_a3scan.ng0')
```
| PypiClean |
/Electrum-CHI-3.3.8.tar.gz/Electrum-CHI-3.3.8/electrum_chi/electrum/plugins/coldcard/README.md |
# Coldcard Hardware Wallet Plugin
## Just the glue please
This code connects the public USB API and Electrum. Leverages all
the good work that's been done by the Electrum team to support
hardware wallets.
## Background
The Coldcard has a larger screen (128x64) and a number pad. For
this reason, all PIN code entry is done directly on the device.
Coldcard does not appear on the USB bus until unlocked with appropriate
PIN. Initial setup, and seed generation must be done offline.
Coldcard uses an emerging standard for unsigned transactions:
PSBT = Partially Signed Bitcoin Transaction = BIP174
However, this spec is still under heavy discussion and in flux. At
this point, the PSBT files generated will only be compatible with
Coldcard.
The Coldcard can be used 100% offline: it can generate a skeleton
Electrum wallet and save it to MicroSD card. Transport that file
to Electrum and it will fetch history, blockchain details and then
operate in "unpaired" mode.
Spending transactions can be saved to MicroSD using the "Export PSBT"
button on the transaction preview dialog (when this plugin is
owner of the wallet). That PSBT can be signed on the Coldcard
(again using MicroSD both ways). The result is a ready-to-transmit
bitcoin transaction, which can be transmitted using Tools > Load
Transaction > From File in Electrum or really any tool.
<https://coldcardwallet.com>
## TODO Items
- No effort yet to support translations or languages other than English, sorry.
- Coldcard PSBT format is not likely to be compatible with other devices, because BIP174 is still in flux.
- Segwit support not 100% complete: can pay to them, but cannot setup wallet to receive them.
- Limited support for segwit wrapped in P2SH.
- Someday we could support multisig hardware wallets based on PSBT where each participant
  is using different devices/systems for signing; however, that belongs in an independent
  plugin that is PSBT-focused and might not require a Coldcard to be present.
### Ctags
- I find this command useful (at top level) ... but I'm a VIM user.
ctags -f .tags electrum `find . -name ENV -prune -o -name \*.py`
### Working with latest ckcc-protocol
- at top level, do this:
pip install -e git+ssh://[email protected]/Coldcard/ckcc-protocol.git#egg=ckcc-protocol
- but you'll need the https version of that, not ssh like I can.
- also a branch name would be good in there
- do `pip uninstall ckcc` first
- see <https://stackoverflow.com/questions/4830856>
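
For example, a hypothetical https variant with a branch name might look like:

    pip install -e git+https://github.com/Coldcard/ckcc-protocol.git@master#egg=ckcc-protocol

(The branch name `master` here is illustrative; substitute the branch you need.)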
| PypiClean |
/AyiinXd-0.0.8-cp311-cp311-macosx_10_9_universal2.whl/fipper/node_modules/@types/node/ts4.8/timers.d.ts | declare module 'timers' {
import { Abortable } from 'node:events';
import { setTimeout as setTimeoutPromise, setImmediate as setImmediatePromise, setInterval as setIntervalPromise } from 'node:timers/promises';
interface TimerOptions extends Abortable {
/**
* Set to `false` to indicate that the scheduled `Timeout`
* should not require the Node.js event loop to remain active.
* @default true
*/
ref?: boolean | undefined;
}
let setTimeout: typeof global.setTimeout;
let clearTimeout: typeof global.clearTimeout;
let setInterval: typeof global.setInterval;
let clearInterval: typeof global.clearInterval;
let setImmediate: typeof global.setImmediate;
let clearImmediate: typeof global.clearImmediate;
global {
namespace NodeJS {
// compatibility with older typings
interface Timer extends RefCounted {
hasRef(): boolean;
refresh(): this;
[Symbol.toPrimitive](): number;
}
interface Immediate extends RefCounted {
/**
* If true, the `Immediate` object will keep the Node.js event loop active.
* @since v11.0.0
*/
hasRef(): boolean;
_onImmediate: Function; // to distinguish it from the Timeout class
}
interface Timeout extends Timer {
/**
* If true, the `Timeout` object will keep the Node.js event loop active.
* @since v11.0.0
*/
hasRef(): boolean;
/**
* Sets the timer's start time to the current time, and reschedules the timer to
* call its callback at the previously specified duration adjusted to the current
* time. This is useful for refreshing a timer without allocating a new
* JavaScript object.
*
* Using this on a timer that has already called its callback will reactivate the
* timer.
* @since v10.2.0
* @return a reference to `timeout`
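                 * @example
                 * // Hypothetical: restart a pending 1000 ms timeout from "now"
                 * const t = setTimeout(() => console.log('fired'), 1000);
                 * t.refresh();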
*/
refresh(): this;
[Symbol.toPrimitive](): number;
}
}
function setTimeout<TArgs extends any[]>(callback: (...args: TArgs) => void, ms?: number, ...args: TArgs): NodeJS.Timeout;
        // util.promisify no rest args compatibility
// tslint:disable-next-line void-return
function setTimeout(callback: (args: void) => void, ms?: number): NodeJS.Timeout;
namespace setTimeout {
const __promisify__: typeof setTimeoutPromise;
}
function clearTimeout(timeoutId: NodeJS.Timeout | string | number | undefined): void;
function setInterval<TArgs extends any[]>(callback: (...args: TArgs) => void, ms?: number, ...args: TArgs): NodeJS.Timer;
        // util.promisify no rest args compatibility
// tslint:disable-next-line void-return
function setInterval(callback: (args: void) => void, ms?: number): NodeJS.Timer;
namespace setInterval {
const __promisify__: typeof setIntervalPromise;
}
function clearInterval(intervalId: NodeJS.Timeout | string | number | undefined): void;
function setImmediate<TArgs extends any[]>(callback: (...args: TArgs) => void, ...args: TArgs): NodeJS.Immediate;
        // util.promisify no rest args compatibility
// tslint:disable-next-line void-return
function setImmediate(callback: (args: void) => void): NodeJS.Immediate;
namespace setImmediate {
const __promisify__: typeof setImmediatePromise;
}
function clearImmediate(immediateId: NodeJS.Immediate | undefined): void;
function queueMicrotask(callback: () => void): void;
}
}
declare module 'node:timers' {
export * from 'timers';
} | PypiClean |
/ESMValTool-2.9.0-py3-none-any.whl/esmvaltool/utils/testing/regression/summarize.py | """Summarize the results of recipe test runs as HTML overview pages."""
import argparse
import datetime
import textwrap
from pathlib import Path
import yaml
def read_resource_usage_file(recipe_dir):
"""Read resource usage from the log."""
resource_file = recipe_dir / 'run' / 'resource_usage.txt'
usage = {}
if not resource_file.exists():
return usage
text = resource_file.read_text().strip()
if not text:
return usage
lines = text.split('\n')
for name in lines[0].split('\t'):
usage[name] = []
for line in lines[1:]:
for key, value in zip(usage, line.split('\t')):
if key != 'Date and time (UTC)':
value = float(value)
usage[key].append(value)
return usage
def get_runtime_from_debug(recipe_dir):
"""Try to read the runtime from the debug log."""
debug_file = recipe_dir / 'run' / 'main_log_debug.txt'
if not debug_file.exists():
return None
text = debug_file.read_text().strip()
if not text:
return None
lines = text.split('\n')
fmt = "%Y-%m-%d %H:%M:%S"
end_date = None
for line in lines[::-1]:
try:
end_date = datetime.datetime.strptime(line[:19], fmt)
except ValueError:
pass
else:
break
if end_date is None:
return None
start_date = datetime.datetime.strptime(lines[0][:19], fmt)
runtime = end_date - start_date
runtime = datetime.timedelta(seconds=round(runtime.total_seconds()))
return runtime
def get_resource_usage(recipe_dir):
"""Get recipe runtime (minutes), max memory (GB), avg CPU."""
resource_usage = read_resource_usage_file(recipe_dir)
if not resource_usage or not resource_usage['Real time (s)']:
runtime = get_runtime_from_debug(recipe_dir)
runtime = "" if runtime is None else f"{runtime}"
return [runtime, '', '']
runtime = resource_usage['Real time (s)'][-1]
avg_cpu = resource_usage['CPU time (s)'][-1] / runtime * 100.
runtime = datetime.timedelta(seconds=round(runtime))
memory = max(resource_usage['Memory (GB)'])
return [f"{runtime}", f"{memory:.1f}", f"{avg_cpu:.1f}"]
def get_first_figure(recipe_dir):
"""Get the first figure."""
plot_dir = recipe_dir / 'plots'
figures = plot_dir.glob("**/*.png")
try:
return next(figures)
except StopIteration:
return None
def get_recipe_name(recipe_dir):
"""Extract recipe name from output dir."""
return recipe_dir.stem[7:-16]
def get_title_and_description(recipe_dir):
"""Get recipe title and description."""
name = get_recipe_name(recipe_dir)
recipe_file = recipe_dir / 'run' / f'recipe_{name}.yml'
with open(recipe_file, 'rb') as file:
recipe = yaml.safe_load(file)
docs = recipe['documentation']
title = docs.get('title', name.replace('_', ' ').title())
return title, docs['description']
def link(url, text):
"""Format text as html link."""
return '<a href="' + url + '">' + text + '</a>'
def tr(entries):
"""Format text entries as html table row."""
return "<tr>" + " ".join(entries) + "</tr>"
def th(txt):
"""Format text as html table header."""
return "<th>" + txt + "</th>"
def td(txt):
"""Format text as html table data."""
return "<td>" + txt + "</td>"
def div(txt, class_):
"""Format text as html div."""
return f"<div class='{class_}'>{txt}</div>"
def generate_summary(output_dir):
"""Generate the lines of text for the debug summary view."""
lines = []
column_titles = [
"status",
"recipe output",
"run date",
"estimated run duration",
"estimated max memory (GB)",
"average cpu",
]
lines.append(tr(th(txt) for txt in column_titles))
for recipe_dir in sorted(Path(output_dir).glob('recipe_*')):
log = recipe_dir / 'run' / 'main_log.txt'
success = log.read_text().endswith('Run was successful\n')
if success:
status = 'success'
else:
debug_log = f"{recipe_dir.name}/run/main_log_debug.txt"
status = "failed (" + link(debug_log, 'debug') + ")"
name = recipe_dir.name[:-16]
date = datetime.datetime.strptime(recipe_dir.name[-15:],
"%Y%m%d_%H%M%S")
resource_usage = get_resource_usage(recipe_dir)
entry = []
entry.append(status)
entry.append(link(recipe_dir.name, name))
entry.append(str(date))
entry.extend(resource_usage)
entry_txt = tr(td(txt) for txt in entry)
lines.append(entry_txt)
return lines
def generate_overview(output_dir):
"""Generate the lines of text for the overview page."""
recipes = {}
def get_date(recipe_dir):
return datetime.datetime.strptime(recipe_dir.stem[-15:],
"%Y%m%d_%H%M%S")
for recipe_dir in sorted(Path(output_dir).glob('recipe_*')):
log = recipe_dir / 'run' / 'main_log.txt'
success = log.read_text().endswith('Run was successful\n')
if not success:
continue
name = get_recipe_name(recipe_dir)
if name not in recipes:
recipes[name] = []
recipes[name].append(recipe_dir)
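    # keep only the most recent successful run of each recipe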
for name, recipe_dirs in recipes.items():
recipes[name] = sorted(recipe_dirs, key=get_date)[-1]
print(f"Found {len(recipes)} recipes")
lines = []
for name, recipe_dir in recipes.items():
title, description = get_title_and_description(recipe_dir)
figure = get_first_figure(recipe_dir)
recipe_url = recipe_dir.relative_to(output_dir)
entry_txt = div(
div(
"\n".join([
f"<img src='{figure.relative_to(output_dir)}' "
"class='card-img-top'/>" if figure else "",
div(
"\n".join([
f'<h5 class="card-title">{title}</h5>',
f'<p class="card-text">{description} '
f'<a href="{recipe_url}">'
'<i class="bi bi-arrow-right-circle"></i>'
'</a></p>',
]),
"card-body",
),
]),
"card",
),
"col",
)
lines.append(entry_txt)
return lines
def write_debug_html(lines, output_dir):
"""Write lines to debug.html."""
header = textwrap.dedent("""
<!doctype html>
<html>
<head>
<title>ESMValTool recipes</title>
</head>
<style>
#recipes {
font-family: Arial, Helvetica, sans-serif;
border-collapse: collapse;
width: 100%;
}
#recipes td, #recipes th {
border: 1px solid #ddd;
padding: 8px;
}
#recipes tr:nth-child(even){background-color: #f2f2f2;}
#recipes tr:hover {background-color: #ddd;}
#recipes th {
padding-top: 12px;
padding-bottom: 12px;
text-align: left;
background-color: hsl(200, 50%, 50%);
color: white;
}
</style>
<body>
<table id="recipes">
""")
footer = textwrap.dedent("""
</table>
</body>
</html>
""")
lines = [" " + line for line in lines]
text = header + "\n".join(lines) + footer
index_file = output_dir / 'debug.html'
index_file.write_text(text)
print(f"Wrote file://{index_file.absolute()}")
def write_index_html(lines, output_dir):
"""Write lines to index.html."""
header = textwrap.dedent("""
<!doctype html>
<html lang="en">
<head>
<!-- Required meta tags -->
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<!-- Bootstrap CSS -->
<link href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-1BmE4kWBq78iYhFldvKuhfTAU6auU8tT94WrHftjDbrCEXSU1oBoqyl2QvZ6jIW3" crossorigin="anonymous">
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/[email protected]/font/bootstrap-icons.css">
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"></script>
<title>ESMValTool results</title>
</head>
<body>
<div class="container-fluid">
<h1>
<img src="https://github.com/ESMValGroup/ESMValTool/raw/main/doc/sphinx/source/figures/ESMValTool-logo-2.png" class="img-fluid">
</h1>
<p>
See <a href=https://docs.esmvaltool.org/en/latest/recipes/index.html>Available recipes</a>
for a description of these recipes.
Missing something? Have a look at the <a href=debug.html>debug page</a>.
<p>
<input class="form-control searchbox-input" type="text" placeholder="Type something here to search...">
<br>
<div class="row row-cols-1 row-cols-md-3 g-4">
""") # noqa: E501
footer = textwrap.dedent("""
</div>
</div>
<script>
$(document).ready(function(){
$('.searchbox-input').on("keyup", function() {
var value = $(this).val().toLowerCase();
$(".col").filter(function() {
$(this).toggle($(this).text().toLowerCase().indexOf(value) > -1)
});
});
});
</script>
</body>
</html>
""") # noqa: E501
lines = [" " + line for line in lines]
text = header + "\n".join(lines) + footer
index_file = output_dir / 'index.html'
index_file.write_text(text)
print(f"Wrote file://{index_file.absolute()}")
def main():
"""Run the program."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('output_dir',
default='.',
type=Path,
help='ESMValTool output directory.')
args = parser.parse_args()
write_debug_html(generate_summary(args.output_dir), args.output_dir)
write_index_html(generate_overview(args.output_dir), args.output_dir)
if __name__ == '__main__':
main() | PypiClean |
/CRUDXWORKERTEJAS1899-0.4.tar.gz/CRUDXWORKERTEJAS1899-0.4/CRUD/settings.py | from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
TEMPLATE_DIR = Path.joinpath(BASE_DIR,'template')
STATIC_DIR = Path.joinpath(BASE_DIR,'static')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-z_ct4h^b^8zr)jiyt$oz_20c36ya0=nr3$%st^m39iq6glsoyy'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crudapp'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'CRUD.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'CRUD.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'demo1',
'USER':'root',
'PASSWORD':'root'
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = 'static/'
STATICFILES_DIRS = [STATIC_DIR]
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField' | PypiClean |
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/materialdjango/static/materialdjango/components/bower_components/prism/components/prism-sql.js | Prism.languages.sql= {
'comment': {
pattern: /(^|[^\\])(?:\/\*[\s\S]*?\*\/|(?:--|\/\/|#).*)/,
lookbehind: true
},
'string' : {
pattern: /(^|[^@\\])("|')(?:\\[\s\S]|(?!\2)[^\\])*\2/,
greedy: true,
lookbehind: true
},
'variable': /@[\w.$]+|@(["'`])(?:\\[\s\S]|(?!\1)[^\\])+\1/,
'function': /\b(?:COUNT|SUM|AVG|MIN|MAX|FIRST|LAST|UCASE|LCASE|MID|LEN|ROUND|NOW|FORMAT)(?=\s*\()/i, // Should we highlight user defined functions too?
'keyword': /\b(?:ACTION|ADD|AFTER|ALGORITHM|ALL|ALTER|ANALYZE|ANY|APPLY|AS|ASC|AUTHORIZATION|AUTO_INCREMENT|BACKUP|BDB|BEGIN|BERKELEYDB|BIGINT|BINARY|BIT|BLOB|BOOL|BOOLEAN|BREAK|BROWSE|BTREE|BULK|BY|CALL|CASCADED?|CASE|CHAIN|CHAR VARYING|CHARACTER (?:SET|VARYING)|CHARSET|CHECK|CHECKPOINT|CLOSE|CLUSTERED|COALESCE|COLLATE|COLUMN|COLUMNS|COMMENT|COMMIT|COMMITTED|COMPUTE|CONNECT|CONSISTENT|CONSTRAINT|CONTAINS|CONTAINSTABLE|CONTINUE|CONVERT|CREATE|CROSS|CURRENT(?:_DATE|_TIME|_TIMESTAMP|_USER)?|CURSOR|DATA(?:BASES?)?|DATE(?:TIME)?|DBCC|DEALLOCATE|DEC|DECIMAL|DECLARE|DEFAULT|DEFINER|DELAYED|DELETE|DELIMITER(?:S)?|DENY|DESC|DESCRIBE|DETERMINISTIC|DISABLE|DISCARD|DISK|DISTINCT|DISTINCTROW|DISTRIBUTED|DO|DOUBLE(?: PRECISION)?|DROP|DUMMY|DUMP(?:FILE)?|DUPLICATE KEY|ELSE|ENABLE|ENCLOSED BY|END|ENGINE|ENUM|ERRLVL|ERRORS|ESCAPE(?:D BY)?|EXCEPT|EXEC(?:UTE)?|EXISTS|EXIT|EXPLAIN|EXTENDED|FETCH|FIELDS|FILE|FILLFACTOR|FIRST|FIXED|FLOAT|FOLLOWING|FOR(?: EACH ROW)?|FORCE|FOREIGN|FREETEXT(?:TABLE)?|FROM|FULL|FUNCTION|GEOMETRY(?:COLLECTION)?|GLOBAL|GOTO|GRANT|GROUP|HANDLER|HASH|HAVING|HOLDLOCK|IDENTITY(?:_INSERT|COL)?|IF|IGNORE|IMPORT|INDEX|INFILE|INNER|INNODB|INOUT|INSERT|INT|INTEGER|INTERSECT|INTO|INVOKER|ISOLATION LEVEL|JOIN|KEYS?|KILL|LANGUAGE SQL|LAST|LEFT|LIMIT|LINENO|LINES|LINESTRING|LOAD|LOCAL|LOCK|LONG(?:BLOB|TEXT)|MATCH(?:ED)?|MEDIUM(?:BLOB|INT|TEXT)|MERGE|MIDDLEINT|MODIFIES SQL DATA|MODIFY|MULTI(?:LINESTRING|POINT|POLYGON)|NATIONAL(?: CHAR VARYING| CHARACTER(?: VARYING)?| VARCHAR)?|NATURAL|NCHAR(?: VARCHAR)?|NEXT|NO(?: SQL|CHECK|CYCLE)?|NONCLUSTERED|NULLIF|NUMERIC|OFF?|OFFSETS?|ON|OPEN(?:DATASOURCE|QUERY|ROWSET)?|OPTIMIZE|OPTION(?:ALLY)?|ORDER|OUT(?:ER|FILE)?|OVER|PARTIAL|PARTITION|PERCENT|PIVOT|PLAN|POINT|POLYGON|PRECEDING|PRECISION|PREV|PRIMARY|PRINT|PRIVILEGES|PROC(?:EDURE)?|PUBLIC|PURGE|QUICK|RAISERROR|READ(?:S SQL DATA|TEXT)?|REAL|RECONFIGURE|REFERENCES|RELEASE|RENAME|REPEATABLE|REPLICATION|REQUIRE|RESTORE|RESTRICT|RETURNS?|REVOKE|RIGHT|ROLLBACK|ROUTINE|ROW(?:COUNT|GUIDCOL|S)?|RTREE|RULE|SAVE(?:POINT)?|SCHEMA|SELECT|SERIAL(?:IZABLE)?|SESSION(?:_USER)?|SET(?:USER)?|SHARE MODE|SHOW|SHUTDOWN|SIMPLE|SMALLINT|SNAPSHOT|SOME|SONAME|START(?:ING BY)?|STATISTICS|STATUS|STRIPED|SYSTEM_USER|TABLES?|TABLESPACE|TEMP(?:ORARY|TABLE)?|TERMINATED BY|TEXT(?:SIZE)?|THEN|TIMESTAMP|TINY(?:BLOB|INT|TEXT)|TOP?|TRAN(?:SACTIONS?)?|TRIGGER|TRUNCATE|TSEQUAL|TYPES?|UNBOUNDED|UNCOMMITTED|UNDEFINED|UNION|UNIQUE|UNPIVOT|UPDATE(?:TEXT)?|USAGE|USE|USER|USING|VALUES?|VAR(?:BINARY|CHAR|CHARACTER|YING)|VIEW|WAITFOR|WARNINGS|WHEN|WHERE|WHILE|WITH(?: ROLLUP|IN)?|WORK|WRITE(?:TEXT)?)\b/i,
'boolean': /\b(?:TRUE|FALSE|NULL)\b/i,
'number': /\b-?(?:0x)?\d*\.?[\da-f]+\b/,
'operator': /[-+*\/=%^~]|&&?|\|\|?|!=?|<(?:=>?|<|>)?|>[>=]?|\b(?:AND|BETWEEN|IN|LIKE|NOT|OR|IS|DIV|REGEXP|RLIKE|SOUNDS LIKE|XOR)\b/i,
'punctuation': /[;[\]()`,.]/
}; | PypiClean |
/MarkDo-0.3.0.tar.gz/MarkDo-0.3.0/markdo/static/bower/codemirror/mode/javascript/javascript.js |
CodeMirror.defineMode("javascript", function(config, parserConfig) {
var indentUnit = config.indentUnit;
var jsonMode = parserConfig.json;
var isTS = parserConfig.typescript;
// Tokenizer
var keywords = function(){
function kw(type) {return {type: type, style: "keyword"};}
var A = kw("keyword a"), B = kw("keyword b"), C = kw("keyword c");
var operator = kw("operator"), atom = {type: "atom", style: "atom"};
var jsKeywords = {
"if": A, "while": A, "with": A, "else": B, "do": B, "try": B, "finally": B,
"return": C, "break": C, "continue": C, "new": C, "delete": C, "throw": C,
"var": kw("var"), "const": kw("var"), "let": kw("var"),
"function": kw("function"), "catch": kw("catch"),
"for": kw("for"), "switch": kw("switch"), "case": kw("case"), "default": kw("default"),
"in": operator, "typeof": operator, "instanceof": operator,
"true": atom, "false": atom, "null": atom, "undefined": atom, "NaN": atom, "Infinity": atom
};
// Extend the 'normal' keywords with the TypeScript language extensions
if (isTS) {
var type = {type: "variable", style: "variable-3"};
var tsKeywords = {
// object-like things
"interface": kw("interface"),
"class": kw("class"),
"extends": kw("extends"),
"constructor": kw("constructor"),
// scope modifiers
"public": kw("public"),
"private": kw("private"),
"protected": kw("protected"),
"static": kw("static"),
"super": kw("super"),
// types
"string": type, "number": type, "bool": type, "any": type
};
for (var attr in tsKeywords) {
jsKeywords[attr] = tsKeywords[attr];
}
}
return jsKeywords;
}();
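// The table above maps each keyword to a token object: `type` drives the
// parser below, while `style` names the CSS class used for highlighting.
// When parserConfig.typescript is set, the TypeScript-only keywords are
// merged into the same table.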
var isOperatorChar = /[+\-*&%=<>!?|]/;
function chain(stream, state, f) {
state.tokenize = f;
return f(stream, state);
}
function nextUntilUnescaped(stream, end) {
var escaped = false, next;
while ((next = stream.next()) != null) {
if (next == end && !escaped)
return false;
escaped = !escaped && next == "\\";
}
return escaped;
}
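// nextUntilUnescaped returns false once the unescaped `end` character is
// found, and true only when the stream runs out on a trailing backslash;
// jsTokenString relies on that to keep a line-continued string open.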
// Used as scratch variables to communicate multiple values without
// consing up tons of objects.
var type, content;
function ret(tp, style, cont) {
type = tp; content = cont;
return style;
}
function jsTokenBase(stream, state) {
var ch = stream.next();
if (ch == '"' || ch == "'")
return chain(stream, state, jsTokenString(ch));
else if (/[\[\]{}\(\),;\:\.]/.test(ch))
return ret(ch);
else if (ch == "0" && stream.eat(/x/i)) {
stream.eatWhile(/[\da-f]/i);
return ret("number", "number");
}
else if (/\d/.test(ch) || ch == "-" && stream.eat(/\d/)) {
stream.match(/^\d*(?:\.\d*)?(?:[eE][+\-]?\d+)?/);
return ret("number", "number");
}
else if (ch == "/") {
if (stream.eat("*")) {
return chain(stream, state, jsTokenComment);
}
else if (stream.eat("/")) {
stream.skipToEnd();
return ret("comment", "comment");
}
else if (state.lastType == "operator" || state.lastType == "keyword c" ||
/^[\[{}\(,;:]$/.test(state.lastType)) {
nextUntilUnescaped(stream, "/");
stream.eatWhile(/[gimy]/); // 'y' is "sticky" option in Mozilla
return ret("regexp", "string-2");
}
else {
stream.eatWhile(isOperatorChar);
return ret("operator", null, stream.current());
}
}
else if (ch == "#") {
stream.skipToEnd();
return ret("error", "error");
}
else if (isOperatorChar.test(ch)) {
stream.eatWhile(isOperatorChar);
return ret("operator", null, stream.current());
}
else {
stream.eatWhile(/[\w\$_]/);
var word = stream.current(), known = keywords.propertyIsEnumerable(word) && keywords[word];
return (known && state.lastType != ".") ? ret(known.type, known.style, word) :
ret("variable", "variable", word);
}
}
function jsTokenString(quote) {
return function(stream, state) {
if (!nextUntilUnescaped(stream, quote))
state.tokenize = jsTokenBase;
return ret("string", "string");
};
}
function jsTokenComment(stream, state) {
var maybeEnd = false, ch;
while (ch = stream.next()) {
if (ch == "/" && maybeEnd) {
state.tokenize = jsTokenBase;
break;
}
maybeEnd = (ch == "*");
}
return ret("comment", "comment");
}
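// The three tokenizers above hand control to one another through
// state.tokenize, which is how multi-line strings and block comments
// survive across separate calls to token().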
// Parser
var atomicTypes = {"atom": true, "number": true, "variable": true, "string": true, "regexp": true};
function JSLexical(indented, column, type, align, prev, info) {
this.indented = indented;
this.column = column;
this.type = type;
this.prev = prev;
this.info = info;
if (align != null) this.align = align;
}
function inScope(state, varname) {
for (var v = state.localVars; v; v = v.next)
if (v.name == varname) return true;
}
function parseJS(state, style, type, content, stream) {
var cc = state.cc;
// Communicate our context to the combinators.
// (Less wasteful than consing up a hundred closures on every call.)
cx.state = state; cx.stream = stream; cx.marked = null; cx.cc = cc;
if (!state.lexical.hasOwnProperty("align"))
state.lexical.align = true;
while(true) {
var combinator = cc.length ? cc.pop() : jsonMode ? expression : statement;
if (combinator(type, content)) {
while(cc.length && cc[cc.length - 1].lex)
cc.pop()();
if (cx.marked) return cx.marked;
if (type == "variable" && inScope(state, content)) return "variable-2";
return style;
}
}
}
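// parseJS pops combinators off state.cc until one returns true, i.e.
// consumes the token; a falsy return means the combinator only inspected
// the token, and the next combinator is tried on the same input.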
// Combinator utils
var cx = {state: null, column: null, marked: null, cc: null};
function pass() {
for (var i = arguments.length - 1; i >= 0; i--) cx.cc.push(arguments[i]);
}
function cont() {
pass.apply(null, arguments);
return true;
}
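// pass() schedules combinators and leaves the current token to be re-read
// by them; cont() schedules them and consumes the token by returning true
// to the loop in parseJS.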
function register(varname) {
function inList(list) {
for (var v = list; v; v = v.next)
if (v.name == varname) return true;
return false;
}
var state = cx.state;
if (state.context) {
cx.marked = "def";
if (inList(state.localVars)) return;
state.localVars = {name: varname, next: state.localVars};
} else {
if (inList(state.globalVars)) return;
state.globalVars = {name: varname, next: state.globalVars};
}
}
// Combinators
var defaultVars = {name: "this", next: {name: "arguments"}};
function pushcontext() {
cx.state.context = {prev: cx.state.context, vars: cx.state.localVars};
cx.state.localVars = defaultVars;
}
function popcontext() {
cx.state.localVars = cx.state.context.vars;
cx.state.context = cx.state.context.prev;
}
function pushlex(type, info) {
var result = function() {
var state = cx.state;
state.lexical = new JSLexical(state.indented, cx.stream.column(), type, null, state.lexical, info);
};
result.lex = true;
return result;
}
function poplex() {
var state = cx.state;
if (state.lexical.prev) {
if (state.lexical.type == ")")
state.indented = state.lexical.indented;
state.lexical = state.lexical.prev;
}
}
poplex.lex = true;
function expect(wanted) {
return function expecting(type) {
if (type == wanted) return cont();
else if (wanted == ";") return pass();
else return cont(expecting); // arguments.callee is not strict-mode safe; reuse the named function expression
};
}
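// expect() is deliberately error-tolerant: a missing semicolon is silently
// forgiven via pass(), while any other unexpected token is consumed and the
// expectation stays pending.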
function statement(type) {
if (type == "var") return cont(pushlex("vardef"), vardef1, expect(";"), poplex);
if (type == "keyword a") return cont(pushlex("form"), expression, statement, poplex);
if (type == "keyword b") return cont(pushlex("form"), statement, poplex);
if (type == "{") return cont(pushlex("}"), block, poplex);
if (type == ";") return cont();
if (type == "function") return cont(functiondef);
if (type == "for") return cont(pushlex("form"), expect("("), pushlex(")"), forspec1, expect(")"),
poplex, statement, poplex);
if (type == "variable") return cont(pushlex("stat"), maybelabel);
if (type == "switch") return cont(pushlex("form"), expression, pushlex("}", "switch"), expect("{"),
block, poplex, poplex);
if (type == "case") return cont(expression, expect(":"));
if (type == "default") return cont(expect(":"));
if (type == "catch") return cont(pushlex("form"), pushcontext, expect("("), funarg, expect(")"),
statement, poplex, popcontext);
return pass(pushlex("stat"), expression, expect(";"), poplex);
}
function expression(type) {
if (atomicTypes.hasOwnProperty(type)) return cont(maybeoperator);
if (type == "function") return cont(functiondef);
if (type == "keyword c") return cont(maybeexpression);
if (type == "(") return cont(pushlex(")"), maybeexpression, expect(")"), poplex, maybeoperator);
if (type == "operator") return cont(expression);
if (type == "[") return cont(pushlex("]"), commasep(expression, "]"), poplex, maybeoperator);
if (type == "{") return cont(pushlex("}"), commasep(objprop, "}"), poplex, maybeoperator);
return cont();
}
function maybeexpression(type) {
if (type.match(/[;\}\)\],]/)) return pass();
return pass(expression);
}
function maybeoperator(type, value) {
if (type == "operator" && /\+\+|--/.test(value)) return cont(maybeoperator);
if (type == "operator" && value == "?") return cont(expression, expect(":"), expression);
if (type == ";") return;
if (type == "(") return cont(pushlex(")"), commasep(expression, ")"), poplex, maybeoperator);
if (type == ".") return cont(property, maybeoperator);
if (type == "[") return cont(pushlex("]"), expression, expect("]"), poplex, maybeoperator);
}
function maybelabel(type) {
if (type == ":") return cont(poplex, statement);
return pass(maybeoperator, expect(";"), poplex);
}
function property(type) {
if (type == "variable") {cx.marked = "property"; return cont();}
}
function objprop(type) {
if (type == "variable") cx.marked = "property";
if (atomicTypes.hasOwnProperty(type)) return cont(expect(":"), expression);
}
function commasep(what, end) {
function proceed(type) {
if (type == ",") return cont(what, proceed);
if (type == end) return cont();
return cont(expect(end));
}
return function commaSeparated(type) {
if (type == end) return cont();
else return pass(what, proceed);
};
}
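// commasep(what, end) parses `what ("," what)* end`, e.g. an argument list;
// an immediate `end` token yields an empty list.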
function block(type) {
if (type == "}") return cont();
return pass(statement, block);
}
function maybetype(type) {
if (type == ":") return cont(typedef);
return pass();
}
function typedef(type) {
if (type == "variable"){cx.marked = "variable-3"; return cont();}
return pass();
}
function vardef1(type, value) {
if (type == "variable") {
register(value);
return isTS ? cont(maybetype, vardef2) : cont(vardef2);
}
return pass();
}
function vardef2(type, value) {
if (value == "=") return cont(expression, vardef2);
if (type == ",") return cont(vardef1);
}
function forspec1(type) {
if (type == "var") return cont(vardef1, expect(";"), forspec2);
if (type == ";") return cont(forspec2);
if (type == "variable") return cont(formaybein);
return cont(forspec2);
}
function formaybein(type, value) {
if (value == "in") return cont(expression);
return cont(maybeoperator, forspec2);
}
function forspec2(type, value) {
if (type == ";") return cont(forspec3);
if (value == "in") return cont(expression);
return cont(expression, expect(";"), forspec3);
}
function forspec3(type) {
if (type != ")") cont(expression);
}
function functiondef(type, value) {
if (type == "variable") {register(value); return cont(functiondef);}
if (type == "(") return cont(pushlex(")"), pushcontext, commasep(funarg, ")"), poplex, statement, popcontext);
}
function funarg(type, value) {
if (type == "variable") {register(value); return isTS ? cont(maybetype) : cont();}
}
// Interface
return {
startState: function(basecolumn) {
return {
tokenize: jsTokenBase,
lastType: null,
cc: [],
lexical: new JSLexical((basecolumn || 0) - indentUnit, 0, "block", false),
localVars: parserConfig.localVars,
globalVars: parserConfig.globalVars,
context: parserConfig.localVars && {vars: parserConfig.localVars},
indented: 0
};
},
token: function(stream, state) {
if (stream.sol()) {
if (!state.lexical.hasOwnProperty("align"))
state.lexical.align = false;
state.indented = stream.indentation();
}
if (stream.eatSpace()) return null;
var style = state.tokenize(stream, state);
if (type == "comment") return style;
state.lastType = type;
return parseJS(state, style, type, content, stream);
},
indent: function(state, textAfter) {
if (state.tokenize == jsTokenComment) return CodeMirror.Pass;
if (state.tokenize != jsTokenBase) return 0;
var firstChar = textAfter && textAfter.charAt(0), lexical = state.lexical;
if (lexical.type == "stat" && firstChar == "}") lexical = lexical.prev;
var type = lexical.type, closing = firstChar == type;
if (type == "vardef") return lexical.indented + (state.lastType == "operator" || state.lastType == "," ? 4 : 0);
else if (type == "form" && firstChar == "{") return lexical.indented;
else if (type == "form") return lexical.indented + indentUnit;
else if (type == "stat")
return lexical.indented + (state.lastType == "operator" || state.lastType == "," ? indentUnit : 0);
else if (lexical.info == "switch" && !closing)
return lexical.indented + (/^(?:case|default)\b/.test(textAfter) ? indentUnit : 2 * indentUnit);
else if (lexical.align) return lexical.column + (closing ? 0 : 1);
else return lexical.indented + (closing ? 0 : indentUnit);
},
electricChars: ":{}",
jsonMode: jsonMode
};
});
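// A minimal usage sketch (not part of the original file): with codemirror.js
// loaded, the mode is picked via the names registered below; the element id
// "code" here is hypothetical.
//
//   var editor = CodeMirror.fromTextArea(document.getElementById("code"), {
//     mode: "text/typescript", // or "javascript" / "application/json"
//     indentUnit: 2
//   });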
CodeMirror.defineMIME("text/javascript", "javascript");
CodeMirror.defineMIME("text/ecmascript", "javascript");
CodeMirror.defineMIME("application/javascript", "javascript");
CodeMirror.defineMIME("application/ecmascript", "javascript");
CodeMirror.defineMIME("application/json", {name: "javascript", json: true});
CodeMirror.defineMIME("text/typescript", { name: "javascript", typescript: true });
CodeMirror.defineMIME("application/typescript", { name: "javascript", typescript: true }); | PypiClean |
/GB2260-v2-0.2.1.tar.gz/GB2260-v2-0.2.1/gb2260_v2/data/curated/revision_201810.py | from __future__ import unicode_literals
name = '201810'
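# GB2260 codes are six digits: two each for the province, prefecture and
# county level, with trailing zeros marking the higher-level unit itself.
# A minimal lookup sketch, assuming this file's usual import path (usage is
# hypothetical, not part of the original module):
#
#     from gb2260_v2.data.curated.revision_201810 import division_schema
#     assert division_schema['110000'] == '北京市'  # province level
#     assert division_schema['110101'] == '东城区'  # county level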
division_schema = {
'110000': '北京市',
'110100': '市辖区',
'110101': '东城区',
'110102': '西城区',
'110105': '朝阳区',
'110106': '丰台区',
'110107': '石景山区',
'110108': '海淀区',
'110109': '门头沟区',
'110111': '房山区',
'110112': '通州区',
'110113': '顺义区',
'110114': '昌平区',
'110115': '大兴区',
'110116': '怀柔区',
'110117': '平谷区',
'110118': '密云区',
'110119': '延庆区',
'120000': '天津市',
'120100': '市辖区',
'120101': '和平区',
'120102': '河东区',
'120103': '河西区',
'120104': '南开区',
'120105': '河北区',
'120106': '红桥区',
'120110': '东丽区',
'120111': '西青区',
'120112': '津南区',
'120113': '北辰区',
'120114': '武清区',
'120115': '宝坻区',
'120116': '滨海新区',
'120117': '宁河区',
'120118': '静海区',
'120119': '蓟州区',
'130000': '河北省',
'130100': '石家庄市',
'130101': '市辖区',
'130102': '长安区',
'130104': '桥西区',
'130105': '新华区',
'130107': '井陉矿区',
'130108': '裕华区',
'130109': '藁城区',
'130110': '鹿泉区',
'130111': '栾城区',
'130121': '井陉县',
'130123': '正定县',
'130125': '行唐县',
'130126': '灵寿县',
'130127': '高邑县',
'130128': '深泽县',
'130129': '赞皇县',
'130130': '无极县',
'130131': '平山县',
'130132': '元氏县',
'130133': '赵县',
'130171': '石家庄高新技术产业开发区',
'130172': '石家庄循环化工园区',
'130181': '辛集市',
'130183': '晋州市',
'130184': '新乐市',
'130200': '唐山市',
'130201': '市辖区',
'130202': '路南区',
'130203': '路北区',
'130204': '古冶区',
'130205': '开平区',
'130207': '丰南区',
'130208': '丰润区',
'130209': '曹妃甸区',
'130224': '滦南县',
'130225': '乐亭县',
'130227': '迁西县',
'130229': '玉田县',
'130271': '唐山市芦台经济技术开发区',
'130272': '唐山市汉沽管理区',
'130273': '唐山高新技术产业开发区',
'130274': '河北唐山海港经济开发区',
'130281': '遵化市',
'130283': '迁安市',
'130284': '滦州市',
'130300': '秦皇岛市',
'130301': '市辖区',
'130302': '海港区',
'130303': '山海关区',
'130304': '北戴河区',
'130306': '抚宁区',
'130321': '青龙满族自治县',
'130322': '昌黎县',
'130324': '卢龙县',
'130371': '秦皇岛市经济技术开发区',
'130372': '北戴河新区',
'130400': '邯郸市',
'130401': '市辖区',
'130402': '邯山区',
'130403': '丛台区',
'130404': '复兴区',
'130406': '峰峰矿区',
'130407': '肥乡区',
'130408': '永年区',
'130423': '临漳县',
'130424': '成安县',
'130425': '大名县',
'130426': '涉县',
'130427': '磁县',
'130430': '邱县',
'130431': '鸡泽县',
'130432': '广平县',
'130433': '馆陶县',
'130434': '魏县',
'130435': '曲周县',
'130471': '邯郸经济技术开发区',
'130473': '邯郸冀南新区',
'130481': '武安市',
'130500': '邢台市',
'130501': '市辖区',
'130502': '桥东区',
'130503': '桥西区',
'130521': '邢台县',
'130522': '临城县',
'130523': '内丘县',
'130524': '柏乡县',
'130525': '隆尧县',
'130526': '任县',
'130527': '南和县',
'130528': '宁晋县',
'130529': '巨鹿县',
'130530': '新河县',
'130531': '广宗县',
'130532': '平乡县',
'130533': '威县',
'130534': '清河县',
'130535': '临西县',
'130571': '河北邢台经济开发区',
'130581': '南宫市',
'130582': '沙河市',
'130600': '保定市',
'130601': '市辖区',
'130602': '竞秀区',
'130606': '莲池区',
'130607': '满城区',
'130608': '清苑区',
'130609': '徐水区',
'130623': '涞水县',
'130624': '阜平县',
'130626': '定兴县',
'130627': '唐县',
'130628': '高阳县',
'130629': '容城县',
'130630': '涞源县',
'130631': '望都县',
'130632': '安新县',
'130633': '易县',
'130634': '曲阳县',
'130635': '蠡县',
'130636': '顺平县',
'130637': '博野县',
'130638': '雄县',
'130671': '保定高新技术产业开发区',
'130672': '保定白沟新城',
'130681': '涿州市',
'130682': '定州市',
'130683': '安国市',
'130684': '高碑店市',
'130700': '张家口市',
'130701': '市辖区',
'130702': '桥东区',
'130703': '桥西区',
'130705': '宣化区',
'130706': '下花园区',
'130708': '万全区',
'130709': '崇礼区',
'130722': '张北县',
'130723': '康保县',
'130724': '沽源县',
'130725': '尚义县',
'130726': '蔚县',
'130727': '阳原县',
'130728': '怀安县',
'130730': '怀来县',
'130731': '涿鹿县',
'130732': '赤城县',
'130771': '张家口市高新技术产业开发区',
'130772': '张家口市察北管理区',
'130773': '张家口市塞北管理区',
'130800': '承德市',
'130801': '市辖区',
'130802': '双桥区',
'130803': '双滦区',
'130804': '鹰手营子矿区',
'130821': '承德县',
'130822': '兴隆县',
'130824': '滦平县',
'130825': '隆化县',
'130826': '丰宁满族自治县',
'130827': '宽城满族自治县',
'130828': '围场满族蒙古族自治县',
'130871': '承德高新技术产业开发区',
'130881': '平泉市',
'130900': '沧州市',
'130901': '市辖区',
'130902': '新华区',
'130903': '运河区',
'130921': '沧县',
'130922': '青县',
'130923': '东光县',
'130924': '海兴县',
'130925': '盐山县',
'130926': '肃宁县',
'130927': '南皮县',
'130928': '吴桥县',
'130929': '献县',
'130930': '孟村回族自治县',
'130971': '河北沧州经济开发区',
'130972': '沧州高新技术产业开发区',
'130973': '沧州渤海新区',
'130981': '泊头市',
'130982': '任丘市',
'130983': '黄骅市',
'130984': '河间市',
'131000': '廊坊市',
'131001': '市辖区',
'131002': '安次区',
'131003': '广阳区',
'131022': '固安县',
'131023': '永清县',
'131024': '香河县',
'131025': '大城县',
'131026': '文安县',
'131028': '大厂回族自治县',
'131071': '廊坊经济技术开发区',
'131081': '霸州市',
'131082': '三河市',
'131100': '衡水市',
'131101': '市辖区',
'131102': '桃城区',
'131103': '冀州区',
'131121': '枣强县',
'131122': '武邑县',
'131123': '武强县',
'131124': '饶阳县',
'131125': '安平县',
'131126': '故城县',
'131127': '景县',
'131128': '阜城县',
'131171': '河北衡水高新技术产业开发区',
'131172': '衡水滨湖新区',
'131182': '深州市',
'140000': '山西省',
'140100': '太原市',
'140101': '市辖区',
'140105': '小店区',
'140106': '迎泽区',
'140107': '杏花岭区',
'140108': '尖草坪区',
'140109': '万柏林区',
'140110': '晋源区',
'140121': '清徐县',
'140122': '阳曲县',
'140123': '娄烦县',
'140171': '山西转型综合改革示范区',
'140181': '古交市',
'140200': '大同市',
'140201': '市辖区',
'140212': '新荣区',
'140213': '平城区',
'140214': '云冈区',
'140215': '云州区',
'140221': '阳高县',
'140222': '天镇县',
'140223': '广灵县',
'140224': '灵丘县',
'140225': '浑源县',
'140226': '左云县',
'140271': '山西大同经济开发区',
'140300': '阳泉市',
'140301': '市辖区',
'140302': '城区',
'140303': '矿区',
'140311': '郊区',
'140321': '平定县',
'140322': '盂县',
'140400': '长治市',
'140401': '市辖区',
'140403': '潞州区',
'140404': '上党区',
'140405': '屯留区',
'140406': '潞城区',
'140423': '襄垣县',
'140425': '平顺县',
'140426': '黎城县',
'140427': '壶关县',
'140428': '长子县',
'140429': '武乡县',
'140430': '沁县',
'140431': '沁源县',
'140471': '山西长治高新技术产业园区',
'140500': '晋城市',
'140501': '市辖区',
'140502': '城区',
'140521': '沁水县',
'140522': '阳城县',
'140524': '陵川县',
'140525': '泽州县',
'140581': '高平市',
'140600': '朔州市',
'140601': '市辖区',
'140602': '朔城区',
'140603': '平鲁区',
'140621': '山阴县',
'140622': '应县',
'140623': '右玉县',
'140671': '山西朔州经济开发区',
'140681': '怀仁市',
'140700': '晋中市',
'140701': '市辖区',
'140702': '榆次区',
'140721': '榆社县',
'140722': '左权县',
'140723': '和顺县',
'140724': '昔阳县',
'140725': '寿阳县',
'140726': '太谷县',
'140727': '祁县',
'140728': '平遥县',
'140729': '灵石县',
'140781': '介休市',
'140800': '运城市',
'140801': '市辖区',
'140802': '盐湖区',
'140821': '临猗县',
'140822': '万荣县',
'140823': '闻喜县',
'140824': '稷山县',
'140825': '新绛县',
'140826': '绛县',
'140827': '垣曲县',
'140828': '夏县',
'140829': '平陆县',
'140830': '芮城县',
'140881': '永济市',
'140882': '河津市',
'140900': '忻州市',
'140901': '市辖区',
'140902': '忻府区',
'140921': '定襄县',
'140922': '五台县',
'140923': '代县',
'140924': '繁峙县',
'140925': '宁武县',
'140926': '静乐县',
'140927': '神池县',
'140928': '五寨县',
'140929': '岢岚县',
'140930': '河曲县',
'140931': '保德县',
'140932': '偏关县',
'140971': '五台山风景名胜区',
'140981': '原平市',
'141000': '临汾市',
'141001': '市辖区',
'141002': '尧都区',
'141021': '曲沃县',
'141022': '翼城县',
'141023': '襄汾县',
'141024': '洪洞县',
'141025': '古县',
'141026': '安泽县',
'141027': '浮山县',
'141028': '吉县',
'141029': '乡宁县',
'141030': '大宁县',
'141031': '隰县',
'141032': '永和县',
'141033': '蒲县',
'141034': '汾西县',
'141081': '侯马市',
'141082': '霍州市',
'141100': '吕梁市',
'141101': '市辖区',
'141102': '离石区',
'141121': '文水县',
'141122': '交城县',
'141123': '兴县',
'141124': '临县',
'141125': '柳林县',
'141126': '石楼县',
'141127': '岚县',
'141128': '方山县',
'141129': '中阳县',
'141130': '交口县',
'141181': '孝义市',
'141182': '汾阳市',
'150000': '内蒙古自治区',
'150100': '呼和浩特市',
'150101': '市辖区',
'150102': '新城区',
'150103': '回民区',
'150104': '玉泉区',
'150105': '赛罕区',
'150121': '土默特左旗',
'150122': '托克托县',
'150123': '和林格尔县',
'150124': '清水河县',
'150125': '武川县',
'150171': '呼和浩特金海工业园区',
'150172': '呼和浩特经济技术开发区',
'150200': '包头市',
'150201': '市辖区',
'150202': '东河区',
'150203': '昆都仑区',
'150204': '青山区',
'150205': '石拐区',
'150206': '白云鄂博矿区',
'150207': '九原区',
'150221': '土默特右旗',
'150222': '固阳县',
'150223': '达尔罕茂明安联合旗',
'150271': '包头稀土高新技术产业开发区',
'150300': '乌海市',
'150301': '市辖区',
'150302': '海勃湾区',
'150303': '海南区',
'150304': '乌达区',
'150400': '赤峰市',
'150401': '市辖区',
'150402': '红山区',
'150403': '元宝山区',
'150404': '松山区',
'150421': '阿鲁科尔沁旗',
'150422': '巴林左旗',
'150423': '巴林右旗',
'150424': '林西县',
'150425': '克什克腾旗',
'150426': '翁牛特旗',
'150428': '喀喇沁旗',
'150429': '宁城县',
'150430': '敖汉旗',
'150500': '通辽市',
'150501': '市辖区',
'150502': '科尔沁区',
'150521': '科尔沁左翼中旗',
'150522': '科尔沁左翼后旗',
'150523': '开鲁县',
'150524': '库伦旗',
'150525': '奈曼旗',
'150526': '扎鲁特旗',
'150571': '通辽经济技术开发区',
'150581': '霍林郭勒市',
'150600': '鄂尔多斯市',
'150601': '市辖区',
'150602': '东胜区',
'150603': '康巴什区',
'150621': '达拉特旗',
'150622': '准格尔旗',
'150623': '鄂托克前旗',
'150624': '鄂托克旗',
'150625': '杭锦旗',
'150626': '乌审旗',
'150627': '伊金霍洛旗',
'150700': '呼伦贝尔市',
'150701': '市辖区',
'150702': '海拉尔区',
'150703': '扎赉诺尔区',
'150721': '阿荣旗',
'150722': '莫力达瓦达斡尔族自治旗',
'150723': '鄂伦春自治旗',
'150724': '鄂温克族自治旗',
'150725': '陈巴尔虎旗',
'150726': '新巴尔虎左旗',
'150727': '新巴尔虎右旗',
'150781': '满洲里市',
'150782': '牙克石市',
'150783': '扎兰屯市',
'150784': '额尔古纳市',
'150785': '根河市',
'150800': '巴彦淖尔市',
'150801': '市辖区',
'150802': '临河区',
'150821': '五原县',
'150822': '磴口县',
'150823': '乌拉特前旗',
'150824': '乌拉特中旗',
'150825': '乌拉特后旗',
'150826': '杭锦后旗',
'150900': '乌兰察布市',
'150901': '市辖区',
'150902': '集宁区',
'150921': '卓资县',
'150922': '化德县',
'150923': '商都县',
'150924': '兴和县',
'150925': '凉城县',
'150926': '察哈尔右翼前旗',
'150927': '察哈尔右翼中旗',
'150928': '察哈尔右翼后旗',
'150929': '四子王旗',
'150981': '丰镇市',
'152200': '兴安盟',
'152201': '乌兰浩特市',
'152202': '阿尔山市',
'152221': '科尔沁右翼前旗',
'152222': '科尔沁右翼中旗',
'152223': '扎赉特旗',
'152224': '突泉县',
'152500': '锡林郭勒盟',
'152501': '二连浩特市',
'152502': '锡林浩特市',
'152522': '阿巴嘎旗',
'152523': '苏尼特左旗',
'152524': '苏尼特右旗',
'152525': '东乌珠穆沁旗',
'152526': '西乌珠穆沁旗',
'152527': '太仆寺旗',
'152528': '镶黄旗',
'152529': '正镶白旗',
'152530': '正蓝旗',
'152531': '多伦县',
'152571': '乌拉盖管委会',
'152900': '阿拉善盟',
'152921': '阿拉善左旗',
'152922': '阿拉善右旗',
'152923': '额济纳旗',
'152971': '内蒙古阿拉善经济开发区',
'210000': '辽宁省',
'210100': '沈阳市',
'210101': '市辖区',
'210102': '和平区',
'210103': '沈河区',
'210104': '大东区',
'210105': '皇姑区',
'210106': '铁西区',
'210111': '苏家屯区',
'210112': '浑南区',
'210113': '沈北新区',
'210114': '于洪区',
'210115': '辽中区',
'210123': '康平县',
'210124': '法库县',
'210181': '新民市',
'210200': '大连市',
'210201': '市辖区',
'210202': '中山区',
'210203': '西岗区',
'210204': '沙河口区',
'210211': '甘井子区',
'210212': '旅顺口区',
'210213': '金州区',
'210214': '普兰店区',
'210224': '长海县',
'210281': '瓦房店市',
'210283': '庄河市',
'210300': '鞍山市',
'210301': '市辖区',
'210302': '铁东区',
'210303': '铁西区',
'210304': '立山区',
'210311': '千山区',
'210321': '台安县',
'210323': '岫岩满族自治县',
'210381': '海城市',
'210400': '抚顺市',
'210401': '市辖区',
'210402': '新抚区',
'210403': '东洲区',
'210404': '望花区',
'210411': '顺城区',
'210421': '抚顺县',
'210422': '新宾满族自治县',
'210423': '清原满族自治县',
'210500': '本溪市',
'210501': '市辖区',
'210502': '平山区',
'210503': '溪湖区',
'210504': '明山区',
'210505': '南芬区',
'210521': '本溪满族自治县',
'210522': '桓仁满族自治县',
'210600': '丹东市',
'210601': '市辖区',
'210602': '元宝区',
'210603': '振兴区',
'210604': '振安区',
'210624': '宽甸满族自治县',
'210681': '东港市',
'210682': '凤城市',
'210700': '锦州市',
'210701': '市辖区',
'210702': '古塔区',
'210703': '凌河区',
'210711': '太和区',
'210726': '黑山县',
'210727': '义县',
'210781': '凌海市',
'210782': '北镇市',
'210800': '营口市',
'210801': '市辖区',
'210802': '站前区',
'210803': '西市区',
'210804': '鲅鱼圈区',
'210811': '老边区',
'210881': '盖州市',
'210882': '大石桥市',
'210900': '阜新市',
'210901': '市辖区',
'210902': '海州区',
'210903': '新邱区',
'210904': '太平区',
'210905': '清河门区',
'210911': '细河区',
'210921': '阜新蒙古族自治县',
'210922': '彰武县',
'211000': '辽阳市',
'211001': '市辖区',
'211002': '白塔区',
'211003': '文圣区',
'211004': '宏伟区',
'211005': '弓长岭区',
'211011': '太子河区',
'211021': '辽阳县',
'211081': '灯塔市',
'211100': '盘锦市',
'211101': '市辖区',
'211102': '双台子区',
'211103': '兴隆台区',
'211104': '大洼区',
'211122': '盘山县',
'211200': '铁岭市',
'211201': '市辖区',
'211202': '银州区',
'211204': '清河区',
'211221': '铁岭县',
'211223': '西丰县',
'211224': '昌图县',
'211281': '调兵山市',
'211282': '开原市',
'211300': '朝阳市',
'211301': '市辖区',
'211302': '双塔区',
'211303': '龙城区',
'211321': '朝阳县',
'211322': '建平县',
'211324': '喀喇沁左翼蒙古族自治县',
'211381': '北票市',
'211382': '凌源市',
'211400': '葫芦岛市',
'211401': '市辖区',
'211402': '连山区',
'211403': '龙港区',
'211404': '南票区',
'211421': '绥中县',
'211422': '建昌县',
'211481': '兴城市',
'220000': '吉林省',
'220100': '长春市',
'220101': '市辖区',
'220102': '南关区',
'220103': '宽城区',
'220104': '朝阳区',
'220105': '二道区',
'220106': '绿园区',
'220112': '双阳区',
'220113': '九台区',
'220122': '农安县',
'220171': '长春经济技术开发区',
'220172': '长春净月高新技术产业开发区',
'220173': '长春高新技术产业开发区',
'220174': '长春汽车经济技术开发区',
'220182': '榆树市',
'220183': '德惠市',
'220200': '吉林市',
'220201': '市辖区',
'220202': '昌邑区',
'220203': '龙潭区',
'220204': '船营区',
'220211': '丰满区',
'220221': '永吉县',
'220271': '吉林经济开发区',
'220272': '吉林高新技术产业开发区',
'220273': '吉林中国新加坡食品区',
'220281': '蛟河市',
'220282': '桦甸市',
'220283': '舒兰市',
'220284': '磐石市',
'220300': '四平市',
'220301': '市辖区',
'220302': '铁西区',
'220303': '铁东区',
'220322': '梨树县',
'220323': '伊通满族自治县',
'220381': '公主岭市',
'220382': '双辽市',
'220400': '辽源市',
'220401': '市辖区',
'220402': '龙山区',
'220403': '西安区',
'220421': '东丰县',
'220422': '东辽县',
'220500': '通化市',
'220501': '市辖区',
'220502': '东昌区',
'220503': '二道江区',
'220521': '通化县',
'220523': '辉南县',
'220524': '柳河县',
'220581': '梅河口市',
'220582': '集安市',
'220600': '白山市',
'220601': '市辖区',
'220602': '浑江区',
'220605': '江源区',
'220621': '抚松县',
'220622': '靖宇县',
'220623': '长白朝鲜族自治县',
'220681': '临江市',
'220700': '松原市',
'220701': '市辖区',
'220702': '宁江区',
'220721': '前郭尔罗斯蒙古族自治县',
'220722': '长岭县',
'220723': '乾安县',
'220771': '吉林松原经济开发区',
'220781': '扶余市',
'220800': '白城市',
'220801': '市辖区',
'220802': '洮北区',
'220821': '镇赉县',
'220822': '通榆县',
'220871': '吉林白城经济开发区',
'220881': '洮南市',
'220882': '大安市',
'222400': '延边朝鲜族自治州',
'222401': '延吉市',
'222402': '图们市',
'222403': '敦化市',
'222404': '珲春市',
'222405': '龙井市',
'222406': '和龙市',
'222424': '汪清县',
'222426': '安图县',
'230000': '黑龙江省',
'230100': '哈尔滨市',
'230101': '市辖区',
'230102': '道里区',
'230103': '南岗区',
'230104': '道外区',
'230108': '平房区',
'230109': '松北区',
'230110': '香坊区',
'230111': '呼兰区',
'230112': '阿城区',
'230113': '双城区',
'230123': '依兰县',
'230124': '方正县',
'230125': '宾县',
'230126': '巴彦县',
'230127': '木兰县',
'230128': '通河县',
'230129': '延寿县',
'230183': '尚志市',
'230184': '五常市',
'230200': '齐齐哈尔市',
'230201': '市辖区',
'230202': '龙沙区',
'230203': '建华区',
'230204': '铁锋区',
'230205': '昂昂溪区',
'230206': '富拉尔基区',
'230207': '碾子山区',
'230208': '梅里斯达斡尔族区',
'230221': '龙江县',
'230223': '依安县',
'230224': '泰来县',
'230225': '甘南县',
'230227': '富裕县',
'230229': '克山县',
'230230': '克东县',
'230231': '拜泉县',
'230281': '讷河市',
'230300': '鸡西市',
'230301': '市辖区',
'230302': '鸡冠区',
'230303': '恒山区',
'230304': '滴道区',
'230305': '梨树区',
'230306': '城子河区',
'230307': '麻山区',
'230321': '鸡东县',
'230381': '虎林市',
'230382': '密山市',
'230400': '鹤岗市',
'230401': '市辖区',
'230402': '向阳区',
'230403': '工农区',
'230404': '南山区',
'230405': '兴安区',
'230406': '东山区',
'230407': '兴山区',
'230421': '萝北县',
'230422': '绥滨县',
'230500': '双鸭山市',
'230501': '市辖区',
'230502': '尖山区',
'230503': '岭东区',
'230505': '四方台区',
'230506': '宝山区',
'230521': '集贤县',
'230522': '友谊县',
'230523': '宝清县',
'230524': '饶河县',
'230600': '大庆市',
'230601': '市辖区',
'230602': '萨尔图区',
'230603': '龙凤区',
'230604': '让胡路区',
'230605': '红岗区',
'230606': '大同区',
'230621': '肇州县',
'230622': '肇源县',
'230623': '林甸县',
'230624': '杜尔伯特蒙古族自治县',
'230671': '大庆高新技术产业开发区',
'230700': '伊春市',
'230701': '市辖区',
'230702': '伊春区',
'230703': '南岔区',
'230704': '友好区',
'230705': '西林区',
'230706': '翠峦区',
'230707': '新青区',
'230708': '美溪区',
'230709': '金山屯区',
'230710': '五营区',
'230711': '乌马河区',
'230712': '汤旺河区',
'230713': '带岭区',
'230714': '乌伊岭区',
'230715': '红星区',
'230716': '上甘岭区',
'230722': '嘉荫县',
'230781': '铁力市',
'230800': '佳木斯市',
'230801': '市辖区',
'230803': '向阳区',
'230804': '前进区',
'230805': '东风区',
'230811': '郊区',
'230822': '桦南县',
'230826': '桦川县',
'230828': '汤原县',
'230881': '同江市',
'230882': '富锦市',
'230883': '抚远市',
'230900': '七台河市',
'230901': '市辖区',
'230902': '新兴区',
'230903': '桃山区',
'230904': '茄子河区',
'230921': '勃利县',
'231000': '牡丹江市',
'231001': '市辖区',
'231002': '东安区',
'231003': '阳明区',
'231004': '爱民区',
'231005': '西安区',
'231025': '林口县',
'231071': '牡丹江经济技术开发区',
'231081': '绥芬河市',
'231083': '海林市',
'231084': '宁安市',
'231085': '穆棱市',
'231086': '东宁市',
'231100': '黑河市',
'231101': '市辖区',
'231102': '爱辉区',
'231121': '嫩江县',
'231123': '逊克县',
'231124': '孙吴县',
'231181': '北安市',
'231182': '五大连池市',
'231200': '绥化市',
'231201': '市辖区',
'231202': '北林区',
'231221': '望奎县',
'231222': '兰西县',
'231223': '青冈县',
'231224': '庆安县',
'231225': '明水县',
'231226': '绥棱县',
'231281': '安达市',
'231282': '肇东市',
'231283': '海伦市',
'232700': '大兴安岭地区',
'232701': '漠河市',
'232721': '呼玛县',
'232722': '塔河县',
'232761': '加格达奇区',
'232762': '松岭区',
'232763': '新林区',
'232764': '呼中区',
'310000': '上海市',
'310100': '市辖区',
'310101': '黄浦区',
'310104': '徐汇区',
'310105': '长宁区',
'310106': '静安区',
'310107': '普陀区',
'310109': '虹口区',
'310110': '杨浦区',
'310112': '闵行区',
'310113': '宝山区',
'310114': '嘉定区',
'310115': '浦东新区',
'310116': '金山区',
'310117': '松江区',
'310118': '青浦区',
'310120': '奉贤区',
'310151': '崇明区',
'320000': '江苏省',
'320100': '南京市',
'320101': '市辖区',
'320102': '玄武区',
'320104': '秦淮区',
'320105': '建邺区',
'320106': '鼓楼区',
'320111': '浦口区',
'320113': '栖霞区',
'320114': '雨花台区',
'320115': '江宁区',
'320116': '六合区',
'320117': '溧水区',
'320118': '高淳区',
'320200': '无锡市',
'320201': '市辖区',
'320205': '锡山区',
'320206': '惠山区',
'320211': '滨湖区',
'320213': '梁溪区',
'320214': '新吴区',
'320281': '江阴市',
'320282': '宜兴市',
'320300': '徐州市',
'320301': '市辖区',
'320302': '鼓楼区',
'320303': '云龙区',
'320305': '贾汪区',
'320311': '泉山区',
'320312': '铜山区',
'320321': '丰县',
'320322': '沛县',
'320324': '睢宁县',
'320371': '徐州经济技术开发区',
'320381': '新沂市',
'320382': '邳州市',
'320400': '常州市',
'320401': '市辖区',
'320402': '天宁区',
'320404': '钟楼区',
'320411': '新北区',
'320412': '武进区',
'320413': '金坛区',
'320481': '溧阳市',
'320500': '苏州市',
'320501': '市辖区',
'320505': '虎丘区',
'320506': '吴中区',
'320507': '相城区',
'320508': '姑苏区',
'320509': '吴江区',
'320571': '苏州工业园区',
'320581': '常熟市',
'320582': '张家港市',
'320583': '昆山市',
'320585': '太仓市',
'320600': '南通市',
'320601': '市辖区',
'320602': '崇川区',
'320611': '港闸区',
'320612': '通州区',
'320623': '如东县',
'320671': '南通经济技术开发区',
'320681': '启东市',
'320682': '如皋市',
'320684': '海门市',
'320685': '海安市',
'320700': '连云港市',
'320701': '市辖区',
'320703': '连云区',
'320706': '海州区',
'320707': '赣榆区',
'320722': '东海县',
'320723': '灌云县',
'320724': '灌南县',
'320771': '连云港经济技术开发区',
'320772': '连云港高新技术产业开发区',
'320800': '淮安市',
'320801': '市辖区',
'320803': '淮安区',
'320804': '淮阴区',
'320812': '清江浦区',
'320813': '洪泽区',
'320826': '涟水县',
'320830': '盱眙县',
'320831': '金湖县',
'320871': '淮安经济技术开发区',
'320900': '盐城市',
'320901': '市辖区',
'320902': '亭湖区',
'320903': '盐都区',
'320904': '大丰区',
'320921': '响水县',
'320922': '滨海县',
'320923': '阜宁县',
'320924': '射阳县',
'320925': '建湖县',
'320971': '盐城经济技术开发区',
'320981': '东台市',
'321000': '扬州市',
'321001': '市辖区',
'321002': '广陵区',
'321003': '邗江区',
'321012': '江都区',
'321023': '宝应县',
'321071': '扬州经济技术开发区',
'321081': '仪征市',
'321084': '高邮市',
'321100': '镇江市',
'321101': '市辖区',
'321102': '京口区',
'321111': '润州区',
'321112': '丹徒区',
'321171': '镇江新区',
'321181': '丹阳市',
'321182': '扬中市',
'321183': '句容市',
'321200': '泰州市',
'321201': '市辖区',
'321202': '海陵区',
'321203': '高港区',
'321204': '姜堰区',
'321271': '泰州医药高新技术产业开发区',
'321281': '兴化市',
'321282': '靖江市',
'321283': '泰兴市',
'321300': '宿迁市',
'321301': '市辖区',
'321302': '宿城区',
'321311': '宿豫区',
'321322': '沭阳县',
'321323': '泗阳县',
'321324': '泗洪县',
'321371': '宿迁经济技术开发区',
'330000': '浙江省',
'330100': '杭州市',
'330101': '市辖区',
'330102': '上城区',
'330103': '下城区',
'330104': '江干区',
'330105': '拱墅区',
'330106': '西湖区',
'330108': '滨江区',
'330109': '萧山区',
'330110': '余杭区',
'330111': '富阳区',
'330112': '临安区',
'330122': '桐庐县',
'330127': '淳安县',
'330182': '建德市',
'330200': '宁波市',
'330201': '市辖区',
'330203': '海曙区',
'330205': '江北区',
'330206': '北仑区',
'330211': '镇海区',
'330212': '鄞州区',
'330213': '奉化区',
'330225': '象山县',
'330226': '宁海县',
'330281': '余姚市',
'330282': '慈溪市',
'330300': '温州市',
'330301': '市辖区',
'330302': '鹿城区',
'330303': '龙湾区',
'330304': '瓯海区',
'330305': '洞头区',
'330324': '永嘉县',
'330326': '平阳县',
'330327': '苍南县',
'330328': '文成县',
'330329': '泰顺县',
'330371': '温州经济技术开发区',
'330381': '瑞安市',
'330382': '乐清市',
'330400': '嘉兴市',
'330401': '市辖区',
'330402': '南湖区',
'330411': '秀洲区',
'330421': '嘉善县',
'330424': '海盐县',
'330481': '海宁市',
'330482': '平湖市',
'330483': '桐乡市',
'330500': '湖州市',
'330501': '市辖区',
'330502': '吴兴区',
'330503': '南浔区',
'330521': '德清县',
'330522': '长兴县',
'330523': '安吉县',
'330600': '绍兴市',
'330601': '市辖区',
'330602': '越城区',
'330603': '柯桥区',
'330604': '上虞区',
'330624': '新昌县',
'330681': '诸暨市',
'330683': '嵊州市',
'330700': '金华市',
'330701': '市辖区',
'330702': '婺城区',
'330703': '金东区',
'330723': '武义县',
'330726': '浦江县',
'330727': '磐安县',
'330781': '兰溪市',
'330782': '义乌市',
'330783': '东阳市',
'330784': '永康市',
'330800': '衢州市',
'330801': '市辖区',
'330802': '柯城区',
'330803': '衢江区',
'330822': '常山县',
'330824': '开化县',
'330825': '龙游县',
'330881': '江山市',
'330900': '舟山市',
'330901': '市辖区',
'330902': '定海区',
'330903': '普陀区',
'330921': '岱山县',
'330922': '嵊泗县',
'331000': '台州市',
'331001': '市辖区',
'331002': '椒江区',
'331003': '黄岩区',
'331004': '路桥区',
'331022': '三门县',
'331023': '天台县',
'331024': '仙居县',
'331081': '温岭市',
'331082': '临海市',
'331083': '玉环市',
'331100': '丽水市',
'331101': '市辖区',
'331102': '莲都区',
'331121': '青田县',
'331122': '缙云县',
'331123': '遂昌县',
'331124': '松阳县',
'331125': '云和县',
'331126': '庆元县',
'331127': '景宁畲族自治县',
'331181': '龙泉市',
'340000': '安徽省',
'340100': '合肥市',
'340101': '市辖区',
'340102': '瑶海区',
'340103': '庐阳区',
'340104': '蜀山区',
'340111': '包河区',
'340121': '长丰县',
'340122': '肥东县',
'340123': '肥西县',
'340124': '庐江县',
'340171': '合肥高新技术产业开发区',
'340172': '合肥经济技术开发区',
'340173': '合肥新站高新技术产业开发区',
'340181': '巢湖市',
'340200': '芜湖市',
'340201': '市辖区',
'340202': '镜湖区',
'340203': '弋江区',
'340207': '鸠江区',
'340208': '三山区',
'340221': '芜湖县',
'340222': '繁昌县',
'340223': '南陵县',
'340225': '无为县',
'340271': '芜湖经济技术开发区',
'340272': '安徽芜湖长江大桥经济开发区',
'340300': '蚌埠市',
'340301': '市辖区',
'340302': '龙子湖区',
'340303': '蚌山区',
'340304': '禹会区',
'340311': '淮上区',
'340321': '怀远县',
'340322': '五河县',
'340323': '固镇县',
'340371': '蚌埠市高新技术开发区',
'340372': '蚌埠市经济开发区',
'340400': '淮南市',
'340401': '市辖区',
'340402': '大通区',
'340403': '田家庵区',
'340404': '谢家集区',
'340405': '八公山区',
'340406': '潘集区',
'340421': '凤台县',
'340422': '寿县',
'340500': '马鞍山市',
'340501': '市辖区',
'340503': '花山区',
'340504': '雨山区',
'340506': '博望区',
'340521': '当涂县',
'340522': '含山县',
'340523': '和县',
'340600': '淮北市',
'340601': '市辖区',
'340602': '杜集区',
'340603': '相山区',
'340604': '烈山区',
'340621': '濉溪县',
'340700': '铜陵市',
'340701': '市辖区',
'340705': '铜官区',
'340706': '义安区',
'340711': '郊区',
'340722': '枞阳县',
'340800': '安庆市',
'340801': '市辖区',
'340802': '迎江区',
'340803': '大观区',
'340811': '宜秀区',
'340822': '怀宁县',
'340825': '太湖县',
'340826': '宿松县',
'340827': '望江县',
'340828': '岳西县',
'340871': '安徽安庆经济开发区',
'340881': '桐城市',
'340882': '潜山市',
'341000': '黄山市',
'341001': '市辖区',
'341002': '屯溪区',
'341003': '黄山区',
'341004': '徽州区',
'341021': '歙县',
'341022': '休宁县',
'341023': '黟县',
'341024': '祁门县',
'341100': '滁州市',
'341101': '市辖区',
'341102': '琅琊区',
'341103': '南谯区',
'341122': '来安县',
'341124': '全椒县',
'341125': '定远县',
'341126': '凤阳县',
'341171': '苏滁现代产业园',
'341172': '滁州经济技术开发区',
'341181': '天长市',
'341182': '明光市',
'341200': '阜阳市',
'341201': '市辖区',
'341202': '颍州区',
'341203': '颍东区',
'341204': '颍泉区',
'341221': '临泉县',
'341222': '太和县',
'341225': '阜南县',
'341226': '颍上县',
'341271': '阜阳合肥现代产业园区',
'341272': '阜阳经济技术开发区',
'341282': '界首市',
'341300': '宿州市',
'341301': '市辖区',
'341302': '埇桥区',
'341321': '砀山县',
'341322': '萧县',
'341323': '灵璧县',
'341324': '泗县',
'341371': '宿州马鞍山现代产业园区',
'341372': '宿州经济技术开发区',
'341500': '六安市',
'341501': '市辖区',
'341502': '金安区',
'341503': '裕安区',
'341504': '叶集区',
'341522': '霍邱县',
'341523': '舒城县',
'341524': '金寨县',
'341525': '霍山县',
'341600': '亳州市',
'341601': '市辖区',
'341602': '谯城区',
'341621': '涡阳县',
'341622': '蒙城县',
'341623': '利辛县',
'341700': '池州市',
'341701': '市辖区',
'341702': '贵池区',
'341721': '东至县',
'341722': '石台县',
'341723': '青阳县',
'341800': '宣城市',
'341801': '市辖区',
'341802': '宣州区',
'341821': '郎溪县',
'341822': '广德县',
'341823': '泾县',
'341824': '绩溪县',
'341825': '旌德县',
'341871': '宣城市经济开发区',
'341881': '宁国市',
'350000': '福建省',
'350100': '福州市',
'350101': '市辖区',
'350102': '鼓楼区',
'350103': '台江区',
'350104': '仓山区',
'350105': '马尾区',
'350111': '晋安区',
'350112': '长乐区',
'350121': '闽侯县',
'350122': '连江县',
'350123': '罗源县',
'350124': '闽清县',
'350125': '永泰县',
'350128': '平潭县',
'350181': '福清市',
'350200': '厦门市',
'350201': '市辖区',
'350203': '思明区',
'350205': '海沧区',
'350206': '湖里区',
'350211': '集美区',
'350212': '同安区',
'350213': '翔安区',
'350300': '莆田市',
'350301': '市辖区',
'350302': '城厢区',
'350303': '涵江区',
'350304': '荔城区',
'350305': '秀屿区',
'350322': '仙游县',
'350400': '三明市',
'350401': '市辖区',
'350402': '梅列区',
'350403': '三元区',
'350421': '明溪县',
'350423': '清流县',
'350424': '宁化县',
'350425': '大田县',
'350426': '尤溪县',
'350427': '沙县',
'350428': '将乐县',
'350429': '泰宁县',
'350430': '建宁县',
'350481': '永安市',
'350500': '泉州市',
'350501': '市辖区',
'350502': '鲤城区',
'350503': '丰泽区',
'350504': '洛江区',
'350505': '泉港区',
'350521': '惠安县',
'350524': '安溪县',
'350525': '永春县',
'350526': '德化县',
'350527': '金门县',
'350581': '石狮市',
'350582': '晋江市',
'350583': '南安市',
'350600': '漳州市',
'350601': '市辖区',
'350602': '芗城区',
'350603': '龙文区',
'350622': '云霄县',
'350623': '漳浦县',
'350624': '诏安县',
'350625': '长泰县',
'350626': '东山县',
'350627': '南靖县',
'350628': '平和县',
'350629': '华安县',
'350681': '龙海市',
'350700': '南平市',
'350701': '市辖区',
'350702': '延平区',
'350703': '建阳区',
'350721': '顺昌县',
'350722': '浦城县',
'350723': '光泽县',
'350724': '松溪县',
'350725': '政和县',
'350781': '邵武市',
'350782': '武夷山市',
'350783': '建瓯市',
'350800': '龙岩市',
'350801': '市辖区',
'350802': '新罗区',
'350803': '永定区',
'350821': '长汀县',
'350823': '上杭县',
'350824': '武平县',
'350825': '连城县',
'350881': '漳平市',
'350900': '宁德市',
'350901': '市辖区',
'350902': '蕉城区',
'350921': '霞浦县',
'350922': '古田县',
'350923': '屏南县',
'350924': '寿宁县',
'350925': '周宁县',
'350926': '柘荣县',
'350981': '福安市',
'350982': '福鼎市',
'360000': '江西省',
'360100': '南昌市',
'360101': '市辖区',
'360102': '东湖区',
'360103': '西湖区',
'360104': '青云谱区',
'360105': '湾里区',
'360111': '青山湖区',
'360112': '新建区',
'360121': '南昌县',
'360123': '安义县',
'360124': '进贤县',
'360200': '景德镇市',
'360201': '市辖区',
'360202': '昌江区',
'360203': '珠山区',
'360222': '浮梁县',
'360281': '乐平市',
'360300': '萍乡市',
'360301': '市辖区',
'360302': '安源区',
'360313': '湘东区',
'360321': '莲花县',
'360322': '上栗县',
'360323': '芦溪县',
'360400': '九江市',
'360401': '市辖区',
'360402': '濂溪区',
'360403': '浔阳区',
'360404': '柴桑区',
'360423': '武宁县',
'360424': '修水县',
'360425': '永修县',
'360426': '德安县',
'360428': '都昌县',
'360429': '湖口县',
'360430': '彭泽县',
'360481': '瑞昌市',
'360482': '共青城市',
'360483': '庐山市',
'360500': '新余市',
'360501': '市辖区',
'360502': '渝水区',
'360521': '分宜县',
'360600': '鹰潭市',
'360601': '市辖区',
'360602': '月湖区',
'360603': '余江区',
'360681': '贵溪市',
'360700': '赣州市',
'360701': '市辖区',
'360702': '章贡区',
'360703': '南康区',
'360704': '赣县区',
'360722': '信丰县',
'360723': '大余县',
'360724': '上犹县',
'360725': '崇义县',
'360726': '安远县',
'360727': '龙南县',
'360728': '定南县',
'360729': '全南县',
'360730': '宁都县',
'360731': '于都县',
'360732': '兴国县',
'360733': '会昌县',
'360734': '寻乌县',
'360735': '石城县',
'360781': '瑞金市',
'360800': '吉安市',
'360801': '市辖区',
'360802': '吉州区',
'360803': '青原区',
'360821': '吉安县',
'360822': '吉水县',
'360823': '峡江县',
'360824': '新干县',
'360825': '永丰县',
'360826': '泰和县',
'360827': '遂川县',
'360828': '万安县',
'360829': '安福县',
'360830': '永新县',
'360881': '井冈山市',
'360900': '宜春市',
'360901': '市辖区',
'360902': '袁州区',
'360921': '奉新县',
'360922': '万载县',
'360923': '上高县',
'360924': '宜丰县',
'360925': '靖安县',
'360926': '铜鼓县',
'360981': '丰城市',
'360982': '樟树市',
'360983': '高安市',
'361000': '抚州市',
'361001': '市辖区',
'361002': '临川区',
'361003': '东乡区',
'361021': '南城县',
'361022': '黎川县',
'361023': '南丰县',
'361024': '崇仁县',
'361025': '乐安县',
'361026': '宜黄县',
'361027': '金溪县',
'361028': '资溪县',
'361030': '广昌县',
'361100': '上饶市',
'361101': '市辖区',
'361102': '信州区',
'361103': '广丰区',
'361121': '上饶县',
'361123': '玉山县',
'361124': '铅山县',
'361125': '横峰县',
'361126': '弋阳县',
'361127': '余干县',
'361128': '鄱阳县',
'361129': '万年县',
'361130': '婺源县',
'361181': '德兴市',
'370000': '山东省',
'370100': '济南市',
'370101': '市辖区',
'370102': '历下区',
'370103': '市中区',
'370104': '槐荫区',
'370105': '天桥区',
'370112': '历城区',
'370113': '长清区',
'370114': '章丘区',
'370115': '济阳区',
'370124': '平阴县',
'370126': '商河县',
'370171': '济南高新技术产业开发区',
'370200': '青岛市',
'370201': '市辖区',
'370202': '市南区',
'370203': '市北区',
'370211': '黄岛区',
'370212': '崂山区',
'370213': '李沧区',
'370214': '城阳区',
'370215': '即墨区',
'370271': '青岛高新技术产业开发区',
'370281': '胶州市',
'370283': '平度市',
'370285': '莱西市',
'370300': '淄博市',
'370301': '市辖区',
'370302': '淄川区',
'370303': '张店区',
'370304': '博山区',
'370305': '临淄区',
'370306': '周村区',
'370321': '桓台县',
'370322': '高青县',
'370323': '沂源县',
'370400': '枣庄市',
'370401': '市辖区',
'370402': '市中区',
'370403': '薛城区',
'370404': '峄城区',
'370405': '台儿庄区',
'370406': '山亭区',
'370481': '滕州市',
'370500': '东营市',
'370501': '市辖区',
'370502': '东营区',
'370503': '河口区',
'370505': '垦利区',
'370522': '利津县',
'370523': '广饶县',
'370571': '东营经济技术开发区',
'370572': '东营港经济开发区',
'370600': '烟台市',
'370601': '市辖区',
'370602': '芝罘区',
'370611': '福山区',
'370612': '牟平区',
'370613': '莱山区',
'370634': '长岛县',
'370671': '烟台高新技术产业开发区',
'370672': '烟台经济技术开发区',
'370681': '龙口市',
'370682': '莱阳市',
'370683': '莱州市',
'370684': '蓬莱市',
'370685': '招远市',
'370686': '栖霞市',
'370687': '海阳市',
'370700': '潍坊市',
'370701': '市辖区',
'370702': '潍城区',
'370703': '寒亭区',
'370704': '坊子区',
'370705': '奎文区',
'370724': '临朐县',
'370725': '昌乐县',
'370772': '潍坊滨海经济技术开发区',
'370781': '青州市',
'370782': '诸城市',
'370783': '寿光市',
'370784': '安丘市',
'370785': '高密市',
'370786': '昌邑市',
'370800': '济宁市',
'370801': '市辖区',
'370811': '任城区',
'370812': '兖州区',
'370826': '微山县',
'370827': '鱼台县',
'370828': '金乡县',
'370829': '嘉祥县',
'370830': '汶上县',
'370831': '泗水县',
'370832': '梁山县',
'370871': '济宁高新技术产业开发区',
'370881': '曲阜市',
'370883': '邹城市',
'370900': '泰安市',
'370901': '市辖区',
'370902': '泰山区',
'370911': '岱岳区',
'370921': '宁阳县',
'370923': '东平县',
'370982': '新泰市',
'370983': '肥城市',
'371000': '威海市',
'371001': '市辖区',
'371002': '环翠区',
'371003': '文登区',
'371071': '威海火炬高技术产业开发区',
'371072': '威海经济技术开发区',
'371073': '威海临港经济技术开发区',
'371082': '荣成市',
'371083': '乳山市',
'371100': '日照市',
'371101': '市辖区',
'371102': '东港区',
'371103': '岚山区',
'371121': '五莲县',
'371122': '莒县',
'371171': '日照经济技术开发区',
'371200': '莱芜市',
'371201': '市辖区',
'371202': '莱城区',
'371203': '钢城区',
'371300': '临沂市',
'371301': '市辖区',
'371302': '兰山区',
'371311': '罗庄区',
'371312': '河东区',
'371321': '沂南县',
'371322': '郯城县',
'371323': '沂水县',
'371324': '兰陵县',
'371325': '费县',
'371326': '平邑县',
'371327': '莒南县',
'371328': '蒙阴县',
'371329': '临沭县',
'371371': '临沂高新技术产业开发区',
'371372': '临沂经济技术开发区',
'371373': '临沂临港经济开发区',
'371400': '德州市',
'371401': '市辖区',
'371402': '德城区',
'371403': '陵城区',
'371422': '宁津县',
'371423': '庆云县',
'371424': '临邑县',
'371425': '齐河县',
'371426': '平原县',
'371427': '夏津县',
'371428': '武城县',
'371471': '德州经济技术开发区',
'371472': '德州运河经济开发区',
'371481': '乐陵市',
'371482': '禹城市',
'371500': '聊城市',
'371501': '市辖区',
'371502': '东昌府区',
'371521': '阳谷县',
'371522': '莘县',
'371523': '茌平县',
'371524': '东阿县',
'371525': '冠县',
'371526': '高唐县',
'371581': '临清市',
'371600': '滨州市',
'371601': '市辖区',
'371602': '滨城区',
'371603': '沾化区',
'371621': '惠民县',
'371622': '阳信县',
'371623': '无棣县',
'371625': '博兴县',
'371681': '邹平市',
'371700': '菏泽市',
'371701': '市辖区',
'371702': '牡丹区',
'371703': '定陶区',
'371721': '曹县',
'371722': '单县',
'371723': '成武县',
'371724': '巨野县',
'371725': '郓城县',
'371726': '鄄城县',
'371728': '东明县',
'371771': '菏泽经济技术开发区',
'371772': '菏泽高新技术开发区',
'410000': '河南省',
'410100': '郑州市',
'410101': '市辖区',
'410102': '中原区',
'410103': '二七区',
'410104': '管城回族区',
'410105': '金水区',
'410106': '上街区',
'410108': '惠济区',
'410122': '中牟县',
'410171': '郑州经济技术开发区',
'410172': '郑州高新技术产业开发区',
'410173': '郑州航空港经济综合实验区',
'410181': '巩义市',
'410182': '荥阳市',
'410183': '新密市',
'410184': '新郑市',
'410185': '登封市',
'410200': '开封市',
'410201': '市辖区',
'410202': '龙亭区',
'410203': '顺河回族区',
'410204': '鼓楼区',
'410205': '禹王台区',
'410212': '祥符区',
'410221': '杞县',
'410222': '通许县',
'410223': '尉氏县',
'410225': '兰考县',
'410300': '洛阳市',
'410301': '市辖区',
'410302': '老城区',
'410303': '西工区',
'410304': '瀍河回族区',
'410305': '涧西区',
'410306': '吉利区',
'410311': '洛龙区',
'410322': '孟津县',
'410323': '新安县',
'410324': '栾川县',
'410325': '嵩县',
'410326': '汝阳县',
'410327': '宜阳县',
'410328': '洛宁县',
'410329': '伊川县',
'410371': '洛阳高新技术产业开发区',
'410381': '偃师市',
'410400': '平顶山市',
'410401': '市辖区',
'410402': '新华区',
'410403': '卫东区',
'410404': '石龙区',
'410411': '湛河区',
'410421': '宝丰县',
'410422': '叶县',
'410423': '鲁山县',
'410425': '郏县',
'410471': '平顶山高新技术产业开发区',
'410472': '平顶山市新城区',
'410481': '舞钢市',
'410482': '汝州市',
'410500': '安阳市',
'410501': '市辖区',
'410502': '文峰区',
'410503': '北关区',
'410505': '殷都区',
'410506': '龙安区',
'410522': '安阳县',
'410523': '汤阴县',
'410526': '滑县',
'410527': '内黄县',
'410571': '安阳高新技术产业开发区',
'410581': '林州市',
'410600': '鹤壁市',
'410601': '市辖区',
'410602': '鹤山区',
'410603': '山城区',
'410611': '淇滨区',
'410621': '浚县',
'410622': '淇县',
'410671': '鹤壁经济技术开发区',
'410700': '新乡市',
'410701': '市辖区',
'410702': '红旗区',
'410703': '卫滨区',
'410704': '凤泉区',
'410711': '牧野区',
'410721': '新乡县',
'410724': '获嘉县',
'410725': '原阳县',
'410726': '延津县',
'410727': '封丘县',
'410728': '长垣县',
'410771': '新乡高新技术产业开发区',
'410772': '新乡经济技术开发区',
'410773': '新乡市平原城乡一体化示范区',
'410781': '卫辉市',
'410782': '辉县市',
'410800': '焦作市',
'410801': '市辖区',
'410802': '解放区',
'410803': '中站区',
'410804': '马村区',
'410811': '山阳区',
'410821': '修武县',
'410822': '博爱县',
'410823': '武陟县',
'410825': '温县',
'410871': '焦作城乡一体化示范区',
'410882': '沁阳市',
'410883': '孟州市',
'410900': '濮阳市',
'410901': '市辖区',
'410902': '华龙区',
'410922': '清丰县',
'410923': '南乐县',
'410926': '范县',
'410927': '台前县',
'410928': '濮阳县',
'410971': '河南濮阳工业园区',
'410972': '濮阳经济技术开发区',
'411000': '许昌市',
'411001': '市辖区',
'411002': '魏都区',
'411003': '建安区',
'411024': '鄢陵县',
'411025': '襄城县',
'411071': '许昌经济技术开发区',
'411081': '禹州市',
'411082': '长葛市',
'411100': '漯河市',
'411101': '市辖区',
'411102': '源汇区',
'411103': '郾城区',
'411104': '召陵区',
'411121': '舞阳县',
'411122': '临颍县',
'411171': '漯河经济技术开发区',
'411200': '三门峡市',
'411201': '市辖区',
'411202': '湖滨区',
'411203': '陕州区',
'411221': '渑池县',
'411224': '卢氏县',
'411271': '河南三门峡经济开发区',
'411281': '义马市',
'411282': '灵宝市',
'411300': '南阳市',
'411301': '市辖区',
'411302': '宛城区',
'411303': '卧龙区',
'411321': '南召县',
'411322': '方城县',
'411323': '西峡县',
'411324': '镇平县',
'411325': '内乡县',
'411326': '淅川县',
'411327': '社旗县',
'411328': '唐河县',
'411329': '新野县',
'411330': '桐柏县',
'411371': '南阳高新技术产业开发区',
'411372': '南阳市城乡一体化示范区',
'411381': '邓州市',
'411400': '商丘市',
'411401': '市辖区',
'411402': '梁园区',
'411403': '睢阳区',
'411421': '民权县',
'411422': '睢县',
'411423': '宁陵县',
'411424': '柘城县',
'411425': '虞城县',
'411426': '夏邑县',
'411471': '豫东综合物流产业聚集区',
'411472': '河南商丘经济开发区',
'411481': '永城市',
'411500': '信阳市',
'411501': '市辖区',
'411502': '浉河区',
'411503': '平桥区',
'411521': '罗山县',
'411522': '光山县',
'411523': '新县',
'411524': '商城县',
'411525': '固始县',
'411526': '潢川县',
'411527': '淮滨县',
'411528': '息县',
'411571': '信阳高新技术产业开发区',
'411600': '周口市',
'411601': '市辖区',
'411602': '川汇区',
'411621': '扶沟县',
'411622': '西华县',
'411623': '商水县',
'411624': '沈丘县',
'411625': '郸城县',
'411626': '淮阳县',
'411627': '太康县',
'411628': '鹿邑县',
'411671': '河南周口经济开发区',
'411681': '项城市',
'411700': '驻马店市',
'411701': '市辖区',
'411702': '驿城区',
'411721': '西平县',
'411722': '上蔡县',
'411723': '平舆县',
'411724': '正阳县',
'411725': '确山县',
'411726': '泌阳县',
'411727': '汝南县',
'411728': '遂平县',
'411729': '新蔡县',
'411771': '河南驻马店经济开发区',
'419000': '省直辖县级行政区划',
'419001': '济源市',
'420000': '湖北省',
'420100': '武汉市',
'420101': '市辖区',
'420102': '江岸区',
'420103': '江汉区',
'420104': '硚口区',
'420105': '汉阳区',
'420106': '武昌区',
'420107': '青山区',
'420111': '洪山区',
'420112': '东西湖区',
'420113': '汉南区',
'420114': '蔡甸区',
'420115': '江夏区',
'420116': '黄陂区',
'420117': '新洲区',
'420200': '黄石市',
'420201': '市辖区',
'420202': '黄石港区',
'420203': '西塞山区',
'420204': '下陆区',
'420205': '铁山区',
'420222': '阳新县',
'420281': '大冶市',
'420300': '十堰市',
'420301': '市辖区',
'420302': '茅箭区',
'420303': '张湾区',
'420304': '郧阳区',
'420322': '郧西县',
'420323': '竹山县',
'420324': '竹溪县',
'420325': '房县',
'420381': '丹江口市',
'420500': '宜昌市',
'420501': '市辖区',
'420502': '西陵区',
'420503': '伍家岗区',
'420504': '点军区',
'420505': '猇亭区',
'420506': '夷陵区',
'420525': '远安县',
'420526': '兴山县',
'420527': '秭归县',
'420528': '长阳土家族自治县',
'420529': '五峰土家族自治县',
'420581': '宜都市',
'420582': '当阳市',
'420583': '枝江市',
'420600': '襄阳市',
'420601': '市辖区',
'420602': '襄城区',
'420606': '樊城区',
'420607': '襄州区',
'420624': '南漳县',
'420625': '谷城县',
'420626': '保康县',
'420682': '老河口市',
'420683': '枣阳市',
'420684': '宜城市',
'420700': '鄂州市',
'420701': '市辖区',
'420702': '梁子湖区',
'420703': '华容区',
'420704': '鄂城区',
'420800': '荆门市',
'420801': '市辖区',
'420802': '东宝区',
'420804': '掇刀区',
'420822': '沙洋县',
'420881': '钟祥市',
'420882': '京山市',
'420900': '孝感市',
'420901': '市辖区',
'420902': '孝南区',
'420921': '孝昌县',
'420922': '大悟县',
'420923': '云梦县',
'420981': '应城市',
'420982': '安陆市',
'420984': '汉川市',
'421000': '荆州市',
'421001': '市辖区',
'421002': '沙市区',
'421003': '荆州区',
'421022': '公安县',
'421023': '监利县',
'421024': '江陵县',
'421071': '荆州经济技术开发区',
'421081': '石首市',
'421083': '洪湖市',
'421087': '松滋市',
'421100': '黄冈市',
'421101': '市辖区',
'421102': '黄州区',
'421121': '团风县',
'421122': '红安县',
'421123': '罗田县',
'421124': '英山县',
'421125': '浠水县',
'421126': '蕲春县',
'421127': '黄梅县',
'421171': '龙感湖管理区',
'421181': '麻城市',
'421182': '武穴市',
'421200': '咸宁市',
'421201': '市辖区',
'421202': '咸安区',
'421221': '嘉鱼县',
'421222': '通城县',
'421223': '崇阳县',
'421224': '通山县',
'421281': '赤壁市',
'421300': '随州市',
'421301': '市辖区',
'421303': '曾都区',
'421321': '随县',
'421381': '广水市',
'422800': '恩施土家族苗族自治州',
'422801': '恩施市',
'422802': '利川市',
'422822': '建始县',
'422823': '巴东县',
'422825': '宣恩县',
'422826': '咸丰县',
'422827': '来凤县',
'422828': '鹤峰县',
'429000': '省直辖县级行政区划',
'429004': '仙桃市',
'429005': '潜江市',
'429006': '天门市',
'429021': '神农架林区',
'430000': '湖南省',
'430100': '长沙市',
'430101': '市辖区',
'430102': '芙蓉区',
'430103': '天心区',
'430104': '岳麓区',
'430105': '开福区',
'430111': '雨花区',
'430112': '望城区',
'430121': '长沙县',
'430181': '浏阳市',
'430182': '宁乡市',
'430200': '株洲市',
'430201': '市辖区',
'430202': '荷塘区',
'430203': '芦淞区',
'430204': '石峰区',
'430211': '天元区',
'430212': '渌口区',
'430223': '攸县',
'430224': '茶陵县',
'430225': '炎陵县',
'430271': '云龙示范区',
'430281': '醴陵市',
'430300': '湘潭市',
'430301': '市辖区',
'430302': '雨湖区',
'430304': '岳塘区',
'430321': '湘潭县',
'430371': '湖南湘潭高新技术产业园区',
'430372': '湘潭昭山示范区',
'430373': '湘潭九华示范区',
'430381': '湘乡市',
'430382': '韶山市',
'430400': '衡阳市',
'430401': '市辖区',
'430405': '珠晖区',
'430406': '雁峰区',
'430407': '石鼓区',
'430408': '蒸湘区',
'430412': '南岳区',
'430421': '衡阳县',
'430422': '衡南县',
'430423': '衡山县',
'430424': '衡东县',
'430426': '祁东县',
'430471': '衡阳综合保税区',
'430472': '湖南衡阳高新技术产业园区',
'430473': '湖南衡阳松木经济开发区',
'430481': '耒阳市',
'430482': '常宁市',
'430500': '邵阳市',
'430501': '市辖区',
'430502': '双清区',
'430503': '大祥区',
'430511': '北塔区',
'430521': '邵东县',
'430522': '新邵县',
'430523': '邵阳县',
'430524': '隆回县',
'430525': '洞口县',
'430527': '绥宁县',
'430528': '新宁县',
'430529': '城步苗族自治县',
'430581': '武冈市',
'430600': '岳阳市',
'430601': '市辖区',
'430602': '岳阳楼区',
'430603': '云溪区',
'430611': '君山区',
'430621': '岳阳县',
'430623': '华容县',
'430624': '湘阴县',
'430626': '平江县',
'430671': '岳阳市屈原管理区',
'430681': '汨罗市',
'430682': '临湘市',
'430700': '常德市',
'430701': '市辖区',
'430702': '武陵区',
'430703': '鼎城区',
'430721': '安乡县',
'430722': '汉寿县',
'430723': '澧县',
'430724': '临澧县',
'430725': '桃源县',
'430726': '石门县',
'430771': '常德市西洞庭管理区',
'430781': '津市市',
'430800': '张家界市',
'430801': '市辖区',
'430802': '永定区',
'430811': '武陵源区',
'430821': '慈利县',
'430822': '桑植县',
'430900': '益阳市',
'430901': '市辖区',
'430902': '资阳区',
'430903': '赫山区',
'430921': '南县',
'430922': '桃江县',
'430923': '安化县',
'430971': '益阳市大通湖管理区',
'430972': '湖南益阳高新技术产业园区',
'430981': '沅江市',
'431000': '郴州市',
'431001': '市辖区',
'431002': '北湖区',
'431003': '苏仙区',
'431021': '桂阳县',
'431022': '宜章县',
'431023': '永兴县',
'431024': '嘉禾县',
'431025': '临武县',
'431026': '汝城县',
'431027': '桂东县',
'431028': '安仁县',
'431081': '资兴市',
'431100': '永州市',
'431101': '市辖区',
'431102': '零陵区',
'431103': '冷水滩区',
'431121': '祁阳县',
'431122': '东安县',
'431123': '双牌县',
'431124': '道县',
'431125': '江永县',
'431126': '宁远县',
'431127': '蓝山县',
'431128': '新田县',
'431129': '江华瑶族自治县',
'431171': '永州经济技术开发区',
'431172': '永州市金洞管理区',
'431173': '永州市回龙圩管理区',
'431200': '怀化市',
'431201': '市辖区',
'431202': '鹤城区',
'431221': '中方县',
'431222': '沅陵县',
'431223': '辰溪县',
'431224': '溆浦县',
'431225': '会同县',
'431226': '麻阳苗族自治县',
'431227': '新晃侗族自治县',
'431228': '芷江侗族自治县',
'431229': '靖州苗族侗族自治县',
'431230': '通道侗族自治县',
'431271': '怀化市洪江管理区',
'431281': '洪江市',
'431300': '娄底市',
'431301': '市辖区',
'431302': '娄星区',
'431321': '双峰县',
'431322': '新化县',
'431381': '冷水江市',
'431382': '涟源市',
'433100': '湘西土家族苗族自治州',
'433101': '吉首市',
'433122': '泸溪县',
'433123': '凤凰县',
'433124': '花垣县',
'433125': '保靖县',
'433126': '古丈县',
'433127': '永顺县',
'433130': '龙山县',
'433172': '湖南吉首经济开发区',
'433173': '湖南永顺经济开发区',
'440000': '广东省',
'440100': '广州市',
'440101': '市辖区',
'440103': '荔湾区',
'440104': '越秀区',
'440105': '海珠区',
'440106': '天河区',
'440111': '白云区',
'440112': '黄埔区',
'440113': '番禺区',
'440114': '花都区',
'440115': '南沙区',
'440117': '从化区',
'440118': '增城区',
'440200': '韶关市',
'440201': '市辖区',
'440203': '武江区',
'440204': '浈江区',
'440205': '曲江区',
'440222': '始兴县',
'440224': '仁化县',
'440229': '翁源县',
'440232': '乳源瑶族自治县',
'440233': '新丰县',
'440281': '乐昌市',
'440282': '南雄市',
'440300': '深圳市',
'440301': '市辖区',
'440303': '罗湖区',
'440304': '福田区',
'440305': '南山区',
'440306': '宝安区',
'440307': '龙岗区',
'440308': '盐田区',
'440309': '龙华区',
'440310': '坪山区',
'440311': '光明区',
'440400': '珠海市',
'440401': '市辖区',
'440402': '香洲区',
'440403': '斗门区',
'440404': '金湾区',
'440500': '汕头市',
'440501': '市辖区',
'440507': '龙湖区',
'440511': '金平区',
'440512': '濠江区',
'440513': '潮阳区',
'440514': '潮南区',
'440515': '澄海区',
'440523': '南澳县',
'440600': '佛山市',
'440601': '市辖区',
'440604': '禅城区',
'440605': '南海区',
'440606': '顺德区',
'440607': '三水区',
'440608': '高明区',
'440700': '江门市',
'440701': '市辖区',
'440703': '蓬江区',
'440704': '江海区',
'440705': '新会区',
'440781': '台山市',
'440783': '开平市',
'440784': '鹤山市',
'440785': '恩平市',
'440800': '湛江市',
'440801': '市辖区',
'440802': '赤坎区',
'440803': '霞山区',
'440804': '坡头区',
'440811': '麻章区',
'440823': '遂溪县',
'440825': '徐闻县',
'440881': '廉江市',
'440882': '雷州市',
'440883': '吴川市',
'440900': '茂名市',
'440901': '市辖区',
'440902': '茂南区',
'440904': '电白区',
'440981': '高州市',
'440982': '化州市',
'440983': '信宜市',
'441200': '肇庆市',
'441201': '市辖区',
'441202': '端州区',
'441203': '鼎湖区',
'441204': '高要区',
'441223': '广宁县',
'441224': '怀集县',
'441225': '封开县',
'441226': '德庆县',
'441284': '四会市',
'441300': '惠州市',
'441301': '市辖区',
'441302': '惠城区',
'441303': '惠阳区',
'441322': '博罗县',
'441323': '惠东县',
'441324': '龙门县',
'441400': '梅州市',
'441401': '市辖区',
'441402': '梅江区',
'441403': '梅县区',
'441422': '大埔县',
'441423': '丰顺县',
'441424': '五华县',
'441426': '平远县',
'441427': '蕉岭县',
'441481': '兴宁市',
'441500': '汕尾市',
'441501': '市辖区',
'441502': '城区',
'441521': '海丰县',
'441523': '陆河县',
'441581': '陆丰市',
'441600': '河源市',
'441601': '市辖区',
'441602': '源城区',
'441621': '紫金县',
'441622': '龙川县',
'441623': '连平县',
'441624': '和平县',
'441625': '东源县',
'441700': '阳江市',
'441701': '市辖区',
'441702': '江城区',
'441704': '阳东区',
'441721': '阳西县',
'441781': '阳春市',
'441800': '清远市',
'441801': '市辖区',
'441802': '清城区',
'441803': '清新区',
'441821': '佛冈县',
'441823': '阳山县',
'441825': '连山壮族瑶族自治县',
'441826': '连南瑶族自治县',
'441881': '英德市',
'441882': '连州市',
'441900': '东莞市',
'442000': '中山市',
'445100': '潮州市',
'445101': '市辖区',
'445102': '湘桥区',
'445103': '潮安区',
'445122': '饶平县',
'445200': '揭阳市',
'445201': '市辖区',
'445202': '榕城区',
'445203': '揭东区',
'445222': '揭西县',
'445224': '惠来县',
'445281': '普宁市',
'445300': '云浮市',
'445301': '市辖区',
'445302': '云城区',
'445303': '云安区',
'445321': '新兴县',
'445322': '郁南县',
'445381': '罗定市',
'450000': '广西壮族自治区',
'450100': '南宁市',
'450101': '市辖区',
'450102': '兴宁区',
'450103': '青秀区',
'450105': '江南区',
'450107': '西乡塘区',
'450108': '良庆区',
'450109': '邕宁区',
'450110': '武鸣区',
'450123': '隆安县',
'450124': '马山县',
'450125': '上林县',
'450126': '宾阳县',
'450127': '横县',
'450200': '柳州市',
'450201': '市辖区',
'450202': '城中区',
'450203': '鱼峰区',
'450204': '柳南区',
'450205': '柳北区',
'450206': '柳江区',
'450222': '柳城县',
'450223': '鹿寨县',
'450224': '融安县',
'450225': '融水苗族自治县',
'450226': '三江侗族自治县',
'450300': '桂林市',
'450301': '市辖区',
'450302': '秀峰区',
'450303': '叠彩区',
'450304': '象山区',
'450305': '七星区',
'450311': '雁山区',
'450312': '临桂区',
'450321': '阳朔县',
'450323': '灵川县',
'450324': '全州县',
'450325': '兴安县',
'450326': '永福县',
'450327': '灌阳县',
'450328': '龙胜各族自治县',
'450329': '资源县',
'450330': '平乐县',
'450332': '恭城瑶族自治县',
'450381': '荔浦市',
'450400': '梧州市',
'450401': '市辖区',
'450403': '万秀区',
'450405': '长洲区',
'450406': '龙圩区',
'450421': '苍梧县',
'450422': '藤县',
'450423': '蒙山县',
'450481': '岑溪市',
'450500': '北海市',
'450501': '市辖区',
'450502': '海城区',
'450503': '银海区',
'450512': '铁山港区',
'450521': '合浦县',
'450600': '防城港市',
'450601': '市辖区',
'450602': '港口区',
'450603': '防城区',
'450621': '上思县',
'450681': '东兴市',
'450700': '钦州市',
'450701': '市辖区',
'450702': '钦南区',
'450703': '钦北区',
'450721': '灵山县',
'450722': '浦北县',
'450800': '贵港市',
'450801': '市辖区',
'450802': '港北区',
'450803': '港南区',
'450804': '覃塘区',
'450821': '平南县',
'450881': '桂平市',
'450900': '玉林市',
'450901': '市辖区',
'450902': '玉州区',
'450903': '福绵区',
'450921': '容县',
'450922': '陆川县',
'450923': '博白县',
'450924': '兴业县',
'450981': '北流市',
'451000': '百色市',
'451001': '市辖区',
'451002': '右江区',
'451021': '田阳县',
'451022': '田东县',
'451023': '平果县',
'451024': '德保县',
'451026': '那坡县',
'451027': '凌云县',
'451028': '乐业县',
'451029': '田林县',
'451030': '西林县',
'451031': '隆林各族自治县',
'451081': '靖西市',
'451100': '贺州市',
'451101': '市辖区',
'451102': '八步区',
'451103': '平桂区',
'451121': '昭平县',
'451122': '钟山县',
'451123': '富川瑶族自治县',
'451200': '河池市',
'451201': '市辖区',
'451202': '金城江区',
'451203': '宜州区',
'451221': '南丹县',
'451222': '天峨县',
'451223': '凤山县',
'451224': '东兰县',
'451225': '罗城仫佬族自治县',
'451226': '环江毛南族自治县',
'451227': '巴马瑶族自治县',
'451228': '都安瑶族自治县',
'451229': '大化瑶族自治县',
'451300': '来宾市',
'451301': '市辖区',
'451302': '兴宾区',
'451321': '忻城县',
'451322': '象州县',
'451323': '武宣县',
'451324': '金秀瑶族自治县',
'451381': '合山市',
'451400': '崇左市',
'451401': '市辖区',
'451402': '江州区',
'451421': '扶绥县',
'451422': '宁明县',
'451423': '龙州县',
'451424': '大新县',
'451425': '天等县',
'451481': '凭祥市',
'460000': '海南省',
'460100': '海口市',
'460101': '市辖区',
'460105': '秀英区',
'460106': '龙华区',
'460107': '琼山区',
'460108': '美兰区',
'460200': '三亚市',
'460201': '市辖区',
'460202': '海棠区',
'460203': '吉阳区',
'460204': '天涯区',
'460205': '崖州区',
'460300': '三沙市',
'460321': '西沙群岛',
'460322': '南沙群岛',
'460323': '中沙群岛的岛礁及其海域',
'460400': '儋州市',
'469000': '省直辖县级行政区划',
'469001': '五指山市',
'469002': '琼海市',
'469005': '文昌市',
'469006': '万宁市',
'469007': '东方市',
'469021': '定安县',
'469022': '屯昌县',
'469023': '澄迈县',
'469024': '临高县',
'469025': '白沙黎族自治县',
'469026': '昌江黎族自治县',
'469027': '乐东黎族自治县',
'469028': '陵水黎族自治县',
'469029': '保亭黎族苗族自治县',
'469030': '琼中黎族苗族自治县',
'500000': '重庆市',
'500100': '市辖区',
'500101': '万州区',
'500102': '涪陵区',
'500103': '渝中区',
'500104': '大渡口区',
'500105': '江北区',
'500106': '沙坪坝区',
'500107': '九龙坡区',
'500108': '南岸区',
'500109': '北碚区',
'500110': '綦江区',
'500111': '大足区',
'500112': '渝北区',
'500113': '巴南区',
'500114': '黔江区',
'500115': '长寿区',
'500116': '江津区',
'500117': '合川区',
'500118': '永川区',
'500119': '南川区',
'500120': '璧山区',
'500151': '铜梁区',
'500152': '潼南区',
'500153': '荣昌区',
'500154': '开州区',
'500155': '梁平区',
'500156': '武隆区',
'500200': '县',
'500229': '城口县',
'500230': '丰都县',
'500231': '垫江县',
'500233': '忠县',
'500235': '云阳县',
'500236': '奉节县',
'500237': '巫山县',
'500238': '巫溪县',
'500240': '石柱土家族自治县',
'500241': '秀山土家族苗族自治县',
'500242': '酉阳土家族苗族自治县',
'500243': '彭水苗族土家族自治县',
'510000': '四川省',
'510100': '成都市',
'510101': '市辖区',
'510104': '锦江区',
'510105': '青羊区',
'510106': '金牛区',
'510107': '武侯区',
'510108': '成华区',
'510112': '龙泉驿区',
'510113': '青白江区',
'510114': '新都区',
'510115': '温江区',
'510116': '双流区',
'510117': '郫都区',
'510121': '金堂县',
'510129': '大邑县',
'510131': '蒲江县',
'510132': '新津县',
'510181': '都江堰市',
'510182': '彭州市',
'510183': '邛崃市',
'510184': '崇州市',
'510185': '简阳市',
'510300': '自贡市',
'510301': '市辖区',
'510302': '自流井区',
'510303': '贡井区',
'510304': '大安区',
'510311': '沿滩区',
'510321': '荣县',
'510322': '富顺县',
'510400': '攀枝花市',
'510401': '市辖区',
'510402': '东区',
'510403': '西区',
'510411': '仁和区',
'510421': '米易县',
'510422': '盐边县',
'510500': '泸州市',
'510501': '市辖区',
'510502': '江阳区',
'510503': '纳溪区',
'510504': '龙马潭区',
'510521': '泸县',
'510522': '合江县',
'510524': '叙永县',
'510525': '古蔺县',
'510600': '德阳市',
'510601': '市辖区',
'510603': '旌阳区',
'510604': '罗江区',
'510623': '中江县',
'510681': '广汉市',
'510682': '什邡市',
'510683': '绵竹市',
'510700': '绵阳市',
'510701': '市辖区',
'510703': '涪城区',
'510704': '游仙区',
'510705': '安州区',
'510722': '三台县',
'510723': '盐亭县',
'510725': '梓潼县',
'510726': '北川羌族自治县',
'510727': '平武县',
'510781': '江油市',
'510800': '广元市',
'510801': '市辖区',
'510802': '利州区',
'510811': '昭化区',
'510812': '朝天区',
'510821': '旺苍县',
'510822': '青川县',
'510823': '剑阁县',
'510824': '苍溪县',
'510900': '遂宁市',
'510901': '市辖区',
'510903': '船山区',
'510904': '安居区',
'510921': '蓬溪县',
'510922': '射洪县',
'510923': '大英县',
'511000': '内江市',
'511001': '市辖区',
'511002': '市中区',
'511011': '东兴区',
'511024': '威远县',
'511025': '资中县',
'511071': '内江经济开发区',
'511083': '隆昌市',
'511100': '乐山市',
'511101': '市辖区',
'511102': '市中区',
'511111': '沙湾区',
'511112': '五通桥区',
'511113': '金口河区',
'511123': '犍为县',
'511124': '井研县',
'511126': '夹江县',
'511129': '沐川县',
'511132': '峨边彝族自治县',
'511133': '马边彝族自治县',
'511181': '峨眉山市',
'511300': '南充市',
'511301': '市辖区',
'511302': '顺庆区',
'511303': '高坪区',
'511304': '嘉陵区',
'511321': '南部县',
'511322': '营山县',
'511323': '蓬安县',
'511324': '仪陇县',
'511325': '西充县',
'511381': '阆中市',
'511400': '眉山市',
'511401': '市辖区',
'511402': '东坡区',
'511403': '彭山区',
'511421': '仁寿县',
'511423': '洪雅县',
'511424': '丹棱县',
'511425': '青神县',
'511500': '宜宾市',
'511501': '市辖区',
'511502': '翠屏区',
'511503': '南溪区',
'511504': '叙州区',
'511523': '江安县',
'511524': '长宁县',
'511525': '高县',
'511526': '珙县',
'511527': '筠连县',
'511528': '兴文县',
'511529': '屏山县',
'511600': '广安市',
'511601': '市辖区',
'511602': '广安区',
'511603': '前锋区',
'511621': '岳池县',
'511622': '武胜县',
'511623': '邻水县',
'511681': '华蓥市',
'511700': '达州市',
'511701': '市辖区',
'511702': '通川区',
'511703': '达川区',
'511722': '宣汉县',
'511723': '开江县',
'511724': '大竹县',
'511725': '渠县',
'511771': '达州经济开发区',
'511781': '万源市',
'511800': '雅安市',
'511801': '市辖区',
'511802': '雨城区',
'511803': '名山区',
'511822': '荥经县',
'511823': '汉源县',
'511824': '石棉县',
'511825': '天全县',
'511826': '芦山县',
'511827': '宝兴县',
'511900': '巴中市',
'511901': '市辖区',
'511902': '巴州区',
'511903': '恩阳区',
'511921': '通江县',
'511922': '南江县',
'511923': '平昌县',
'511971': '巴中经济开发区',
'512000': '资阳市',
'512001': '市辖区',
'512002': '雁江区',
'512021': '安岳县',
'512022': '乐至县',
'513200': '阿坝藏族羌族自治州',
'513201': '马尔康市',
'513221': '汶川县',
'513222': '理县',
'513223': '茂县',
'513224': '松潘县',
'513225': '九寨沟县',
'513226': '金川县',
'513227': '小金县',
'513228': '黑水县',
'513230': '壤塘县',
'513231': '阿坝县',
'513232': '若尔盖县',
'513233': '红原县',
'513300': '甘孜藏族自治州',
'513301': '康定市',
'513322': '泸定县',
'513323': '丹巴县',
'513324': '九龙县',
'513325': '雅江县',
'513326': '道孚县',
'513327': '炉霍县',
'513328': '甘孜县',
'513329': '新龙县',
'513330': '德格县',
'513331': '白玉县',
'513332': '石渠县',
'513333': '色达县',
'513334': '理塘县',
'513335': '巴塘县',
'513336': '乡城县',
'513337': '稻城县',
'513338': '得荣县',
'513400': '凉山彝族自治州',
'513401': '西昌市',
'513422': '木里藏族自治县',
'513423': '盐源县',
'513424': '德昌县',
'513425': '会理县',
'513426': '会东县',
'513427': '宁南县',
'513428': '普格县',
'513429': '布拖县',
'513430': '金阳县',
'513431': '昭觉县',
'513432': '喜德县',
'513433': '冕宁县',
'513434': '越西县',
'513435': '甘洛县',
'513436': '美姑县',
'513437': '雷波县',
'520000': '贵州省',
'520100': '贵阳市',
'520101': '市辖区',
'520102': '南明区',
'520103': '云岩区',
'520111': '花溪区',
'520112': '乌当区',
'520113': '白云区',
'520115': '观山湖区',
'520121': '开阳县',
'520122': '息烽县',
'520123': '修文县',
'520181': '清镇市',
'520200': '六盘水市',
'520201': '钟山区',
'520203': '六枝特区',
'520221': '水城县',
'520281': '盘州市',
'520300': '遵义市',
'520301': '市辖区',
'520302': '红花岗区',
'520303': '汇川区',
'520304': '播州区',
'520322': '桐梓县',
'520323': '绥阳县',
'520324': '正安县',
'520325': '道真仡佬族苗族自治县',
'520326': '务川仡佬族苗族自治县',
'520327': '凤冈县',
'520328': '湄潭县',
'520329': '余庆县',
'520330': '习水县',
'520381': '赤水市',
'520382': '仁怀市',
'520400': '安顺市',
'520401': '市辖区',
'520402': '西秀区',
'520403': '平坝区',
'520422': '普定县',
'520423': '镇宁布依族苗族自治县',
'520424': '关岭布依族苗族自治县',
'520425': '紫云苗族布依族自治县',
'520500': '毕节市',
'520501': '市辖区',
'520502': '七星关区',
'520521': '大方县',
'520522': '黔西县',
'520523': '金沙县',
'520524': '织金县',
'520525': '纳雍县',
'520526': '威宁彝族回族苗族自治县',
'520527': '赫章县',
'520600': '铜仁市',
'520601': '市辖区',
'520602': '碧江区',
'520603': '万山区',
'520621': '江口县',
'520622': '玉屏侗族自治县',
'520623': '石阡县',
'520624': '思南县',
'520625': '印江土家族苗族自治县',
'520626': '德江县',
'520627': '沿河土家族自治县',
'520628': '松桃苗族自治县',
'522300': '黔西南布依族苗族自治州',
'522301': '兴义市',
'522302': '兴仁市',
'522323': '普安县',
'522324': '晴隆县',
'522325': '贞丰县',
'522326': '望谟县',
'522327': '册亨县',
'522328': '安龙县',
'522600': '黔东南苗族侗族自治州',
'522601': '凯里市',
'522622': '黄平县',
'522623': '施秉县',
'522624': '三穗县',
'522625': '镇远县',
'522626': '岑巩县',
'522627': '天柱县',
'522628': '锦屏县',
'522629': '剑河县',
'522630': '台江县',
'522631': '黎平县',
'522632': '榕江县',
'522633': '从江县',
'522634': '雷山县',
'522635': '麻江县',
'522636': '丹寨县',
'522700': '黔南布依族苗族自治州',
'522701': '都匀市',
'522702': '福泉市',
'522722': '荔波县',
'522723': '贵定县',
'522725': '瓮安县',
'522726': '独山县',
'522727': '平塘县',
'522728': '罗甸县',
'522729': '长顺县',
'522730': '龙里县',
'522731': '惠水县',
'522732': '三都水族自治县',
'530000': '云南省',
'530100': '昆明市',
'530101': '市辖区',
'530102': '五华区',
'530103': '盘龙区',
'530111': '官渡区',
'530112': '西山区',
'530113': '东川区',
'530114': '呈贡区',
'530115': '晋宁区',
'530124': '富民县',
'530125': '宜良县',
'530126': '石林彝族自治县',
'530127': '嵩明县',
'530128': '禄劝彝族苗族自治县',
'530129': '寻甸回族彝族自治县',
'530181': '安宁市',
'530300': '曲靖市',
'530301': '市辖区',
'530302': '麒麟区',
'530303': '沾益区',
'530304': '马龙区',
'530322': '陆良县',
'530323': '师宗县',
'530324': '罗平县',
'530325': '富源县',
'530326': '会泽县',
'530381': '宣威市',
'530400': '玉溪市',
'530401': '市辖区',
'530402': '红塔区',
'530403': '江川区',
'530422': '澄江县',
'530423': '通海县',
'530424': '华宁县',
'530425': '易门县',
'530426': '峨山彝族自治县',
'530427': '新平彝族傣族自治县',
'530428': '元江哈尼族彝族傣族自治县',
'530500': '保山市',
'530501': '市辖区',
'530502': '隆阳区',
'530521': '施甸县',
'530523': '龙陵县',
'530524': '昌宁县',
'530581': '腾冲市',
'530600': '昭通市',
'530601': '市辖区',
'530602': '昭阳区',
'530621': '鲁甸县',
'530622': '巧家县',
'530623': '盐津县',
'530624': '大关县',
'530625': '永善县',
'530626': '绥江县',
'530627': '镇雄县',
'530628': '彝良县',
'530629': '威信县',
'530681': '水富市',
'530700': '丽江市',
'530701': '市辖区',
'530702': '古城区',
'530721': '玉龙纳西族自治县',
'530722': '永胜县',
'530723': '华坪县',
'530724': '宁蒗彝族自治县',
'530800': '普洱市',
'530801': '市辖区',
'530802': '思茅区',
'530821': '宁洱哈尼族彝族自治县',
'530822': '墨江哈尼族自治县',
'530823': '景东彝族自治县',
'530824': '景谷傣族彝族自治县',
'530825': '镇沅彝族哈尼族拉祜族自治县',
'530826': '江城哈尼族彝族自治县',
'530827': '孟连傣族拉祜族佤族自治县',
'530828': '澜沧拉祜族自治县',
'530829': '西盟佤族自治县',
'530900': '临沧市',
'530901': '市辖区',
'530902': '临翔区',
'530921': '凤庆县',
'530922': '云县',
'530923': '永德县',
'530924': '镇康县',
'530925': '双江拉祜族佤族布朗族傣族自治县',
'530926': '耿马傣族佤族自治县',
'530927': '沧源佤族自治县',
'532300': '楚雄彝族自治州',
'532301': '楚雄市',
'532322': '双柏县',
'532323': '牟定县',
'532324': '南华县',
'532325': '姚安县',
'532326': '大姚县',
'532327': '永仁县',
'532328': '元谋县',
'532329': '武定县',
'532331': '禄丰县',
'532500': '红河哈尼族彝族自治州',
'532501': '个旧市',
'532502': '开远市',
'532503': '蒙自市',
'532504': '弥勒市',
'532523': '屏边苗族自治县',
'532524': '建水县',
'532525': '石屏县',
'532527': '泸西县',
'532528': '元阳县',
'532529': '红河县',
'532530': '金平苗族瑶族傣族自治县',
'532531': '绿春县',
'532532': '河口瑶族自治县',
'532600': '文山壮族苗族自治州',
'532601': '文山市',
'532622': '砚山县',
'532623': '西畴县',
'532624': '麻栗坡县',
'532625': '马关县',
'532626': '丘北县',
'532627': '广南县',
'532628': '富宁县',
'532800': '西双版纳傣族自治州',
'532801': '景洪市',
'532822': '勐海县',
'532823': '勐腊县',
'532900': '大理白族自治州',
'532901': '大理市',
'532922': '漾濞彝族自治县',
'532923': '祥云县',
'532924': '宾川县',
'532925': '弥渡县',
'532926': '南涧彝族自治县',
'532927': '巍山彝族回族自治县',
'532928': '永平县',
'532929': '云龙县',
'532930': '洱源县',
'532931': '剑川县',
'532932': '鹤庆县',
'533100': '德宏傣族景颇族自治州',
'533102': '瑞丽市',
'533103': '芒市',
'533122': '梁河县',
'533123': '盈江县',
'533124': '陇川县',
'533300': '怒江傈僳族自治州',
'533301': '泸水市',
'533323': '福贡县',
'533324': '贡山独龙族怒族自治县',
'533325': '兰坪白族普米族自治县',
'533400': '迪庆藏族自治州',
'533401': '香格里拉市',
'533422': '德钦县',
'533423': '维西傈僳族自治县',
'540000': '西藏自治区',
'540100': '拉萨市',
'540101': '市辖区',
'540102': '城关区',
'540103': '堆龙德庆区',
'540104': '达孜区',
'540121': '林周县',
'540122': '当雄县',
'540123': '尼木县',
'540124': '曲水县',
'540127': '墨竹工卡县',
'540171': '格尔木藏青工业园区',
'540172': '拉萨经济技术开发区',
'540173': '西藏文化旅游创意园区',
'540174': '达孜工业园区',
'540200': '日喀则市',
'540202': '桑珠孜区',
'540221': '南木林县',
'540222': '江孜县',
'540223': '定日县',
'540224': '萨迦县',
'540225': '拉孜县',
'540226': '昂仁县',
'540227': '谢通门县',
'540228': '白朗县',
'540229': '仁布县',
'540230': '康马县',
'540231': '定结县',
'540232': '仲巴县',
'540233': '亚东县',
'540234': '吉隆县',
'540235': '聂拉木县',
'540236': '萨嘎县',
'540237': '岗巴县',
'540300': '昌都市',
'540302': '卡若区',
'540321': '江达县',
'540322': '贡觉县',
'540323': '类乌齐县',
'540324': '丁青县',
'540325': '察雅县',
'540326': '八宿县',
'540327': '左贡县',
'540328': '芒康县',
'540329': '洛隆县',
'540330': '边坝县',
'540400': '林芝市',
'540402': '巴宜区',
'540421': '工布江达县',
'540422': '米林县',
'540423': '墨脱县',
'540424': '波密县',
'540425': '察隅县',
'540426': '朗县',
'540500': '山南市',
'540501': '市辖区',
'540502': '乃东区',
'540521': '扎囊县',
'540522': '贡嘎县',
'540523': '桑日县',
'540524': '琼结县',
'540525': '曲松县',
'540526': '措美县',
'540527': '洛扎县',
'540528': '加查县',
'540529': '隆子县',
'540530': '错那县',
'540531': '浪卡子县',
'540600': '那曲市',
'540602': '色尼区',
'540621': '嘉黎县',
'540622': '比如县',
'540623': '聂荣县',
'540624': '安多县',
'540625': '申扎县',
'540626': '索县',
'540627': '班戈县',
'540628': '巴青县',
'540629': '尼玛县',
'540630': '双湖县',
'542500': '阿里地区',
'542521': '普兰县',
'542522': '札达县',
'542523': '噶尔县',
'542524': '日土县',
'542525': '革吉县',
'542526': '改则县',
'542527': '措勤县',
'610000': '陕西省',
'610100': '西安市',
'610101': '市辖区',
'610102': '新城区',
'610103': '碑林区',
'610104': '莲湖区',
'610111': '灞桥区',
'610112': '未央区',
'610113': '雁塔区',
'610114': '阎良区',
'610115': '临潼区',
'610116': '长安区',
'610117': '高陵区',
'610118': '鄠邑区',
'610122': '蓝田县',
'610124': '周至县',
'610200': '铜川市',
'610201': '市辖区',
'610202': '王益区',
'610203': '印台区',
'610204': '耀州区',
'610222': '宜君县',
'610300': '宝鸡市',
'610301': '市辖区',
'610302': '渭滨区',
'610303': '金台区',
'610304': '陈仓区',
'610322': '凤翔县',
'610323': '岐山县',
'610324': '扶风县',
'610326': '眉县',
'610327': '陇县',
'610328': '千阳县',
'610329': '麟游县',
'610330': '凤县',
'610331': '太白县',
'610400': '咸阳市',
'610401': '市辖区',
'610402': '秦都区',
'610403': '杨陵区',
'610404': '渭城区',
'610422': '三原县',
'610423': '泾阳县',
'610424': '乾县',
'610425': '礼泉县',
'610426': '永寿县',
'610428': '长武县',
'610429': '旬邑县',
'610430': '淳化县',
'610431': '武功县',
'610481': '兴平市',
'610482': '彬州市',
'610500': '渭南市',
'610501': '市辖区',
'610502': '临渭区',
'610503': '华州区',
'610522': '潼关县',
'610523': '大荔县',
'610524': '合阳县',
'610525': '澄城县',
'610526': '蒲城县',
'610527': '白水县',
'610528': '富平县',
'610581': '韩城市',
'610582': '华阴市',
'610600': '延安市',
'610601': '市辖区',
'610602': '宝塔区',
'610603': '安塞区',
'610621': '延长县',
'610622': '延川县',
'610623': '子长县',
'610625': '志丹县',
'610626': '吴起县',
'610627': '甘泉县',
'610628': '富县',
'610629': '洛川县',
'610630': '宜川县',
'610631': '黄龙县',
'610632': '黄陵县',
'610700': '汉中市',
'610701': '市辖区',
'610702': '汉台区',
'610703': '南郑区',
'610722': '城固县',
'610723': '洋县',
'610724': '西乡县',
'610725': '勉县',
'610726': '宁强县',
'610727': '略阳县',
'610728': '镇巴县',
'610729': '留坝县',
'610730': '佛坪县',
'610800': '榆林市',
'610801': '市辖区',
'610802': '榆阳区',
'610803': '横山区',
'610822': '府谷县',
'610824': '靖边县',
'610825': '定边县',
'610826': '绥德县',
'610827': '米脂县',
'610828': '佳县',
'610829': '吴堡县',
'610830': '清涧县',
'610831': '子洲县',
'610881': '神木市',
'610900': '安康市',
'610901': '市辖区',
'610902': '汉滨区',
'610921': '汉阴县',
'610922': '石泉县',
'610923': '宁陕县',
'610924': '紫阳县',
'610925': '岚皋县',
'610926': '平利县',
'610927': '镇坪县',
'610928': '旬阳县',
'610929': '白河县',
'611000': '商洛市',
'611001': '市辖区',
'611002': '商州区',
'611021': '洛南县',
'611022': '丹凤县',
'611023': '商南县',
'611024': '山阳县',
'611025': '镇安县',
'611026': '柞水县',
'620000': '甘肃省',
'620100': '兰州市',
'620101': '市辖区',
'620102': '城关区',
'620103': '七里河区',
'620104': '西固区',
'620105': '安宁区',
'620111': '红古区',
'620121': '永登县',
'620122': '皋兰县',
'620123': '榆中县',
'620171': '兰州新区',
'620200': '嘉峪关市',
'620201': '市辖区',
'620300': '金昌市',
'620301': '市辖区',
'620302': '金川区',
'620321': '永昌县',
'620400': '白银市',
'620401': '市辖区',
'620402': '白银区',
'620403': '平川区',
'620421': '靖远县',
'620422': '会宁县',
'620423': '景泰县',
'620500': '天水市',
'620501': '市辖区',
'620502': '秦州区',
'620503': '麦积区',
'620521': '清水县',
'620522': '秦安县',
'620523': '甘谷县',
'620524': '武山县',
'620525': '张家川回族自治县',
'620600': '武威市',
'620601': '市辖区',
'620602': '凉州区',
'620621': '民勤县',
'620622': '古浪县',
'620623': '天祝藏族自治县',
'620700': '张掖市',
'620701': '市辖区',
'620702': '甘州区',
'620721': '肃南裕固族自治县',
'620722': '民乐县',
'620723': '临泽县',
'620724': '高台县',
'620725': '山丹县',
'620800': '平凉市',
'620801': '市辖区',
'620802': '崆峒区',
'620821': '泾川县',
'620822': '灵台县',
'620823': '崇信县',
'620825': '庄浪县',
'620826': '静宁县',
'620881': '华亭市',
'620900': '酒泉市',
'620901': '市辖区',
'620902': '肃州区',
'620921': '金塔县',
'620922': '瓜州县',
'620923': '肃北蒙古族自治县',
'620924': '阿克塞哈萨克族自治县',
'620981': '玉门市',
'620982': '敦煌市',
'621000': '庆阳市',
'621001': '市辖区',
'621002': '西峰区',
'621021': '庆城县',
'621022': '环县',
'621023': '华池县',
'621024': '合水县',
'621025': '正宁县',
'621026': '宁县',
'621027': '镇原县',
'621100': '定西市',
'621101': '市辖区',
'621102': '安定区',
'621121': '通渭县',
'621122': '陇西县',
'621123': '渭源县',
'621124': '临洮县',
'621125': '漳县',
'621126': '岷县',
'621200': '陇南市',
'621201': '市辖区',
'621202': '武都区',
'621221': '成县',
'621222': '文县',
'621223': '宕昌县',
'621224': '康县',
'621225': '西和县',
'621226': '礼县',
'621227': '徽县',
'621228': '两当县',
'622900': '临夏回族自治州',
'622901': '临夏市',
'622921': '临夏县',
'622922': '康乐县',
'622923': '永靖县',
'622924': '广河县',
'622925': '和政县',
'622926': '东乡族自治县',
'622927': '积石山保安族东乡族撒拉族自治县',
'623000': '甘南藏族自治州',
'623001': '合作市',
'623021': '临潭县',
'623022': '卓尼县',
'623023': '舟曲县',
'623024': '迭部县',
'623025': '玛曲县',
'623026': '碌曲县',
'623027': '夏河县',
'630000': '青海省',
'630100': '西宁市',
'630101': '市辖区',
'630102': '城东区',
'630103': '城中区',
'630104': '城西区',
'630105': '城北区',
'630121': '大通回族土族自治县',
'630122': '湟中县',
'630123': '湟源县',
'630200': '海东市',
'630202': '乐都区',
'630203': '平安区',
'630222': '民和回族土族自治县',
'630223': '互助土族自治县',
'630224': '化隆回族自治县',
'630225': '循化撒拉族自治县',
'632200': '海北藏族自治州',
'632221': '门源回族自治县',
'632222': '祁连县',
'632223': '海晏县',
'632224': '刚察县',
'632300': '黄南藏族自治州',
'632321': '同仁县',
'632322': '尖扎县',
'632323': '泽库县',
'632324': '河南蒙古族自治县',
'632500': '海南藏族自治州',
'632521': '共和县',
'632522': '同德县',
'632523': '贵德县',
'632524': '兴海县',
'632525': '贵南县',
'632600': '果洛藏族自治州',
'632621': '玛沁县',
'632622': '班玛县',
'632623': '甘德县',
'632624': '达日县',
'632625': '久治县',
'632626': '玛多县',
'632700': '玉树藏族自治州',
'632701': '玉树市',
'632722': '杂多县',
'632723': '称多县',
'632724': '治多县',
'632725': '囊谦县',
'632726': '曲麻莱县',
'632800': '海西蒙古族藏族自治州',
'632801': '格尔木市',
'632802': '德令哈市',
'632803': '茫崖市',
'632821': '乌兰县',
'632822': '都兰县',
'632823': '天峻县',
'632857': '大柴旦行政委员会',
'640000': '宁夏回族自治区',
'640100': '银川市',
'640101': '市辖区',
'640104': '兴庆区',
'640105': '西夏区',
'640106': '金凤区',
'640121': '永宁县',
'640122': '贺兰县',
'640181': '灵武市',
'640200': '石嘴山市',
'640201': '市辖区',
'640202': '大武口区',
'640205': '惠农区',
'640221': '平罗县',
'640300': '吴忠市',
'640301': '市辖区',
'640302': '利通区',
'640303': '红寺堡区',
'640323': '盐池县',
'640324': '同心县',
'640381': '青铜峡市',
'640400': '固原市',
'640401': '市辖区',
'640402': '原州区',
'640422': '西吉县',
'640423': '隆德县',
'640424': '泾源县',
'640425': '彭阳县',
'640500': '中卫市',
'640501': '市辖区',
'640502': '沙坡头区',
'640521': '中宁县',
'640522': '海原县',
'650000': '新疆维吾尔自治区',
'650100': '乌鲁木齐市',
'650101': '市辖区',
'650102': '天山区',
'650103': '沙依巴克区',
'650104': '新市区',
'650105': '水磨沟区',
'650106': '头屯河区',
'650107': '达坂城区',
'650109': '米东区',
'650121': '乌鲁木齐县',
'650171': '乌鲁木齐经济技术开发区',
'650172': '乌鲁木齐高新技术产业开发区',
'650200': '克拉玛依市',
'650201': '市辖区',
'650202': '独山子区',
'650203': '克拉玛依区',
'650204': '白碱滩区',
'650205': '乌尔禾区',
'650400': '吐鲁番市',
'650402': '高昌区',
'650421': '鄯善县',
'650422': '托克逊县',
'650500': '哈密市',
'650502': '伊州区',
'650521': '巴里坤哈萨克自治县',
'650522': '伊吾县',
'652300': '昌吉回族自治州',
'652301': '昌吉市',
'652302': '阜康市',
'652323': '呼图壁县',
'652324': '玛纳斯县',
'652325': '奇台县',
'652327': '吉木萨尔县',
'652328': '木垒哈萨克自治县',
'652700': '博尔塔拉蒙古自治州',
'652701': '博乐市',
'652702': '阿拉山口市',
'652722': '精河县',
'652723': '温泉县',
'652800': '巴音郭楞蒙古自治州',
'652801': '库尔勒市',
'652822': '轮台县',
'652823': '尉犁县',
'652824': '若羌县',
'652825': '且末县',
'652826': '焉耆回族自治县',
'652827': '和静县',
'652828': '和硕县',
'652829': '博湖县',
'652871': '库尔勒经济技术开发区',
'652900': '阿克苏地区',
'652901': '阿克苏市',
'652922': '温宿县',
'652923': '库车县',
'652924': '沙雅县',
'652925': '新和县',
'652926': '拜城县',
'652927': '乌什县',
'652928': '阿瓦提县',
'652929': '柯坪县',
'653000': '克孜勒苏柯尔克孜自治州',
'653001': '阿图什市',
'653022': '阿克陶县',
'653023': '阿合奇县',
'653024': '乌恰县',
'653100': '喀什地区',
'653101': '喀什市',
'653121': '疏附县',
'653122': '疏勒县',
'653123': '英吉沙县',
'653124': '泽普县',
'653125': '莎车县',
'653126': '叶城县',
'653127': '麦盖提县',
'653128': '岳普湖县',
'653129': '伽师县',
'653130': '巴楚县',
'653131': '塔什库尔干塔吉克自治县',
'653200': '和田地区',
'653201': '和田市',
'653221': '和田县',
'653222': '墨玉县',
'653223': '皮山县',
'653224': '洛浦县',
'653225': '策勒县',
'653226': '于田县',
'653227': '民丰县',
'654000': '伊犁哈萨克自治州',
'654002': '伊宁市',
'654003': '奎屯市',
'654004': '霍尔果斯市',
'654021': '伊宁县',
'654022': '察布查尔锡伯自治县',
'654023': '霍城县',
'654024': '巩留县',
'654025': '新源县',
'654026': '昭苏县',
'654027': '特克斯县',
'654028': '尼勒克县',
'654200': '塔城地区',
'654201': '塔城市',
'654202': '乌苏市',
'654221': '额敏县',
'654223': '沙湾县',
'654224': '托里县',
'654225': '裕民县',
'654226': '和布克赛尔蒙古自治县',
'654300': '阿勒泰地区',
'654301': '阿勒泰市',
'654321': '布尔津县',
'654322': '富蕴县',
'654323': '福海县',
'654324': '哈巴河县',
'654325': '青河县',
'654326': '吉木乃县',
'659000': '自治区直辖县级行政区划',
'659001': '石河子市',
'659002': '阿拉尔市',
'659003': '图木舒克市',
'659004': '五家渠市',
'659006': '铁门关市',
'710000': '台湾省',
'820000': '澳门特别行政区',
'810000': '香港特别行政区',
} | PypiClean |
/DI_engine-0.4.9-py3-none-any.whl/ding/torch_utils/network/res_block.py | from typing import Union
import torch
import torch.nn as nn
from .nn_module import conv2d_block, fc_block
class ResBlock(nn.Module):
r"""
Overview:
Residual Block with 2D convolution layers, including 3 types:
basic block:
input channel: C
x -> 3*3*C -> norm -> act -> 3*3*C -> norm -> act -> out
\__________________________________________/+
bottleneck block:
x -> 1*1*(1/4*C) -> norm -> act -> 3*3*(1/4*C) -> norm -> act -> 1*1*C -> norm -> act -> out
\_____________________________________________________________________________/+
downsample block: used in EfficientZero
input channel: C
x -> 3*3*C -> norm -> act -> 3*3*C -> norm -> act -> out
\__________________ 3*3*C ____________________/+
Interfaces:
forward
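    Examples:
        >>> block = ResBlock(64, res_type='basic')        # illustrative usage
        >>> y = block(torch.randn(2, 64, 32, 32))         # 'basic' preserves the input shape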
"""
def __init__(
self,
in_channels: int,
activation: nn.Module = nn.ReLU(),
norm_type: str = 'BN',
res_type: str = 'basic',
bias: bool = True,
out_channels: Union[int, None] = None,
) -> None:
"""
Overview:
Init the 2D convolution residual block.
Arguments:
- in_channels (:obj:`int`): Number of channels in the input tensor.
- activation (:obj:`nn.Module`): the optional activation function.
- norm_type (:obj:`str`): type of the normalization, default set to 'BN'(Batch Normalization), \
supports ['BN', 'LN', 'IN', 'GN', 'SyncBN', None].
- res_type (:obj:`str`): type of residual block, supports ['basic', 'bottleneck', 'downsample']
- bias (:obj:`bool`): whether adds a learnable bias to the conv2d_block. default set to True.
- out_channels (:obj:`int`): Number of channels in the output tensor, default set to None,
which means out_channels = in_channels.
"""
super(ResBlock, self).__init__()
self.act = activation
        assert res_type in ['basic', 'bottleneck', 'downsample'], \
            'residual type only supports basic, bottleneck and downsample, not: {}'.format(res_type)
self.res_type = res_type
if out_channels is None:
out_channels = in_channels
if self.res_type == 'basic':
self.conv1 = conv2d_block(
in_channels, out_channels, 3, 1, 1, activation=self.act, norm_type=norm_type, bias=bias
)
self.conv2 = conv2d_block(
out_channels, out_channels, 3, 1, 1, activation=None, norm_type=norm_type, bias=bias
)
elif self.res_type == 'bottleneck':
self.conv1 = conv2d_block(
in_channels, out_channels, 1, 1, 0, activation=self.act, norm_type=norm_type, bias=bias
)
self.conv2 = conv2d_block(
out_channels, out_channels, 3, 1, 1, activation=self.act, norm_type=norm_type, bias=bias
)
self.conv3 = conv2d_block(
out_channels, out_channels, 1, 1, 0, activation=None, norm_type=norm_type, bias=bias
)
elif self.res_type == 'downsample':
self.conv1 = conv2d_block(
in_channels, out_channels, 3, 2, 1, activation=self.act, norm_type=norm_type, bias=bias
)
self.conv2 = conv2d_block(
out_channels, out_channels, 3, 1, 1, activation=None, norm_type=norm_type, bias=bias
)
self.conv3 = conv2d_block(in_channels, out_channels, 3, 2, 1, activation=None, norm_type=None, bias=bias)
def forward(self, x: torch.Tensor) -> torch.Tensor:
r"""
Overview:
            Return the residual block output.
Arguments:
- x (:obj:`torch.Tensor`): The input tensor.
Returns:
- x (:obj:`torch.Tensor`): The resblock output tensor.
"""
identity = x
x = self.conv1(x)
x = self.conv2(x)
if self.res_type == 'bottleneck':
x = self.conv3(x)
elif self.res_type == 'downsample':
identity = self.conv3(identity)
x = self.act(x + identity)
return x
class ResFCBlock(nn.Module):
r"""
Overview:
Residual Block with 2 fully connected layers.
x -> fc1 -> norm -> act -> fc2 -> norm -> act -> out
\_____________________________________/+
Interfaces:
forward
"""
def __init__(self, in_channels: int, activation: nn.Module = nn.ReLU(), norm_type: str = 'BN'):
r"""
Overview:
Init the fully connected layer residual block.
Arguments:
- in_channels (:obj:`int`): The number of channels in the input tensor.
- activation (:obj:`nn.Module`): The optional activation function.
- norm_type (:obj:`str`): The type of the normalization, default set to 'BN'.
"""
super(ResFCBlock, self).__init__()
self.act = activation
self.fc1 = fc_block(in_channels, in_channels, activation=self.act, norm_type=norm_type)
self.fc2 = fc_block(in_channels, in_channels, activation=None, norm_type=norm_type)
def forward(self, x: torch.Tensor) -> torch.Tensor:
r"""
Overview:
            Return the residual block output.
Arguments:
- x (:obj:`torch.Tensor`): The input tensor.
Returns:
- x (:obj:`torch.Tensor`): The resblock output tensor.
"""
identity = x
x = self.fc1(x)
x = self.fc2(x)
x = self.act(x + identity)
return x | PypiClean |
/FlaskFarm-4.0.104-py3-none-any.whl/flaskfarm/lib/support/base/string.py | import re
import traceback
from . import logger
class SupportString(object):
@classmethod
def get_cate_char_by_first(cls, title): # get_first
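        """Return the initial-consonant bucket ('가'..'하') for the first
        character of ``title``, or '0Z' for non-hangul characters."""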
value = ord(title[0].upper())
if ord('가') <= value < ord('나'): return '가'
if ord('나') <= value < ord('다'): return '나'
if ord('다') <= value < ord('라'): return '다'
if ord('라') <= value < ord('마'): return '라'
if ord('마') <= value < ord('바'): return '마'
if ord('바') <= value < ord('사'): return '바'
if ord('사') <= value < ord('아'): return '사'
if ord('아') <= value < ord('자'): return '아'
if ord('자') <= value < ord('차'): return '자'
if ord('차') <= value < ord('카'): return '차'
if ord('카') <= value < ord('타'): return '카'
if ord('타') <= value < ord('파'): return '타'
if ord('파') <= value < ord('하'): return '파'
if ord('하') <= value < ord('힣'): return '하'
return '0Z'
@classmethod
def is_include_hangul(cls, text):
try:
hanCount = len(re.findall(u'[\u3130-\u318F\uAC00-\uD7A3]+', text))
return hanCount > 0
except:
return False
@classmethod
def language_info(cls, text):
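        """Return a (hangul %, english %) tuple computed over the non
        digit/punctuation characters of ``text``; (0, 0) when nothing but
        digits/punctuation remains, and False if an error occurs."""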
try:
text = text.strip().replace(' ', '')
all_count = len(text)
han_count = len(re.findall('[\u3130-\u318F\uAC00-\uD7A3]', text))
eng_count = len(re.findall('[a-zA-Z]', text))
etc_count = len(re.findall('[0-9]', text))
etc_count += len(re.findall('[-=+,#/\?:^$.@*\"※~&%ㆍ!』\\‘|\(\)\[\]\<\>`\'…》:]', text))
if all_count == etc_count:
return (0,0)
han_percent = int(han_count * 100 / (all_count-etc_count))
eng_percent = int(eng_count * 100 / (all_count-etc_count))
return (han_percent, eng_percent)
except Exception as e:
logger.error(f"Exception:{str(e)}")
logger.error(traceback.format_exc())
return False
@classmethod
def remove_special_char(cls, text):
return re.sub('[-=+,#/\?:^$.@*\"※~&%ㆍ!』\\‘|\(\)\[\]\<\>`\'…》:]', '', text) | PypiClean |
/Mako-1.2.4.tar.gz/Mako-1.2.4/doc/build/syntax.rst | .. _syntax_toplevel:
======
Syntax
======
A Mako template is parsed from a text stream containing any kind
of content, XML, HTML, email text, etc. The template can further
contain Mako-specific directives which represent variable and/or
expression substitutions, control structures (i.e. conditionals
and loops), server-side comments, full blocks of Python code, as
well as various tags that offer additional functionality. All of
these constructs compile into real Python code. This means that
you can leverage the full power of Python in almost every aspect
of a Mako template.
Expression Substitution
=======================
The simplest expression is just a variable substitution. The
syntax for this is the ``${}`` construct, which is inspired by
Perl, Genshi, JSP EL, and others:
.. sourcecode:: mako
this is x: ${x}
Above, the string representation of ``x`` is applied to the
template's output stream. If you're wondering where ``x`` comes
from, it's usually from the :class:`.Context` supplied to the
template's rendering function. If ``x`` was not supplied to the
template and was not otherwise assigned locally, it evaluates to
a special value ``UNDEFINED``. More on that later.
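
For instance, when a template is rendered programmatically, ``x``
would typically be supplied as a keyword argument to ``render()``:

.. sourcecode:: python

    from mako.template import Template

    print(Template("this is x: ${x}").render(x=5))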
The contents within the ``${}`` tag are evaluated by Python
directly, so full expressions are OK:
.. sourcecode:: mako
pythagorean theorem: ${pow(x,2) + pow(y,2)}
The results of the expression are evaluated into a string result
in all cases before being rendered to the output stream, such as
the above example where the expression produces a numeric
result.
Expression Escaping
===================
Mako includes a number of built-in escaping mechanisms,
including HTML, URI and XML escaping, as well as a "trim"
function. These escapes can be added to an expression
substitution using the ``|`` operator:
.. sourcecode:: mako
${"this is some text" | u}
The above expression applies URL escaping to the expression, and
produces ``this+is+some+text``. The ``u`` name indicates URL
escaping, whereas ``h`` represents HTML escaping, ``x``
represents XML escaping, and ``trim`` applies a trim function.
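
For instance, the ``h`` filter converts HTML-significant characters
into entities:

.. sourcecode:: mako

    ${"<b>bold</b>" | h}

The above produces ``&lt;b&gt;bold&lt;/b&gt;``, leaving the markup
inert in the rendered output.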
Read more about built-in filtering functions, including how to
make your own filter functions, in :ref:`filtering_toplevel`.
Control Structures
==================
A control structure refers to all those things that control the
flow of a program -- conditionals (i.e. ``if``/``else``), loops (like
``while`` and ``for``), as well as things like ``try``/``except``. In Mako,
control structures are written using the ``%`` marker followed
by a regular Python control expression, and are "closed" by
using another ``%`` marker with the tag "``end<name>``", where
"``<name>``" is the keyword of the expression:
.. sourcecode:: mako
% if x==5:
this is some output
% endif
The ``%`` can appear anywhere on the line as long as no text
precedes it; indentation is not significant. The full range of
Python "colon" expressions are allowed here, including
``if``/``elif``/``else``, ``while``, ``for``, ``with``, and even ``def``,
although Mako has a built-in tag for defs which is more full-featured.
.. sourcecode:: mako
% for a in ['one', 'two', 'three', 'four', 'five']:
% if a[0] == 't':
its two or three
% elif a[0] == 'f':
four/five
% else:
one
% endif
% endfor
The ``%`` sign can also be "escaped", if you actually want to
emit a percent sign as the first non whitespace character on a
line, by escaping it as in ``%%``:
.. sourcecode:: mako
%% some text
%% some more text
The Loop Context
----------------
The **loop context** provides additional information about a loop
while inside of a ``% for`` structure:
.. sourcecode:: mako
<ul>
% for a in ("one", "two", "three"):
<li>Item ${loop.index}: ${a}</li>
% endfor
</ul>
See :ref:`loop_context` for more information on this feature.
.. versionadded:: 0.7
Comments
========
Comments come in two varieties. The single line comment uses
``##`` as the first non-space characters on a line:
.. sourcecode:: mako
## this is a comment.
...text ...
A multiline version exists using ``<%doc> ...text... </%doc>``:
.. sourcecode:: mako
<%doc>
these are comments
more comments
</%doc>
Newline Filters
===============
The backslash ("``\``") character, placed at the end of any
line, will consume the newline character before continuing to
the next line:
.. sourcecode:: mako
here is a line that goes onto \
another line.
The above text evaluates to:
.. sourcecode:: text
here is a line that goes onto another line.
Python Blocks
=============
Any arbitrary block of python can be dropped in using the ``<%
%>`` tags:
.. sourcecode:: mako
this is a template
<%
x = db.get_resource('foo')
y = [z.element for z in x if x.frobnizzle==5]
%>
% for elem in y:
element: ${elem}
% endfor
Within ``<% %>``, you're writing a regular block of Python code.
While the code can appear with an arbitrary level of preceding
whitespace, it has to be consistently formatted with itself.
Mako's compiler will adjust the block of Python to be consistent
with the surrounding generated Python code.
Module-level Blocks
===================
A variant on ``<% %>`` is the module-level code block, denoted
by ``<%! %>``. Code within these tags is executed at the module
level of the template, and not within the rendering function of
the template. Therefore, this code does not have access to the
template's context and is only executed when the template is
loaded into memory (which can be only once per application, or
more, depending on the runtime environment). Use the ``<%! %>``
tags to declare your template's imports, as well as any
pure-Python functions you might want to declare:
.. sourcecode:: mako
<%!
import mylib
import re
def filter(text):
return re.sub(r'^@', '', text)
%>
Any number of ``<%! %>`` blocks can be declared anywhere in a
template; they will be rendered in the resulting module
in a single contiguous block above all render callables,
in the order in which they appear in the source template.
Tags
====
The rest of what Mako offers takes place in the form of tags.
All tags use the same syntax, which is similar to an XML tag
except that the first character of the tag name is a ``%``
character. The tag is closed either by a contained slash
character, or an explicit closing tag:
.. sourcecode:: mako
<%include file="foo.txt"/>
<%def name="foo" buffered="True">
this is a def
</%def>
All tags have a set of attributes which are defined for each
tag. Some of these attributes are required. Also, many
attributes support **evaluation**, meaning you can embed an
expression (using ``${}``) inside the attribute text:
.. sourcecode:: mako
<%include file="/foo/bar/${myfile}.txt"/>
Whether or not an attribute accepts runtime evaluation depends
on the type of tag and how that tag is compiled into the
template. The best way to find out if you can stick an
expression in is to try it! The lexer will tell you if it's not
valid.
Here's a quick summary of all the tags:
``<%page>``
-----------
This tag defines general characteristics of the template,
including caching arguments, and optional lists of arguments
which the template expects when invoked.
.. sourcecode:: mako
<%page args="x, y, z='default'"/>
Or a page tag that defines caching characteristics:
.. sourcecode:: mako
<%page cached="True" cache_type="memory"/>
Currently, only one ``<%page>`` tag gets used per template, the
rest get ignored. While this will be improved in a future
release, for now make sure you have only one ``<%page>`` tag
defined in your template, else you may not get the results you
want. Further details on what ``<%page>`` is used for are described
in the following sections:
* :ref:`namespaces_body` - ``<%page>`` is used to define template-level
arguments and defaults
* :ref:`expression_filtering` - expression filters can be applied to all
expressions throughout a template using the ``<%page>`` tag
* :ref:`caching_toplevel` - options to control template-level caching
may be applied in the ``<%page>`` tag.
``<%include>``
--------------
A tag that is familiar from other template languages, ``%include``
is a regular joe that just accepts a file argument and calls in
the rendered result of that file:
.. sourcecode:: mako
<%include file="header.html"/>
hello world
<%include file="footer.html"/>
Include also accepts arguments which are available as ``<%page>`` arguments in the receiving template:
.. sourcecode:: mako
<%include file="toolbar.html" args="current_section='members', username='ed'"/>
``<%def>``
----------
The ``%def`` tag defines a Python function which contains a set
of content, that can be called at some other point in the
template. The basic idea is simple:
.. sourcecode:: mako
<%def name="myfunc(x)">
this is myfunc, x is ${x}
</%def>
${myfunc(7)}
The ``%def`` tag is a lot more powerful than a plain Python ``def``, as
the Mako compiler provides many extra services with ``%def`` that
you wouldn't normally have, such as the ability to export defs
as template "methods", automatic propagation of the current
:class:`.Context`, buffering/filtering/caching flags, and def calls
with content, which enable packages of defs to be sent as
arguments to other def calls (not as hard as it sounds). Get the
full deal on what ``%def`` can do in :ref:`defs_toplevel`.
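
Like a Python function, a ``%def`` can also declare default values
for its arguments:

.. sourcecode:: mako

    <%def name="greeting(name='world')">
        hello ${name}!
    </%def>

    ${greeting()}
    ${greeting('mako')}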
``<%block>``
------------
``%block`` is a tag that is close to a ``%def``,
except executes itself immediately in its base-most scope,
and can also be anonymous (i.e. with no name):
.. sourcecode:: mako
<%block filter="h">
some <html> stuff.
</%block>
Inspired by Jinja2 blocks, named blocks offer a syntactically pleasing way
to do inheritance:
.. sourcecode:: mako
<html>
<body>
<%block name="header">
<h2><%block name="title"/></h2>
</%block>
${self.body()}
</body>
</html>
Blocks are introduced in :ref:`blocks` and further described in :ref:`inheritance_toplevel`.
.. versionadded:: 0.4.1
``<%namespace>``
----------------
``%namespace`` is Mako's equivalent of Python's ``import``
statement. It allows access to all the rendering functions and
metadata of other template files, plain Python modules, as well
as locally defined "packages" of functions.
.. sourcecode:: mako
<%namespace file="functions.html" import="*"/>
The underlying object generated by ``%namespace``, an instance of
:class:`.mako.runtime.Namespace`, is a central construct used in
templates to reference template-specific information such as the
current URI, inheritance structures, and other things that are
not as hard as they sound right here. Namespaces are described
in :ref:`namespaces_toplevel`.
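
A namespace may also be given an explicit name, with its defs invoked
as attributes of that name (``functions.html`` and ``myfunction``
being stand-ins for your own template and def):

.. sourcecode:: mako

    <%namespace name="fn" file="functions.html"/>

    ${fn.myfunction()}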
``<%inherit>``
--------------
Inherit allows templates to arrange themselves in **inheritance
chains**. This is a concept familiar in many other template
languages.
.. sourcecode:: mako
<%inherit file="base.html"/>
When using the ``%inherit`` tag, control is passed to the topmost
inherited template first, which then decides how to handle
calling areas of content from its inheriting templates. Mako
offers a lot of flexibility in this area, including dynamic
inheritance, content wrapping, and polymorphic method calls.
Check it out in :ref:`inheritance_toplevel`.
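
A minimal ``base.html`` for the above might emit some page structure
and then call back into the inheriting template's body:

.. sourcecode:: mako

    <html>
        <body>
            <div class="header">my site</div>
            ${self.body()}
        </body>
    </html>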
``<%``\ nsname\ ``:``\ defname\ ``>``
-------------------------------------
Any user-defined "tag" can be created against
a namespace by using a tag with a name of the form
``<%<namespacename>:<defname>>``. The closed and open formats of such a
tag are equivalent to an inline expression and the ``<%call>``
tag, respectively.
.. sourcecode:: mako
<%mynamespace:somedef param="some value">
this is the body
</%mynamespace:somedef>
To create custom tags which accept a body, see
:ref:`defs_with_content`.
.. versionadded:: 0.2.3
``<%call>``
-----------
The call tag is the "classic" form of a user-defined tag, and is
roughly equivalent to the ``<%namespacename:defname>`` syntax
described above. This tag is also described in :ref:`defs_with_content`.
``<%doc>``
----------
The ``%doc`` tag handles multiline comments:
.. sourcecode:: mako
<%doc>
these are comments
more comments
</%doc>
Also the ``##`` symbol as the first non-space characters on a line can be used for single line comments.
``<%text>``
-----------
This tag suspends the Mako lexer's normal parsing of Mako
template directives, and returns its entire body contents as
plain text. It is used pretty much to write documentation about
Mako:
.. sourcecode:: mako
<%text filter="h">
heres some fake mako ${syntax}
<%def name="x()">${x}</%def>
</%text>
.. _syntax_exiting_early:
Exiting Early from a Template
=============================
Sometimes you want to stop processing a template or ``<%def>``
method in the middle and just use the text you've accumulated so
far. This is accomplished by using ``return`` statement inside
a Python block. It's a good idea for the ``return`` statement
to return an empty string, which prevents the Python default return
value of ``None`` from being rendered by the template. This
return value is for semantic purposes provided in templates via
the ``STOP_RENDERING`` symbol:
.. sourcecode:: mako
% if not len(records):
No records found.
<% return STOP_RENDERING %>
% endif
Or perhaps:
.. sourcecode:: mako
<%
if not len(records):
return STOP_RENDERING
%>
In older versions of Mako, an empty string can be substituted for
the ``STOP_RENDERING`` symbol:
.. sourcecode:: mako
<% return '' %>
.. versionadded:: 1.0.2 - added the ``STOP_RENDERING`` symbol which serves
as a semantic identifier for the empty string ``""`` used by a
Python ``return`` statement.
| PypiClean |
/IOT3ApiClient-1.0.0.tar.gz/IOT3ApiClient-1.0.0/charset_normalizer/md.py | from functools import lru_cache
from typing import Optional, List
from charset_normalizer.constant import UNICODE_SECONDARY_RANGE_KEYWORD
from charset_normalizer.utils import is_punctuation, is_symbol, unicode_range, is_accentuated, is_latin, \
remove_accent, is_separator, is_cjk
class MessDetectorPlugin:
"""
Base abstract class used for mess detection plugins.
All detectors MUST extend and implement given methods.
"""
def eligible(self, character: str) -> bool:
"""
Determine if given character should be fed in.
"""
raise NotImplementedError # pragma: nocover
def feed(self, character: str) -> None:
"""
The main routine to be executed upon character.
        Insert the logic by which the text would be considered chaotic.
"""
raise NotImplementedError # pragma: nocover
def reset(self) -> None:
"""
Permit to reset the plugin to the initial state.
"""
raise NotImplementedError # pragma: nocover
@property
def ratio(self) -> float:
"""
Compute the chaos ratio based on what your feed() has seen.
Must NOT be lower than 0.; No restriction gt 0.
"""
raise NotImplementedError # pragma: nocover
class TooManySymbolOrPunctuationPlugin(MessDetectorPlugin):
def __init__(self):
self._punctuation_count = 0 # type: int
self._symbol_count = 0 # type: int
self._character_count = 0 # type: int
self._last_printable_char = None # type: Optional[str]
self._frenzy_symbol_in_word = False # type: bool
def eligible(self, character: str) -> bool:
return character.isprintable()
def feed(self, character: str) -> None:
self._character_count += 1
if character != self._last_printable_char and character not in ["<", ">", "=", ":", "/", "&", ";", "{", "}", "[", "]"]:
if is_punctuation(character):
self._punctuation_count += 1
elif character.isdigit() is False and is_symbol(character):
self._symbol_count += 2
self._last_printable_char = character
def reset(self) -> None:
self._punctuation_count = 0
self._character_count = 0
self._symbol_count = 0
@property
def ratio(self) -> float:
if self._character_count == 0:
return 0.
ratio_of_punctuation = (self._punctuation_count + self._symbol_count) / self._character_count # type: float
return ratio_of_punctuation if ratio_of_punctuation >= 0.3 else 0.
class TooManyAccentuatedPlugin(MessDetectorPlugin):
def __init__(self):
self._character_count = 0 # type: int
self._accentuated_count = 0 # type: int
def eligible(self, character: str) -> bool:
return character.isalpha()
def feed(self, character: str) -> None:
self._character_count += 1
if is_accentuated(character):
self._accentuated_count += 1
def reset(self) -> None:
self._character_count = 0
self._accentuated_count = 0
@property
def ratio(self) -> float:
if self._character_count == 0:
return 0.
ratio_of_accentuation = self._accentuated_count / self._character_count # type: float
return ratio_of_accentuation if ratio_of_accentuation >= 0.35 else 0.
class UnprintablePlugin(MessDetectorPlugin):
def __init__(self):
self._unprintable_count = 0 # type: int
self._character_count = 0 # type: int
def eligible(self, character: str) -> bool:
return True
def feed(self, character: str) -> None:
if character not in {'\n', '\t', '\r'} and character.isprintable() is False:
self._unprintable_count += 1
self._character_count += 1
    def reset(self) -> None:
        self._unprintable_count = 0
        self._character_count = 0  # reset the denominator as well so ratios start fresh
@property
def ratio(self) -> float:
if self._character_count == 0:
return 0.
return (self._unprintable_count * 8) / self._character_count
class SuspiciousDuplicateAccentPlugin(MessDetectorPlugin):
def __init__(self):
self._successive_count = 0 # type: int
self._character_count = 0 # type: int
self._last_latin_character = None # type: Optional[str]
def eligible(self, character: str) -> bool:
return is_latin(character)
    def feed(self, character: str) -> None:
        # Track every eligible character; without this the ratio denominator
        # stays at zero and the plugin can never report anything.
        self._character_count += 1
        if self._last_latin_character is not None:
            if is_accentuated(character) and is_accentuated(self._last_latin_character):
                if remove_accent(character) == remove_accent(self._last_latin_character):
                    self._successive_count += 1
        self._last_latin_character = character
def reset(self) -> None:
self._successive_count = 0
self._character_count = 0
self._last_latin_character = None
@property
def ratio(self) -> float:
if self._character_count == 0:
return 0.
return (self._successive_count * 2) / self._character_count
class SuspiciousRange(MessDetectorPlugin):
def __init__(self):
self._suspicious_successive_range_count = 0 # type: int
self._character_count = 0 # type: int
self._last_printable_seen = None # type: Optional[str]
def eligible(self, character: str) -> bool:
return character.isprintable()
def feed(self, character: str) -> None:
self._character_count += 1
if self._last_printable_seen is None:
self._last_printable_seen = character
return
if character.isspace() or is_punctuation(character):
self._last_printable_seen = None
return
unicode_range_a = unicode_range(self._last_printable_seen) # type: Optional[str]
unicode_range_b = unicode_range(character) # type: Optional[str]
if is_suspiciously_successive_range(unicode_range_a, unicode_range_b):
self._suspicious_successive_range_count += 1
self._last_printable_seen = character
def reset(self) -> None:
self._character_count = 0
self._suspicious_successive_range_count = 0
self._last_printable_seen = None
@property
def ratio(self) -> float:
if self._character_count == 0:
return 0.
ratio_of_suspicious_range_usage = (self._suspicious_successive_range_count * 2) / self._character_count # type: float
if ratio_of_suspicious_range_usage < 0.1:
return 0.
return ratio_of_suspicious_range_usage
class SuperWeirdWordPlugin(MessDetectorPlugin):
def __init__(self):
self._word_count = 0 # type: int
self._bad_word_count = 0 # type: int
self._is_current_word_bad = False # type: bool
self._character_count = 0 # type: int
self._bad_character_count = 0 # type: int
self._buffer = "" # type: str
self._buffer_accent_count = 0 # type: int
def eligible(self, character: str) -> bool:
return True
def feed(self, character: str) -> None:
if character.isalpha():
self._buffer = "".join([self._buffer, character])
if is_accentuated(character):
self._buffer_accent_count += 1
return
if not self._buffer:
return
if (character.isspace() or is_punctuation(character) or is_separator(character)) and self._buffer:
self._word_count += 1
buffer_length = len(self._buffer) # type: int
self._character_count += buffer_length
if buffer_length >= 4 and self._buffer_accent_count / buffer_length >= 0.3:
self._is_current_word_bad = True
if self._is_current_word_bad:
self._bad_word_count += 1
self._bad_character_count += len(self._buffer)
self._is_current_word_bad = False
self._buffer = ""
self._buffer_accent_count = 0
elif character not in {"<", ">", "-", "="} and character.isdigit() is False and is_symbol(character):
self._is_current_word_bad = True
self._buffer += character
def reset(self) -> None:
self._buffer = ""
self._is_current_word_bad = False
self._bad_word_count = 0
self._word_count = 0
self._character_count = 0
self._bad_character_count = 0
@property
def ratio(self) -> float:
if self._word_count <= 16:
return 0.
return self._bad_character_count / self._character_count
class CjkInvalidStopPlugin(MessDetectorPlugin):
"""
    GB(Chinese) based encodings often render the stop incorrectly when the content does not fit and can be easily detected.
Searching for the overuse of '丅' and '丄'.
"""
def __init__(self):
self._wrong_stop_count = 0 # type: int
self._cjk_character_count = 0 # type: int
def eligible(self, character: str) -> bool:
return True
def feed(self, character: str) -> None:
if character in ["丅", "丄"]:
self._wrong_stop_count += 1
return
if is_cjk(character):
self._cjk_character_count += 1
def reset(self) -> None:
self._wrong_stop_count = 0
self._cjk_character_count = 0
@property
def ratio(self) -> float:
if self._cjk_character_count < 16:
return 0.
return self._wrong_stop_count / self._cjk_character_count
class ArchaicUpperLowerPlugin(MessDetectorPlugin):
def __init__(self):
self._buf = False # type: bool
self._character_count_since_last_sep = 0 # type: int
self._successive_upper_lower_count = 0 # type: int
self._successive_upper_lower_count_final = 0 # type: int
self._character_count = 0 # type: int
self._last_alpha_seen = None # type: Optional[str]
def eligible(self, character: str) -> bool:
return character.isspace() or character.isalpha()
    def feed(self, character: str) -> None:
        if is_separator(character):
            if self._character_count_since_last_sep < 24:
                self._successive_upper_lower_count_final += self._successive_upper_lower_count
            self._successive_upper_lower_count = 0
            self._character_count_since_last_sep = 0
        if self._last_alpha_seen is not None:
            if (character.isupper() and self._last_alpha_seen.islower()) or (character.islower() and self._last_alpha_seen.isupper()):
                if self._buf is True:
                    self._successive_upper_lower_count += 1
                else:
                    self._buf = True
            else:
                self._buf = False
        self._character_count += 1
        # Advance the per-chunk counter; without this the 24-character
        # window check above could never be exceeded.
        self._character_count_since_last_sep += 1
        self._last_alpha_seen = character
def reset(self) -> None:
self._character_count = 0
self._character_count_since_last_sep = 0
self._successive_upper_lower_count = 0
self._successive_upper_lower_count_final = 0
self._last_alpha_seen = None
@property
def ratio(self) -> float:
if self._character_count == 0:
return 0.
return (self._successive_upper_lower_count_final * 2) / self._character_count
def is_suspiciously_successive_range(unicode_range_a: Optional[str], unicode_range_b: Optional[str]) -> bool:
"""
Determine if two Unicode range seen next to each other can be considered as suspicious.
"""
if unicode_range_a is None or unicode_range_b is None:
return True
if unicode_range_a == unicode_range_b:
return False
if "Latin" in unicode_range_a and "Latin" in unicode_range_b:
return False
if "Emoticons" in unicode_range_a or "Emoticons" in unicode_range_b:
return False
keywords_range_a, keywords_range_b = unicode_range_a.split(" "), unicode_range_b.split(" ")
for el in keywords_range_a:
if el in UNICODE_SECONDARY_RANGE_KEYWORD:
continue
if el in keywords_range_b:
return False
# Japanese Exception
if unicode_range_a in ['Katakana', 'Hiragana'] and unicode_range_b in ['Katakana', 'Hiragana']:
return False
if unicode_range_a in ['Katakana', 'Hiragana'] or unicode_range_b in ['Katakana', 'Hiragana']:
if "CJK" in unicode_range_a or "CJK" in unicode_range_b:
return False
if "Hangul" in unicode_range_a or "Hangul" in unicode_range_b:
if "CJK" in unicode_range_a or "CJK" in unicode_range_b:
return False
if unicode_range_a == "Basic Latin" or unicode_range_b == "Basic Latin":
return False
# Chinese/Japanese use dedicated range for punctuation and/or separators.
if ('CJK' in unicode_range_a or 'CJK' in unicode_range_b) or (unicode_range_a in ['Katakana', 'Hiragana'] and unicode_range_b in ['Katakana', 'Hiragana']):
if 'Punctuation' in unicode_range_a or 'Punctuation' in unicode_range_b:
return False
if 'Forms' in unicode_range_a or 'Forms' in unicode_range_b:
return False
return True
@lru_cache(maxsize=2048)
def mess_ratio(decoded_sequence: str, maximum_threshold: float = 0.2, debug: bool = False) -> float:
"""
    Compute a mess ratio given a decoded bytes sequence. The maximum threshold stops the computation early.
"""
detectors = [] # type: List[MessDetectorPlugin]
for md_class in MessDetectorPlugin.__subclasses__():
detectors.append(
md_class()
)
length = len(decoded_sequence) # type: int
mean_mess_ratio = 0. # type: float
if length < 512:
intermediary_mean_mess_ratio_calc = 32 # type: int
elif length <= 1024:
intermediary_mean_mess_ratio_calc = 64
else:
intermediary_mean_mess_ratio_calc = 128
for character, index in zip(decoded_sequence, range(0, length)):
for detector in detectors:
if detector.eligible(character):
detector.feed(character)
if (index > 0 and index % intermediary_mean_mess_ratio_calc == 0) or index == length-1:
mean_mess_ratio = sum(
[
dt.ratio for dt in detectors
]
)
if mean_mess_ratio >= maximum_threshold:
break
if debug:
for dt in detectors: # pragma: nocover
print(
dt.__class__,
dt.ratio
)
return round(
mean_mess_ratio,
3
) | PypiClean |
/ADMIRE_maastricht-1.1.1.1.zip/ADMIRE_maastricht-1.1.1.1/ADMIRE_maastricht/MatplotlibWidget5.py | __version__ = "1.0.0"
from PyQt5.QtWidgets import QSizePolicy
from PyQt5.QtCore import QSize
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as Canvas
from matplotlib.figure import Figure
from matplotlib import rcParams
rcParams['font.size'] = 9
class MatplotlibWidget(Canvas):
"""
    MatplotlibWidget inherits PyQt5.QtWidgets.QWidget
and matplotlib.backend_bases.FigureCanvasBase
Options: option_name (default_value)
-------
parent (None): parent widget
title (''): figure title
xlabel (''): X-axis label
ylabel (''): Y-axis label
xlim (None): X-axis limits ([min, max])
ylim (None): Y-axis limits ([min, max])
xscale ('linear'): X-axis scale
yscale ('linear'): Y-axis scale
width (4): width in inches
height (3): height in inches
dpi (100): resolution in dpi
hold (False): if False, figure will be cleared each time plot is called
Widget attributes:
-----------------
figure: instance of matplotlib.figure.Figure
axes: figure axes
Example:
-------
self.widget = MatplotlibWidget(self, yscale='log', hold=True)
from numpy import linspace
x = linspace(-10, 10)
self.widget.axes.plot(x, x**2)
self.wdiget.axes.plot(x, x**3)
"""
def __init__(self, parent=None, title='', xlabel='', ylabel='',
xlim=None, ylim=None, xscale='linear', yscale='linear',
width=4, height=3, dpi=100, hold=False):
self.figure = Figure(figsize=(width, height), dpi=dpi)
self.axes = self.figure.add_subplot(111)
self.axes.set_title(title)
self.axes.set_xlabel(xlabel)
self.axes.set_ylabel(ylabel)
if xscale is not None:
self.axes.set_xscale(xscale)
if yscale is not None:
self.axes.set_yscale(yscale)
if xlim is not None:
self.axes.set_xlim(*xlim)
if ylim is not None:
self.axes.set_ylim(*ylim)
        # self.axes.hold(hold)  # Axes.hold() was deprecated/removed in matplotlib 2.x+; the keyword is retained for API compatibility
Canvas.__init__(self, self.figure)
self.setParent(parent)
Canvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Expanding)
Canvas.updateGeometry(self)
def sizeHint(self):
w, h = self.get_width_height()
return QSize(w, h)
def minimumSizeHint(self):
return QSize(10, 10)
#===============================================================================
# Example
#===============================================================================
if __name__ == '__main__':
import sys
    from PyQt5.QtWidgets import QMainWindow, QApplication
from numpy import linspace
class ApplicationWindow(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
self.mplwidget = MatplotlibWidget(self, title='Example',
xlabel='Linear scale',
ylabel='Log scale',
hold=True, yscale='log')
self.mplwidget.setFocus()
self.setCentralWidget(self.mplwidget)
self.plot(self.mplwidget.axes)
def plot(self, axes):
x = linspace(-10, 10)
axes.plot(x, x**2)
axes.plot(x, x**3)
app = QApplication(sys.argv)
win = ApplicationWindow()
win.show()
sys.exit(app.exec_()) | PypiClean |
/Editra-0.7.20.tar.gz/Editra-0.7.20/src/ed_ipc.py | __author__ = "Cody Precord <[email protected]>"
__svnid__ = "$Id: ed_ipc.py 72623 2012-10-06 19:33:06Z CJP $"
__revision__ = "$Revision: 72623 $"
#-----------------------------------------------------------------------------#
# Imports
import sys
import wx
import threading
import socket
import time
#import select
# Editra Libs
import util
import ed_xml
import ebmlib
#-----------------------------------------------------------------------------#
# Globals
# Port choosing algorithm ;)
EDPORT = (10 * int('ed', 16) + sum(ord(x) for x in "itr") + int('a', 16)) * 10
MSGEND = "*EDEND*"
# Xml Implementation
EDXML_IPC = "edipc"
EDXML_FILELIST = "filelist"
EDXML_FILE = "file"
EDXML_ARGLIST = "arglist"
EDXML_ARG = "arg"
#-----------------------------------------------------------------------------#
edEVT_COMMAND_RECV = wx.NewEventType()
EVT_COMMAND_RECV = wx.PyEventBinder(edEVT_COMMAND_RECV, 1)
class IpcServerEvent(wx.PyCommandEvent):
"""Event to signal the server has recieved some commands"""
def __init__(self, etype, eid, values=None):
"""Creates the event object"""
wx.PyCommandEvent.__init__(self, etype, eid)
self._value = values
def GetCommands(self):
"""Returns the list of commands sent to the server
@return: the value of this event
"""
return self._value
#-----------------------------------------------------------------------------#
class EdIpcServer(threading.Thread):
"""Create an instance of IPC server for Editra. IPC is handled through
a socket connection to an instance of this server listening on L{EDPORT}.
The server will receive commands and dispatch them to the app.
Messages sent to the server must be in the following format.
AuthenticationKey;Message Data;MSGEND
The _AuthenticationKey_ is the same as the key that started the server it
is used to validate that messages are coming from a legitimate source.
_Message Data_ is a string of data where items are separated by a single
';' character. If you use L{SendCommands} to communicate with the server
then the message separators are handled internally by that method.
L{MSGEND} is the token to signify that the client is finished sending
commands to the server. When using L{SendCommands} this is also
automatically handled.
@todo: investigate possible security issues
"""
def __init__(self, app, key, port=EDPORT):
"""Create the server thread
@param app: Application object the server belongs to
@param key: Unique user authentication key (string)
@keyword port: TCP port to attempt to connect to
"""
super(EdIpcServer, self).__init__()
# Attributes
self._exit = False
self.__key = key
self.app = app
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Setup
## Try new ports till we find one that we can use
while True:
try:
self.socket.bind(('127.0.0.1', port))
break
except socket.error:
port += 1
global EDPORT
EDPORT = port
self.socket.listen(5)
def Shutdown(self):
"""Tell the server to exit"""
self._exit = True
# Wake up the server in case it's waiting
# TODO: should add a specific exit event message
SendCommands(IPCCommand(), self.__key)
def run(self):
"""Start the server. The server runs in blocking mode, this
shouldn't be an issue as it should rarely need to respond to
anything.
"""
while not self._exit:
try:
client, addr = self.socket.accept()
if self._exit:
break
# Block for up to 2 seconds while reading
start = time.time()
received = ''
while time.time() < start + 2:
received += client.recv(4096)
if received.endswith(MSGEND):
break
# If message key is correct and the message is ended, process
# the input and dispatch to the app.
if received.startswith(self.__key) and received.endswith(MSGEND):
# Strip the key
received = received.replace(self.__key, '', 1)
# Strip the end token (rstrip(MSGEND) would strip characters, not the suffix)
xmlstr = received[:-len(MSGEND)].strip(";")
# Parse the xml
exml = IPCCommand()
try:
# Well formed xml must be utf-8 string not Unicode
if not ebmlib.IsUnicode(xmlstr):
xmlstr = unicode(xmlstr, sys.getfilesystemencoding())
xmlstr = xmlstr.encode('utf-8')
exml = IPCCommand.parse(xmlstr)
except Exception, msg:
# Log and ignore parsing errors
logmsg = "[ed_ipc][err] Parsing failed: %s\n" % msg
xmlstr = xmlstr.replace('\n', '').strip()
logmsg += "Bad xml was: %s" % repr(xmlstr)
util.Log(logmsg)
continue
evt = IpcServerEvent(edEVT_COMMAND_RECV, wx.ID_ANY, exml)
wx.CallAfter(wx.PostEvent, self.app, evt)
except socket.error:
# TODO: Better error handling
self._exit = True
# Shutdown Server
try:
self.socket.shutdown(socket.SHUT_RDWR)
except socket.error:
pass
self.socket.close()
#-----------------------------------------------------------------------------#
def SendCommands(xmlobj, key):
"""Send commands to the running instance of Editra
@param xmlobj: EditraXml Object
@param key: Server session authentication key
@return: bool
"""
assert isinstance(xmlobj, ed_xml.EdXml), "SendCommands expects an xml object"
# Build the edipc protocol msg
cmds = list()
cmds.insert(0, key)
cmds.append(xmlobj.GetXml())
cmds.append(MSGEND)
try:
# Setup the client socket
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('127.0.0.1', EDPORT))
# Server expects commands delimited by ;
message = ";".join(cmds)
client.send(message)
client.shutdown(socket.SHUT_RDWR)
client.close()
except Exception, msg:
util.Log("[ed_ipc][err] Failed in SendCommands: %s" % msg)
return False
else:
return True
#-----------------------------------------------------------------------------#
# Command Serialization
class IPCFile(ed_xml.EdXml):
"""Xml object for holding the list of files
@verbatim <file value="/path/to/file"/> @endverbatim
"""
class meta:
tagname = EDXML_FILE
value = ed_xml.String(required=True)
class IPCArg(ed_xml.EdXml):
"""Xml object for holding the list of args
@verbatim <arg name="test" value="x"/> @endverbatim
"""
class meta:
tagname = EDXML_ARG
name = ed_xml.String(required=True)
value = ed_xml.String(required=True)
class IPCCommand(ed_xml.EdXml):
"""IPC XML Command"""
class meta:
tagname = EDXML_IPC
filelist = ed_xml.List(ed_xml.Model(IPCFile))
arglist = ed_xml.List(ed_xml.Model(IPCArg)) | PypiClean |
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/charting/action2d/_IndicatorElement.js.uncompressed.js | define("dojox/charting/action2d/_IndicatorElement", ["dojo/_base/lang", "dojo/_base/declare", "../Element", "../plot2d/common",
"../axis2d/common", "dojox/gfx"],
function(lang, declare, Element, dcpc, dcac, gfx){
// all the code below should be removed when http://trac.dojotoolkit.org/ticket/11299 will be available
var getBoundingBox = function(shape){
return getTextBBox(shape, shape.getShape().text);
};
var getTextBBox = function(s, t){
var c = s.declaredClass;
if (c.indexOf("svg")!=-1){
// try/catch the FF native getBBox error. cheaper than walking up in the DOM
// hierarchy to check the conditions (bench show /10 )
try {
return lang.mixin({}, s.rawNode.getBBox());
}catch (e){
return null;
}
}else if(c.indexOf("vml")!=-1){
var rawNode = s.rawNode, _display = rawNode.style.display;
rawNode.style.display = "inline";
var w = gfx.pt2px(parseFloat(rawNode.currentStyle.width));
var h = gfx.pt2px(parseFloat(rawNode.currentStyle.height));
var sz = {x: 0, y: 0, width: w, height: h};
// in VML, the width/height we get are in view coordinates
// in our case we don't zoom the view so that is ok
// It's impossible to get the x/y from the currentStyle.left/top,
// because all negative coordinates are 'clipped' to 0.
// (x:0 + translate(-100) -> x=0
computeLocation(s, sz);
rawNode.style.display = _display;
return sz;
}else if(c.indexOf("silverlight")!=-1){
var bb = {width: s.rawNode.actualWidth, height: s.rawNode.actualHeight};
return computeLocation(s, bb, 0.75);
}else if(s.getTextWidth){
// canvas
var w = s.getTextWidth();
var font = s.getFont();
var fz = font ? font.size : gfx.defaultFont.size;
var h = gfx.normalizedLength(fz);
sz = {width: w, height: h};
computeLocation(s, sz, 0.75);
return sz;
}
};
var computeLocation = function(s, sz, coef){
var width = sz.width, height = sz.height, sh = s.getShape(), align = sh.align;
switch (align) {
case "end":
sz.x = sh.x - width;
break;
case "middle":
sz.x = sh.x - width / 2;
break;
case "start":
default:
sz.x = sh.x;
break;
}
coef = coef || 1;
sz.y = sh.y - height*coef; // rough approximation of the ascent!...
return sz;
};
return declare("dojox.charting.action2d._IndicatorElement",[Element], {
// summary:
// Internal element used by indicator actions.
// tags:
// private
constructor: function(chart, kwArgs){
if(!kwArgs){ kwArgs = {}; }
this.inter = kwArgs.inter;
},
_updateVisibility: function(cp, limit, attr){
var axis = attr=="x"?this.inter.plot._hAxis:this.inter.plot._vAxis;
var scale = axis.getWindowScale();
this.chart.setAxisWindow(axis.name, scale, axis.getWindowOffset() + (cp[attr] - limit[attr]) / scale);
this._noDirty = true;
this.chart.render();
this._noDirty = false;
if(!this._tracker){
this.initTrack();
}
},
_trackMove: function(){
// let's update the selector
this._updateIndicator(this.pageCoord);
// if we reached that point once, then we don't stop until mouse up
if(this._initTrackPhase){
this._initTrackPhase = false;
this._tracker = setInterval(lang.hitch(this, this._trackMove), 100);
}
},
initTrack: function(){
this._initTrackPhase = true;
this._tracker = setTimeout(lang.hitch(this, this._trackMove), 500);
},
stopTrack: function(){
if(this._tracker){
if(this._initTrackPhase){
clearTimeout(this._tracker);
}else{
clearInterval(this._tracker);
}
this._tracker = null;
}
},
render: function(){
if(!this.isDirty()){
return;
}
this.cleanGroup();
if (!this.pageCoord){
return;
}
this._updateIndicator(this.pageCoord, this.secondCoord);
},
_updateIndicator: function(cp1, cp2){
var inter = this.inter, plot = inter.plot, v = inter.opt.vertical;
var hAxis = this.chart.getAxis(plot.hAxis), vAxis = this.chart.getAxis(plot.vAxis);
var hn = hAxis.name, vn = vAxis.name, hb = hAxis.getScaler().bounds, vb = vAxis.getScaler().bounds;
var attr = v?"x":"y", n = v?hn:vn, bounds = v?hb:vb;
// sort data point
if(cp2){
var tmp;
if(v){
if(cp1.x>cp2.x){
tmp = cp2;
cp2 = cp1;
cp1 = tmp;
}
}else{
if(cp1.y>cp2.y){
tmp = cp2;
cp2 = cp1;
cp1 = tmp;
}
}
}
var cd1 = plot.toData(cp1), cd2;
if(cp2){
cd2 = plot.toData(cp2);
}
var o = {};
o[hn] = hb.from;
o[vn] = vb.from;
var min = plot.toPage(o);
o[hn] = hb.to;
o[vn] = vb.to;
var max = plot.toPage(o);
if(cd1[n] < bounds.from){
// do not autoscroll if dual indicator
if(!cd2 && inter.opt.autoScroll){
this._updateVisibility(cp1, min, attr);
return;
}else{
cp1[attr] = min[attr];
}
// cp1 might have changed, let's update cd1
cd1 = plot.toData(cp1);
}else if(cd1[n] > bounds.to){
if(!cd2 && inter.opt.autoScroll){
this._updateVisibility(cp1, max, attr);
return;
}else{
cp1[attr] = max[attr];
}
// cp1 might have changed, let's update cd1
cd1 = plot.toData(cp1);
}
var c1 = this._getData(cd1, attr, v), c2;
if(c1.y == null){
// we have no data for that point let's just return
return;
}
if(cp2){
if(cd2[n] < bounds.from){
cp2[attr] = min[attr];
cd2 = plot.toData(cp2);
}else if(cd2[n] > bounds.to){
cp2[attr] = max[attr];
cd2 = plot.toData(cp2);
}
c2 = this._getData(cd2, attr, v);
if(c2.y == null){
// we have no data for that point let's pretend we have a single touch point
cp2 = null;
}
}
var t1 = this._renderIndicator(c1, cp2?1:0, hn, vn, min, max);
if(cp2){
var t2 = this._renderIndicator(c2, 2, hn, vn, min, max);
var delta = v?c2.y-c1.y:c2.x-c1.x;
var text = inter.opt.labelFunc?inter.opt.labelFunc(c1, c2, inter.opt.fixed, inter.opt.precision):
(dcpc.getLabel(delta, inter.opt.fixed, inter.opt.precision)+" ("+dcpc.getLabel(100*delta/(v?c1.y:c1.x), true, 2)+"%)");
this._renderText(text, inter, this.chart.theme, v?(t1.x+t2.x)/2:t1.x, v?t1.y:(t1.y+t2.y)/2, c1, c2);
};
},
_renderIndicator: function(coord, index, hn, vn, min, max){
var t = this.chart.theme, c = this.chart.getCoords(), inter = this.inter, plot = inter.plot, v = inter.opt.vertical;
var mark = {};
mark[hn] = coord.x;
mark[vn] = coord.y;
mark = plot.toPage(mark);
var cx = mark.x - c.x, cy = mark.y - c.y;
var x1 = v?cx:min.x - c.x, y1 = v?min.y - c.y:cy, x2 = v?x1:max.x - c.x, y2 = v?max.y - c.y:y1;
var sh = inter.opt.lineShadow?inter.opt.lineShadow:t.indicator.lineShadow,
ls = inter.opt.lineStroke?inter.opt.lineStroke:t.indicator.lineStroke,
ol = inter.opt.lineOutline?inter.opt.lineOutline:t.indicator.lineOutline;
if(sh){
this.group.createLine({x1: x1 + sh.dx, y1: y1 + sh.dy, x2: x2 + sh.dx, y2: y2 + sh.dy}).setStroke(sh);
}
if(ol){
ol = dcpc.makeStroke(ol);
ol.width = 2 * ol.width + ls.width;
this.group.createLine({x1: x1, y1: y1, x2: x2, y2: y2}).setStroke(ol);
}
this.group.createLine({x1: x1, y1: y1, x2: x2, y2: y2}).setStroke(ls);
var ms = inter.opt.markerSymbol?inter.opt.markerSymbol:t.indicator.markerSymbol,
path = "M" + cx + " " + cy + " " + ms;
sh = inter.opt.markerShadow?inter.opt.markerShadow:t.indicator.markerShadow;
ls = inter.opt.markerStroke?inter.opt.markerStroke:t.indicator.markerStroke;
ol = inter.opt.markerOutline?inter.opt.markerOutline:t.indicator.markerOutline;
if(sh){
var sp = "M" + (cx + sh.dx) + " " + (cy + sh.dy) + " " + ms;
this.group.createPath(sp).setFill(sh.color).setStroke(sh);
}
if(ol){
ol = dcpc.makeStroke(ol);
ol.width = 2 * ol.width + ls.width;
this.group.createPath(path).setStroke(ol);
}
var shape = this.group.createPath(path);
var sf = this._shapeFill(inter.opt.markerFill?inter.opt.markerFill:t.indicator.markerFill, shape.getBoundingBox());
shape.setFill(sf).setStroke(ls);
if(index==0){
var text = inter.opt.labelFunc?inter.opt.labelFunc(coord, null, inter.opt.fixed, inter.opt.precision):
dcpc.getLabel(v?coord.y:coord.x, inter.opt.fixed, inter.opt.precision);
this._renderText(text, inter, t, v?x1:x2+5, v?y2+5:y1, coord);
}else{
return v?{x: x1, y: y2+5}:{x: x2+5, y: y1};
}
},
_renderText: function(text, inter, t, x, y, c1, c2){
var label = dcac.createText.gfx(
this.chart,
this.group,
x, y,
"middle",
text, inter.opt.font?inter.opt.font:t.indicator.font, inter.opt.fontColor?inter.opt.fontColor:t.indicator.fontColor);
var b = getBoundingBox(label);
b.x-=2; b.y-=1; b.width+=4; b.height+=2; b.r = inter.opt.radius?inter.opt.radius:t.indicator.radius;
var sh = inter.opt.shadow?inter.opt.shadow:t.indicator.shadow;
var ls = inter.opt.stroke?inter.opt.stroke:t.indicator.stroke;
var ol = inter.opt.outline?inter.opt.outline:t.indicator.outline;
if(sh){
this.group.createRect(b).setFill(sh.color).setStroke(sh);
}
if(ol){
ol = dcpc.makeStroke(ol);
ol.width = 2 * ol.width + ls.width;
this.group.createRect(b).setStroke(ol);
}
var f = inter.opt.fillFunc?inter.opt.fillFunc(c1, c2):(inter.opt.fill?inter.opt.fill:t.indicator.fill);
this.group.createRect(b).setFill(this._shapeFill(f, b)).setStroke(ls);
label.moveToFront();
},
_getData: function(cd, attr, v){
// we need to find which actual data point is "close" to the data value
var data = this.chart.getSeries(this.inter.opt.series).data;
// let's consider data are sorted because anyway rendering will be "weird" with unsorted data
// i is an index in the array, which is different from a x-axis value even for index based data
var i, r, l = data.length;
for (i = 0; i < l; ++i){
r = data[i];
if(r == null){
// move to next item
}else if(typeof r == "number"){
if(i + 1 > cd[attr]){
break;
}
}else if(r[attr] > cd[attr]){
break;
}
}
var x,y,px,py;
if(typeof r == "number"){
x = i+1;
y = r;
if(i>0){
px = i;
py = data[i-1];
}
}else{
x = r.x;
y = r.y;
if(i>0){
px = data[i-1].x;
py = data[i-1].y;
}
}
if(i>0){
var m = v?(x+px)/2:(y+py)/2;
if(cd[attr]<=m){
x = px;
y = py;
}
}
return {x: x, y: y};
},
cleanGroup: function(creator){
// summary:
// Clean any elements (HTML or GFX-based) out of our group, and create a new one.
// creator: dojox.gfx.Surface?
// An optional surface to work with.
// returns: dojox.charting.Element
// A reference to this object for functional chaining.
this.inherited(arguments);
// we always want to be above regular plots and not clipped
this.group.moveToFront();
return this; // dojox.charting.Element
},
clear: function(){
// summary:
// Clear out any parameters set on this plot.
// returns: dojox.charting.action2d._IndicatorElement
// The reference to this plot for functional chaining.
this.dirty = true;
return this; // dojox.charting.plot2d._IndicatorElement
},
getSeriesStats: function(){
// summary:
// Returns default stats (irrelevant for this type of plot).
// returns: Object
// {hmin, hmax, vmin, vmax} min/max in both directions.
return lang.delegate(dcpc.defaultStats);
},
initializeScalers: function(){
// summary:
// Does nothing (irrelevant for this type of plot).
return this;
},
isDirty: function(){
// summary:
// Return whether or not this plot needs to be redrawn.
// returns: Boolean
// If this plot needs to be rendered, this will return true.
return !this._noDirty && (this.dirty || this.inter.plot.isDirty());
}
});
}); | PypiClean |
/BindingGP-0.0.36.tar.gz/BindingGP-0.0.36/bgp/gp.py |
# @Time : 2019/11/12 15:13
# @Email : [email protected]
# @Software: PyCharm
# @License: GNU Lesser General Public License v3.0
"""
Notes:
This part is copied from deap, with the stdlib ``random`` calls
replaced by ``numpy.random``.
"""
import copy
import operator
import sys
from collections import Counter
from functools import wraps
from inspect import isclass
from operator import attrgetter
import numpy as np
from deap.tools import Statistics, MultiStatistics
from numpy import random
from bgp.calculation.scores import score_dim
######################################
# Generate #
######################################
def checkss(func):
@wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
pset = kwargs["pset"]
for i in result[0].top():
assert i in pset.dispose
for i in result[0].bot():
assert i in pset.primitives + pset.terminals_and_constants
return result
return wrapper
def checks_number(func):
@wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if "force_number" in kwargs:
fs = kwargs["force_number"]
else:
fs = False
if fs and not len(result):
raise TypeError("The DimForceLoop keep the dim of the select offspring,\n"
"But the select number from last population are zero at the current dim_type limitation.\n"
"Please change the dim_type or change the DimForceLoop method to others")
return result
return wrapper
def generate(pset, min_, max_, condition, personal_map=False, **kwargs):
"""
generate expression.
Parameters
----------
pset: SymbolSet
pset
min_: int
Minimum height of the produced trees.
max_: int
Maximum Height of the produced trees.
condition: collections.Callable
The condition is a function that takes two arguments,
the height of the tree to build and the current
depth in the tree.
kwargs: None
placeholder for future
personal_map:bool
premap
"""
_ = kwargs
type_ = object
expr = []
height = random.randint(min_, max_ + 1)  # numpy's randint upper bound is exclusive
stack = [(0, type_)]
while len(stack) != 0:
depth, type_ = stack.pop()
if condition(height, depth):
try:
if personal_map:
p_t = pset.premap.get_ind_value(expr, pset)
else:
p_t = pset.prob_ter_con_list
if p_t is None:
p_t = pset.prob_ter_con_list
term = pset.terminals_and_constants[random.choice(len(pset.terminals_and_constants),
p=p_t)]
except IndexError:
_, _, traceback = sys.exc_info()
raise IndexError("The symbol.generate function tried to add "
"a terminalm, but there is "
"none available.").with_traceback(traceback)
if isclass(term):
term = term()
expr.append(term)
else:
try:
prim = pset.primitives[random.choice(len(pset.primitives), p=pset.prob_pri_list)]
except IndexError:
_, _, traceback = sys.exc_info()
raise IndexError("The symbol.generate function tried to add "
"a primitive', but there is "
"none available.").with_traceback(traceback)
expr.append(prim)
for arg in reversed(prim.args):
stack.append((depth + 1, arg))
dispose = list(random.choice(pset.dispose, len(expr), p=pset.prob_dispose_list))
if pset.types == 1:
add_ = [pset.dispose_dict["Self"]]
dispose = add_ * len(expr)
elif pset.types == 2:
add_ = list(pset.dispose_dict[i] for i in ["MAdd", "MSub", "MMul", "MDiv"])
dispose[0] = random.choice(add_, p=[0.25, 0.25, 0.25, 0.25])
else:
add_ = list(pset.dispose_dict[i] for i in ["MAdd", "MMul"])
dispose[0] = random.choice(add_, p=[0.5, 0.5])
# interleave: the result alternates [dispose_0, node_0, dispose_1, node_1, ...]
re = []
for i, j in zip(dispose, expr):
re.extend((i, j))
return re
def genGrow(pset, min_, max_, personal_map=False, ):
"""Generate an expression where each leaf might have a different depth
between *min* and *max*.
:param pset: Primitive set from which primitives are selected.
:param min_: Minimum height of the produced trees.
:param max_: Maximum Height of the produced trees.
:param personal_map: bool.
:returns: A grown tree with leaves at possibly different depths.
"""
def condition(height, depth):
"""Expression generation stops when the depth is equal to height
or when it is randomly determined that a node should be a terminal.
"""
return depth == height or (depth >= min_ and random.random() < pset.terminalRatio)
return generate(pset, min_, max_, condition, personal_map=personal_map)
def depart(individual):
"""take part expression."""
if len(individual) <= 10 or individual.height <= 8:
return [individual, ]
else:
inds = []
for index in np.arange(2, len(individual) - 4, step=2):
slice_ = individual.searchSubtree(index)
ind_new = individual.__class__(individual[slice_])
if 6 <= len(ind_new) <= 10 or 4 <= ind_new.height <= 8:
if len(ind_new.ter_site()) >= 2:
ind_new[0] = individual[0]
inds.append(ind_new)
return inds
def genFull(pset, min_, max_, personal_map=False):
"""Generate an expression where each leaf has the same depth
between *min* and *max*.
:param pset: Primitive set from which primitives are selected.
:param min_: Minimum height of the produced trees.
:param max_: Maximum Height of the produced trees.
:param personal_map:
:returns: A full tree with all leaves at the same depth.
"""
def condition(height, depth):
"""Expression generation stops when the depth is equal to height."""
return depth == height
return generate(pset, min_, max_, condition, personal_map=personal_map)
def genHalf(pset, min_, max_, personal_map=False):
a = random.rand()
if a > 0.5:
return genFull(pset, min_, max_, personal_map=personal_map)
else:
return genGrow(pset, min_, max_, personal_map=personal_map)
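# Usage sketch (hedged: assumes a configured bgp SymbolSet named ``pset``
# built elsewhere in this package; the bounds are illustrative only):
#
#   expr = genHalf(pset, min_=2, max_=4)
#   # ``expr`` is a flat list alternating dispose and primitive/terminal
#   # nodes, ready to be wrapped into a SymbolTree individual.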
######################################
# crossover #
######################################
def cxOnePoint(ind10, ind20):
"""Randomly select crossover point in each individual and exchange each
subtree with the point as root between each individual.
:param ind10: First tree participating in the crossover.
:param ind20: Second tree participating in the crossover.
:returns: A tuple of two trees.
"""
ind1 = copy.copy(ind10)
ind2 = copy.copy(ind20)
if len(ind1) < 4 or len(ind2) < 4:
return ind1, ind2
else:
index1 = random.choice(np.arange(2, len(ind1) - 1, 2))
index2 = random.choice(np.arange(2, len(ind2) - 1, 2))
slice1 = ind1.searchSubtree(index1)
slice2 = ind2.searchSubtree(index2)
ind1[slice1], ind2[slice2] = ind2[slice2], ind1[slice1]
return ind1, ind2
######################################
# limitation #
######################################
def staticLimit(key, max_value):
"""Decorator: offspring whose ``key`` (e.g. height) exceeds ``max_value``
are replaced by a randomly chosen parent, as in deap's staticLimit."""
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
keep_inds = [copy.deepcopy(ind) for ind in args]
new_inds = list(func(*args, **kwargs))
for i, ind in enumerate(new_inds):
if key(ind) > max_value:
new_inds[i] = keep_inds[random.choice(len(keep_inds))]
return new_inds
return wrapper
return decorator
######################################
# mutate #
######################################
# @logg
# @checkss
def mutUniform(individual, expr, pset):
"""Randomly select a point in the tree *individual*, then replace the
subtree at that point as a root by the expression generated using method
:func:`expr`.
:param individual: The tree to be mutated.
:param expr: A function object that can generate an expression when
called.
:param pset: SymbolSet
:returns: A tuple of one tree.
"""
individual = copy.copy(individual)
index = random.choice(len(individual))
if index % 2 == 1:
index -= 1
slice_ = individual.searchSubtree(index)
individual[slice_] = expr(pset=pset)
return individual,
# @logg
# @checkss
def mutShrink(individual, pset=None):
"""This operator shrinks the *individual* by choosing randomly a branch and
replacing it with one of the branch's arguments (also randomly chosen).
:param individual: The tree to be shrinked.
:param pset: SymbolSet.
:returns: A tuple of one tree.
"""
_ = pset
# We don't want to "shrink" the root
if len(individual) < 4 or individual.height < 4:
return individual,
individual = copy.copy(individual)
index = random.randint(0, len(individual))
if index % 2 == 1:
index -= 1
slice_ = individual.searchSubtree(index)
ter = [i for i in individual[slice_] if i.arity == 0]
left = random.choice(ter)
hat = random.choice(pset.dispose, p=pset.prob_dispose_list)
del individual[slice_]
individual.insert(index, left)
individual.insert(index, hat)
return individual,
# @logg
# @checkss
def mutNodeReplacementVerbose(individual, pset, personal_map=False):
"""
Node replacement mutation (verbose choice over terminals_and_constants).
Replaces a randomly chosen primitive from *individual* by a randomly
chosen primitive with the same number of arguments from the :attr:`pset`
attribute of the individual.
:param individual: The normal or typed tree to be mutated.
:param pset: SymbolSet
:param personal_map: bool
:returns: A tuple of one tree.
"""
if len(individual) < 4:
return individual,
individual = copy.copy(individual)
if pset.types > 1:
if random.random() <= 0.8:
index = random.choice(np.arange(1, len(individual), step=2))
else:
index = random.choice(np.arange(0, len(individual), step=2))
else:
index = random.choice(np.arange(0, len(individual), step=2))
node = individual[index]
if index % 2 == 0:
for i in pset.dispose:
assert i.arity == 1
prims = pset.dispose
p_d = np.array([pset.prob_dispose[repr(i)] for i in prims], 'float32')
p_d /= np.sum(p_d)
a = prims[random.choice(len(prims), p=p_d)]
individual[index] = a
else:
if node.arity == 0: # Terminal
if personal_map:
p_t = pset.premap.get_one_node_value(individual, pset, node=node, site=index)
if p_t is None:
p_t = pset.prob_ter_con_list
else:
p_t = pset.prob_ter_con_list
term = pset.terminals_and_constants[random.choice(len(pset.terminals_and_constants), p=p_t)]
individual[index] = term
else: # Primitive
prims = [p for p in pset.primitives if p.arity == node.arity]
p_p = np.array([pset.prob_pri[repr(i)] for i in prims], 'float32')
p_p /= np.sum(p_p)
a = prims[random.choice(len(prims), p=p_p)]
individual[index] = a
return individual,
# @logg
# @checkss
def mutDifferentReplacementVerbose(individual, pset, personal_map=False):
"""
Different-terminal replacement mutation (verbose choice over terminals_and_constants).
Replaces a randomly chosen primitive from *individual* by a randomly
chosen primitive with the same number of arguments from the :attr:`pset`
attribute of the individual.
Decreases the probability of re-picking terminals that already appear in the tree.
:param individual: The normal or typed tree to be mutated.
:param pset: SymbolSet
:param personal_map: bool
:returns: A tuple of one tree.
"""
if len(individual) < 4:
return individual,
individual = copy.copy(individual)
ters = [repr(i) for i in individual.terminals()]
pset_ters = [repr(i) for i in pset.terminals_and_constants]
cou = Counter(ters)
cou_multil = {i: j for i, j in cou.items() if j >= 2}
ks = list(cou_multil.keys())
nks = list(set(pset_ters) - (set(ks)))
if len(nks) <= 1:
return individual,
nks.sort()  # deterministic order keeps the random choice reproducible
p_nks = np.array([pset.prob_ter_con[i] for i in nks])
p_nks = p_nks.astype(float)
p_nks /= np.sum(p_nks)
if cou_multil:
indexs = []
for k, v in cou_multil.items():
indi = []
for i in np.arange(1, len(individual), 2):
if repr(individual[i]) == k:
indi.append(i)
if indi:
indexs.append(random.choice(indi))
if personal_map:
p_nks_new = pset.premap.get_nodes_value(ind=individual, pset=pset, node=None, site=indexs)
if p_nks_new is not None:
nks = list(pset.prob_ter_con.keys())
p_nks = p_nks_new
if len(indexs) <= len(nks):
term = random.choice(nks, len(indexs), replace=False, p=p_nks)
else:
term = random.choice(nks, len(indexs), replace=True, p=p_nks)
term_ters = []
for name in term:
for i in pset.terminals_and_constants:
if repr(i) == name:
term_ters.append(i)
for o, n in zip(indexs, term_ters):
individual[o] = n
return individual,
######################################
# select #
######################################
def selRandom(individuals, k):
"""Select *k* individuals at random from the input *individuals* with
replacement. The list returned contains references to the input
*individuals*.
:param individuals: A list of individuals to select from.
:param k: The number of individuals to select.
:returns: A list of selected individuals.
This function uses the :func:`numpy.random.choice` function
"""
return [individuals[random.choice(len(individuals))] for _ in range(k)]
def selBest(individuals, k, fit_attr="fitness"):
"""Select the *k* best individuals among the input *individuals*. The
list returned contains references to the input *individuals*.
:param individuals: A list of individuals to select from.
:param k: The number of individuals to select.
:param fit_attr: The attribute of individuals to use as selection criterion
:returns: A list containing the k best individuals.
"""
return sorted(individuals, key=attrgetter(fit_attr), reverse=True)[:k]
def selTournament(individuals, k, tournsize, fit_attr="fitness"):
"""Select the best individual among *tournsize* randomly chosen
individuals, *k* times. The list returned contains
references to the input *individuals*.
:param individuals: A list of individuals to select from.
:param k: The number of individuals to select.
:param tournsize: The number of individuals participating in each tournament.
:param fit_attr: The attribute of individuals to use as selection criterion
:returns: A list of selected individuals.
This function uses the :func:`numpy.random.choice` function
"""
chosen = []
for i in range(k):
aspirants = selRandom(individuals, tournsize)
chosen.append(max(aspirants, key=attrgetter(fit_attr)))
return chosen
@checks_number
def selKbestDim(pop, K_best=10, dim_type=None, fuzzy=False, fit_attr="fitness", force_number=False):
"""
Select the individual with dim limitation.
Parameters
----------
pop: list of SymbolTree
A list of individuals to select from.
K_best: int
The number of individuals to select.
dim_type: Dim
Target dimension for the selection.
fuzzy: bool
If True, accept the dim or any dim with the same base, such as m, m^2, m^3.
fit_attr: str
The attribute of individuals to use as the selection criterion; default is "fitness".
force_number: bool
If True, return exactly K_best individuals (padding by repetition if needed).
Returns
-------
A list of selected individuals.
"""
chosen = sorted(pop, key=operator.attrgetter(fit_attr))
chosen.reverse()
choice_index = [score_dim(ind.y_dim, dim_type, fuzzy) for ind in chosen]
add_ind = [chosen[i] for i, j in enumerate(choice_index) if j == 1]
if K_best is None:
if len(add_ind) >= 5:
K_best = round(len(pop) / 10)
else:
K_best = 0
if len(add_ind) >= round(K_best):
return add_ind[:round(K_best)]
else:
if not force_number or len(add_ind) == 0:
return add_ind
else:
# pad by repeating the accepted individuals until K_best is reached
ti = K_best // len(add_ind)
yu = K_best % len(add_ind)
add_new = []
for i in range(ti):
add_new.extend(add_ind)
add_new.extend(add_ind[:yu])
return add_new
def Statis_func(stats=None):
if stats is None:
stats = {"fitness_dim_max": ("max",), "dim_is_target": ("sum",)}
func = {"max": np.max, "mean": np.mean, "min": np.min, "std": np.std, "sum": np.sum}
att = {
"fitness": lambda ind: ind.fitness.values[0],
"fitness_dim_max": lambda ind: ind.fitness.values[0] if ind.dim_score else -np.inf,
"fitness_dim_min": lambda ind: ind.fitness.values[0] if ind.dim_score else np.inf,
"dim_is_target": lambda ind: 1 if ind.dim_score else 0,
# special
"coef": lambda ind: score_dim(ind.y_dim, "coef", fuzzy=False),
"integer": lambda ind: score_dim(ind.y_dim, "integer", fuzzy=False),
"length": lambda ind: len(ind),
"height": lambda ind: ind.height,
"h_bgp": lambda ind: ind.h_bgp,
# multil-target
"weight_fitness": lambda ind: ind.fitness.wvalues,
"weight_fitness_dim": lambda ind: ind.fitness.wvalues if ind.dim_score else -np.inf,
# wvalues already carry the sign applied by the weights
}
sa_all = {}
for a, f in stats.items():
if a in att:
a_s = att[a]
elif callable(a):
a_s = a
a = getattr(a, "__name__", str(a))
else:
raise TypeError("the key must be in definition or a function")
sa = Statistics(a_s)
if isinstance(f, str):
f = [f, ]
for i, fi in enumerate(f):
assert fi in func
ff = func[fi]
sa.register(fi, ff)
sa_all["Cal_%s" % a] = sa
stats = MultiStatistics(sa_all)
return stats
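# Usage sketch (hedged: assumes a deap population whose individuals carry a
# ``fitness`` and the ``dim_score`` flag referenced above):
#
#   stats = Statis_func({"fitness": ("max", "mean"), "length": ("mean",)})
#   record = stats.compile(population)
#   # -> {"Cal_fitness": {"max": ..., "mean": ...}, "Cal_length": {"mean": ...}}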
######################################
# shown #
######################################
def _graph(expr):
"""Construct the graph of a tree expression. The tree expression must be
valid. It returns in order a node list, an edge list, and a dictionary of
the per node labels. The node are represented by numbers, the edges are
tuples connecting two nodes (number), and the labels are values of a
dictionary for which keys are the node numbers.
:param expr: A tree expression to convert into a graph.
:returns: A node list, an edge list, and a dictionary of labels.
The returned objects can be used directly to populate a
`pygraphviz <http://networkx.lanl.gov/pygraphviz/>`_ graph::
import pygraphviz as pgv
# [...] Execution of code that produce a tree expression
nodes, edges, labels = graph(expr)
g = pgv.AGraph()
g.add_nodes_from(nodes)
g.add_edges_from(edges)
g.layout(prog="dot")
for i in nodes:
n = g.get_node(i)
n.attr["label"] = labels[i]
g.draw("tree.pdf")
or a `NetworkX <http://networkx.github.com/>`_ graph::
import matplotlib.pyplot as plt
import networkx as nx
# [...] Execution of code that produce a tree expression
nodes, edges, labels = graph(expr)
g = nx.Graph()
g.add_nodes_from(nodes)
g.add_edges_from(edges)
pos = nx.graphviz_layout(g, prog="dot")
nx.draw_networkx_nodes(g, pos)
nx.draw_networkx_edges(g, pos)
nx.draw_networkx_labels(g, pos, labels)
plt.show()
.. note::
We encourage you to use `pygraphviz
<http://networkx.lanl.gov/pygraphviz/>`_ as the nodes might be plotted
out of order when using `NetworkX <http://networkx.github.com/>`_.
"""
nodes = list(range(len(expr)))
edges = list()
labels = dict()
stack = []
for i, node in enumerate(expr):
if stack:
edges.append((stack[-1][0], i))
stack[-1][1] -= 1
labels[i] = repr(node)
stack.append([i, node.arity])
while stack and stack[-1][1] == 0:
stack.pop()
return nodes, edges, labels
def varAnd(population, toolbox, cxpb, mutpb):
"""Apply crossover AND mutation to a deep copy of *population* (deap-style)."""
offspring = copy.deepcopy(population)
# Apply crossover and mutation on the offspring
for i in range(1, len(offspring), 2):
if random.random() < cxpb:
offspring[i - 1], offspring[i] = toolbox.mate(offspring[i - 1],
offspring[i])
del offspring[i - 1].fitness.values, offspring[i].fitness.values
for i in range(len(offspring)):
if random.random() < mutpb:
offspring[i], = toolbox.mutate(offspring[i])
del offspring[i].fitness.values
return offspring
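# Usage sketch (standard deap generation step; assumes a toolbox with
# ``mate``, ``mutate`` and ``evaluate`` registered):
#
#   offspring = varAnd(population, toolbox, cxpb=0.5, mutpb=0.2)
#   for ind in offspring:
#       if not ind.fitness.valid:
#           ind.fitness.values = toolbox.evaluate(ind)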
def varAndfus(population, toolbox, cxpb, mutpb, fus, mutpb_list=1.0):
"""
Parameters
----------
population: list of individuals.
toolbox: deap toolbox providing ``mate``.
cxpb: float, crossover probability.
mutpb: float, total mutation probability shared across ``fus``.
fus: list of mutation callables.
mutpb_list: float, list, or None; per-mutation weights applied to ``mutpb``.
Returns
-------
"""
offspring = copy.deepcopy(population)
# Apply crossover and mutation on the offspring
for i in range(1, len(offspring), 2):
if random.random() < cxpb:
offspring[i - 1], offspring[i] = toolbox.mate(offspring[i - 1],
offspring[i])
del offspring[i - 1].fitness.values, offspring[i].fitness.values
if isinstance(mutpb_list, float) or mutpb_list is None:
mutpb /= len(fus)
for j in fus:
for i in range(len(offspring)):
if random.random() < mutpb:
# print(random.random(), i)
offspring[i], = j(offspring[i])
del offspring[i].fitness.values
else:
assert len(fus) == len(mutpb_list)
mutpb_list = [i * mutpb for i in mutpb_list]
for j, m in zip(fus, mutpb_list):
for n, i in enumerate(offspring):
if random.random() < m:
k, = j(i)
else:
k = i
del k.fitness.values
offspring[n] = k
return offspring | PypiClean |
/NeodroidVision-0.3.0-py36-none-any.whl/neodroidvision/multitask/fission/skip_hourglass/decompress.py |
__author__ = "Christian Heider Nielsen"
__doc__ = r"""
Created on 19-09-2021
"""
import torch
from torch import nn
from neodroidvision.multitask.fission.skip_hourglass.modes import MergeMode, UpscaleMode
__all__ = ["Decompress"]
class Decompress(nn.Module):
"""
A helper Module that performs 2 convolutions and 1 UpConvolution.
A ReLU activation follows each convolution."""
@staticmethod
def decompress(
in_channels: int,
out_channels: int,
*,
mode: UpscaleMode = UpscaleMode.FractionalTranspose,
factor: int = 2,
) -> nn.Module:
"""
:param in_channels: number of input feature channels
:type in_channels: int
:param out_channels: number of output feature channels
:type out_channels: int
:param mode: upscale strategy (transposed conv, or upsample + 1x1 conv)
:type mode: UpscaleMode
:param factor: spatial upscaling factor
:type factor: int
:return: the upscaling module
:rtype: nn.Module"""
if mode == UpscaleMode.FractionalTranspose:
return nn.ConvTranspose2d(
in_channels, out_channels, kernel_size=2, stride=factor
)
else:
# out_channels is always going to be the same as in_channels
return nn.Sequential(
nn.Upsample(mode="bilinear", scale_factor=factor, align_corners=True),
nn.Conv2d(in_channels, out_channels, kernel_size=1, groups=1, stride=1),
)
def __init__(
self,
in_channels: int,
out_channels: int,
*,
merge_mode: MergeMode = MergeMode.Concat,
upscale_mode: UpscaleMode = UpscaleMode.FractionalTranspose,
activation=torch.relu,
):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.merge_mode = merge_mode
self.up_mode = upscale_mode
self.activation = activation
self.upconv = self.decompress(
self.in_channels, self.out_channels, mode=self.up_mode
)
if self.merge_mode == MergeMode.Concat:
self.conv1 = nn.Conv2d(
2 * self.out_channels,
self.out_channels,
kernel_size=3,
stride=1,
padding=1,
bias=True,
groups=1,
)
else: # num of input channels to conv2 is same
self.conv1 = nn.Conv2d(
self.out_channels,
self.out_channels,
kernel_size=3,
stride=1,
padding=1,
bias=True,
groups=1,
)
self.conv2 = nn.Conv2d(
self.out_channels,
self.out_channels,
kernel_size=3,
stride=1,
padding=1,
bias=True,
groups=1,
)
def forward(self, from_down: torch.Tensor, from_up: torch.Tensor) -> torch.Tensor:
"""
Forward pass
Arguments:
from_down: tensor from the encoder pathway
from_up: upconv'd tensor from the decoder pathway"""
from_up = self.upconv(from_up)
if self.merge_mode == MergeMode.Concat:
x = torch.cat((from_up, from_down), 1)
else:
x = from_up + from_down
return self.activation(self.conv2(self.activation(self.conv1(x)))) | PypiClean |
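if __name__ == "__main__":
    # Minimal smoke test (a sketch with assumed shapes, not part of the
    # original module): upsample 16x16 decoder features and merge a 32x32 skip.
    dec = Decompress(in_channels=64, out_channels=32)
    from_up = torch.randn(1, 64, 16, 16)  # decoder features to upsample
    from_down = torch.randn(1, 32, 32, 32)  # matching encoder skip features
    print(dec(from_down, from_up).shape)  # -> torch.Size([1, 32, 32, 32])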
/AnkiServer-2.0.6.tar.gz/AnkiServer-2.0.6/anki-bundled/anki/stdmodels.py |
from anki.lang import _
from anki.consts import MODEL_CLOZE
models = []
# Basic
##########################################################################
def addBasicModel(col):
mm = col.models
m = mm.new(_("Basic"))
fm = mm.newField(_("Front"))
mm.addField(m, fm)
fm = mm.newField(_("Back"))
mm.addField(m, fm)
t = mm.newTemplate(_("Card 1"))
t['qfmt'] = "{{"+_("Front")+"}}"
t['afmt'] = "{{FrontSide}}\n\n<hr id=answer>\n\n"+"{{"+_("Back")+"}}"
mm.addTemplate(m, t)
mm.add(m)
return m
models.append((lambda: _("Basic"), addBasicModel))
# Forward & Reverse
##########################################################################
def addForwardReverse(col):
mm = col.models
m = addBasicModel(col)
m['name'] = _("Basic (and reversed card)")
t = mm.newTemplate(_("Card 2"))
t['qfmt'] = "{{"+_("Back")+"}}"
t['afmt'] = "{{FrontSide}}\n\n<hr id=answer>\n\n"+"{{"+_("Front")+"}}"
mm.addTemplate(m, t)
return m
models.append((lambda: _("Basic (and reversed card)"), addForwardReverse))
# Forward & Optional Reverse
##########################################################################
def addForwardOptionalReverse(col):
mm = col.models
m = addBasicModel(col)
m['name'] = _("Basic (optional reversed card)")
av = _("Add Reverse")
fm = mm.newField(av)
mm.addField(m, fm)
t = mm.newTemplate(_("Card 2"))
t['qfmt'] = "{{#%s}}{{%s}}{{/%s}}" % (av, _("Back"), av)
t['afmt'] = "{{FrontSide}}\n\n<hr id=answer>\n\n"+"{{"+_("Front")+"}}"
mm.addTemplate(m, t)
return m
models.append((lambda: _("Basic (optional reversed card)"),
addForwardOptionalReverse))
# Cloze
##########################################################################
def addClozeModel(col):
mm = col.models
m = mm.new(_("Cloze"))
m['type'] = MODEL_CLOZE
txt = _("Text")
fm = mm.newField(txt)
mm.addField(m, fm)
fm = mm.newField(_("Extra"))
mm.addField(m, fm)
t = mm.newTemplate(_("Cloze"))
fmt = "{{cloze:%s}}" % txt
m['css'] += """
.cloze {
font-weight: bold;
color: blue;
}"""
t['qfmt'] = fmt
t['afmt'] = fmt + "<br>\n{{%s}}" % _("Extra")
mm.addTemplate(m, t)
mm.add(m)
return m
models.append((lambda: _("Cloze"), addClozeModel)) | PypiClean |
/Octopost-1.0.1b0.tar.gz/Octopost-1.0.1b0/docs/module.rst | ====================================
Code Documentation
====================================
ARPES
======
.. automodule:: arpes_model
:members:
DOS
======
.. automodule:: dos_model
:members:
Bandstructure
==============
.. automodule:: bandstructure_model
:members:
Info
=====
.. automodule:: output_read_model
:members:
Input
======
.. automodule:: input_parser
:members:
Units
=====
.. automodule:: units
:members:
Library
=======
.. automodule:: library
:members:
.. toctree::
| PypiClean |
/Flask-AppBuilder-jwi078-2.1.13.tar.gz/Flask-AppBuilder-jwi078-2.1.13/flask_appbuilder/security/views.py | import datetime
import logging
import re
from flask import abort, current_app, flash, g, redirect, request, session, url_for
from flask_babel import lazy_gettext
from flask_login import login_user, logout_user
import jwt
from werkzeug.security import generate_password_hash
from wtforms import PasswordField, validators
from wtforms.validators import EqualTo
from .decorators import has_access
from .forms import LoginForm_db, LoginForm_oid, ResetPasswordForm, UserInfoEdit
from .._compat import as_unicode
from ..actions import action
from ..baseviews import BaseView
from ..charts.views import DirectByChartView
from ..fieldwidgets import BS3PasswordFieldWidget
from ..views import expose, ModelView, SimpleFormView
from ..widgets import ListWidget, ShowWidget
log = logging.getLogger(__name__)
class PermissionModelView(ModelView):
route_base = "/permissions"
base_permissions = ["can_list"]
list_title = lazy_gettext("List Base Permissions")
show_title = lazy_gettext("Show Base Permission")
add_title = lazy_gettext("Add Base Permission")
edit_title = lazy_gettext("Edit Base Permission")
label_columns = {"name": lazy_gettext("Name")}
class ViewMenuModelView(ModelView):
route_base = "/viewmenus"
base_permissions = ["can_list"]
list_title = lazy_gettext("List View Menus")
show_title = lazy_gettext("Show View Menu")
add_title = lazy_gettext("Add View Menu")
edit_title = lazy_gettext("Edit View Menu")
label_columns = {"name": lazy_gettext("Name")}
class PermissionViewModelView(ModelView):
route_base = "/permissionviews"
base_permissions = ["can_list"]
list_title = lazy_gettext("List Permissions on Views/Menus")
show_title = lazy_gettext("Show Permission on Views/Menus")
add_title = lazy_gettext("Add Permission on Views/Menus")
edit_title = lazy_gettext("Edit Permission on Views/Menus")
label_columns = {
"permission": lazy_gettext("Permission"),
"view_menu": lazy_gettext("View/Menu"),
}
list_columns = ["permission", "view_menu"]
class ResetMyPasswordView(SimpleFormView):
"""
View for resetting own user password
"""
route_base = "/resetmypassword"
form = ResetPasswordForm
form_title = lazy_gettext("Reset Password Form")
redirect_url = "/"
message = lazy_gettext("Password Changed")
def form_post(self, form):
self.appbuilder.sm.reset_password(g.user.id, form.password.data)
flash(as_unicode(self.message), "info")
class ResetPasswordView(SimpleFormView):
"""
View for reseting all users password
"""
route_base = "/resetpassword"
form = ResetPasswordForm
form_title = lazy_gettext("Reset Password Form")
redirect_url = "/"
message = lazy_gettext("Password Changed")
def form_post(self, form):
pk = request.args.get("pk")
self.appbuilder.sm.reset_password(pk, form.password.data)
flash(as_unicode(self.message), "info")
class UserInfoEditView(SimpleFormView):
form = UserInfoEdit
form_title = lazy_gettext("Edit User Information")
redirect_url = "/"
message = lazy_gettext("User information changed")
def form_get(self, form):
item = self.appbuilder.sm.get_user_by_id(g.user.id)
# fills the form generic solution
for key, value in form.data.items():
if key == "csrf_token":
continue
form_field = getattr(form, key)
form_field.data = getattr(item, key)
def form_post(self, form):
form = self.form.refresh(request.form)
item = self.appbuilder.sm.get_user_by_id(g.user.id)
form.populate_obj(item)
self.appbuilder.sm.update_user(item)
flash(as_unicode(self.message), "info")
class UserModelView(ModelView):
route_base = "/users"
list_title = lazy_gettext("List Users")
show_title = lazy_gettext("Show User")
add_title = lazy_gettext("Add User")
edit_title = lazy_gettext("Edit User")
label_columns = {
"get_full_name": lazy_gettext("Full Name"),
"first_name": lazy_gettext("First Name"),
"last_name": lazy_gettext("Last Name"),
"username": lazy_gettext("User Name"),
"password": lazy_gettext("Password"),
"active": lazy_gettext("Is Active?"),
"email": lazy_gettext("Email"),
"roles": lazy_gettext("Role"),
"last_login": lazy_gettext("Last login"),
"login_count": lazy_gettext("Login count"),
"fail_login_count": lazy_gettext("Failed login count"),
"created_on": lazy_gettext("Created on"),
"created_by": lazy_gettext("Created by"),
"changed_on": lazy_gettext("Changed on"),
"changed_by": lazy_gettext("Changed by"),
}
description_columns = {
"first_name": lazy_gettext("Write the user first name or names"),
"last_name": lazy_gettext("Write the user last name"),
"username": lazy_gettext(
"Username valid for authentication on DB or LDAP, unused for OID auth"
),
"password": lazy_gettext(
"Please use a good password policy,"
" this application does not check this for you"
),
"active": lazy_gettext(
"It's not a good policy to remove a user, just make it inactive"
),
"email": lazy_gettext("The user's email, this will also be used for OID auth"),
"roles": lazy_gettext(
"The user role on the application,"
" this will associate with a list of permissions"
),
"conf_password": lazy_gettext("Please rewrite the user's password to confirm"),
}
list_columns = ["first_name", "last_name", "username", "email", "active", "roles"]
show_fieldsets = [
(
lazy_gettext("User info"),
{"fields": ["username", "active", "roles", "login_count"]},
),
(
lazy_gettext("Personal Info"),
{"fields": ["first_name", "last_name", "email"], "expanded": True},
),
(
lazy_gettext("Audit Info"),
{
"fields": [
"last_login",
"fail_login_count",
"created_on",
"created_by",
"changed_on",
"changed_by",
],
"expanded": False,
},
),
]
user_show_fieldsets = [
(
lazy_gettext("User info"),
{"fields": ["username", "active", "roles", "login_count"]},
),
(
lazy_gettext("Personal Info"),
{"fields": ["first_name", "last_name", "email"], "expanded": True},
),
]
search_exclude_columns = ["password"]
add_columns = ["first_name", "last_name", "username", "active", "email", "roles"]
edit_columns = ["first_name", "last_name", "username", "active", "email", "roles"]
user_info_title = lazy_gettext("Your user information")
@expose("/userinfo/")
@has_access
def userinfo(self):
item = self.datamodel.get(g.user.id, self._base_filters)
widgets = self._get_show_widget(
g.user.id, item, show_fieldsets=self.user_show_fieldsets
)
self.update_redirect()
return self.render_template(
self.show_template,
title=self.user_info_title,
widgets=widgets,
appbuilder=self.appbuilder,
)
@action("userinfoedit", lazy_gettext("Edit User"), "", "fa-edit", multiple=False)
def userinfoedit(self, item):
return redirect(
url_for(self.appbuilder.sm.userinfoeditview.__name__ + ".this_form_get")
)
class UserOIDModelView(UserModelView):
"""
View that add OID specifics to User view.
Override to implement your own custom view.
Then override useroidmodelview property on SecurityManager
"""
pass
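# Sketch of the override pattern described above (hedged: the wiring and
# class names are illustrative; ``security_manager_class`` is the standard
# AppBuilder hook):
#
#   class MyUserOIDModelView(UserOIDModelView):
#       list_columns = ["username", "email", "active"]
#
#   class MySecurityManager(SecurityManager):
#       useroidmodelview = MyUserOIDModelView
#
#   appbuilder = AppBuilder(app, db.session,
#                           security_manager_class=MySecurityManager)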
class UserLDAPModelView(UserModelView):
"""
View that add LDAP specifics to User view.
Override to implement your own custom view.
Then override userldapmodelview property on SecurityManager
"""
pass
class UserOAuthModelView(UserModelView):
"""
View that add OAUTH specifics to User view.
Override to implement your own custom view.
Then override userldapmodelview property on SecurityManager
"""
pass
class UserRemoteUserModelView(UserModelView):
"""
View that add REMOTE_USER specifics to User view.
Override to implement your own custom view.
Then override userldapmodelview property on SecurityManager
"""
pass
class UserDBModelView(UserModelView):
"""
View that add DB specifics to User view.
Override to implement your own custom view.
Then override userdbmodelview property on SecurityManager
"""
add_form_extra_fields = {
"password": PasswordField(
lazy_gettext("Password"),
description=lazy_gettext(
"Please use a good password policy,"
" this application does not check this for you"
),
validators=[validators.DataRequired()],
widget=BS3PasswordFieldWidget(),
),
"conf_password": PasswordField(
lazy_gettext("Confirm Password"),
description=lazy_gettext("Please rewrite the user's password to confirm"),
validators=[
EqualTo("password", message=lazy_gettext("Passwords must match"))
],
widget=BS3PasswordFieldWidget(),
),
}
add_columns = [
"first_name",
"last_name",
"username",
"active",
"email",
"roles",
"password",
"conf_password",
]
@expose("/show/<pk>", methods=["GET"])
@has_access
def show(self, pk):
actions = dict()
actions["resetpasswords"] = self.actions.get("resetpasswords")
item = self.datamodel.get(pk, self._base_filters)
if not item:
abort(404)
widgets = self._get_show_widget(pk, item, actions=actions)
self.update_redirect()
return self.render_template(
self.show_template,
pk=pk,
title=self.show_title,
widgets=widgets,
appbuilder=self.appbuilder,
related_views=self._related_views,
)
@expose("/userinfo/")
@has_access
def userinfo(self):
actions = dict()
actions["resetmypassword"] = self.actions.get("resetmypassword")
actions["userinfoedit"] = self.actions.get("userinfoedit")
item = self.datamodel.get(g.user.id, self._base_filters)
widgets = self._get_show_widget(
g.user.id, item, actions=actions, show_fieldsets=self.user_show_fieldsets
)
self.update_redirect()
return self.render_template(
self.show_template,
title=self.user_info_title,
widgets=widgets,
appbuilder=self.appbuilder,
)
@action(
"resetmypassword",
lazy_gettext("Reset my password"),
"",
"fa-lock",
multiple=False,
)
def resetmypassword(self, item):
return redirect(
url_for(self.appbuilder.sm.resetmypasswordview.__name__ + ".this_form_get")
)
@action(
"resetpasswords", lazy_gettext("Reset Password"), "", "fa-lock", multiple=False
)
def resetpasswords(self, item):
return redirect(
url_for(
self.appbuilder.sm.resetpasswordview.__name__ + ".this_form_get",
pk=item.id,
)
)
def pre_update(self, item):
item.changed_on = datetime.datetime.now()
item.changed_by_fk = g.user.id
def pre_add(self, item):
item.password = generate_password_hash(item.password)
class UserStatsChartView(DirectByChartView):
chart_title = lazy_gettext("User Statistics")
label_columns = {
"username": lazy_gettext("User Name"),
"login_count": lazy_gettext("Login count"),
"fail_login_count": lazy_gettext("Failed login count"),
}
search_columns = UserModelView.search_columns
definitions = [
{"label": "Login Count", "group": "username", "series": ["login_count"]},
{
"label": "Failed Login Count",
"group": "username",
"series": ["fail_login_count"],
},
]
class RoleListWidget(ListWidget):
template = 'appbuilder/general/widgets/roles/list.html'
def __init__(self, **kwargs):
kwargs['appbuilder'] = current_app.appbuilder
super().__init__(**kwargs)
class RoleShowWidget(ShowWidget):
template = 'appbuilder/general/widgets/roles/show.html'
def __init__(self, **kwargs):
kwargs['appbuilder'] = current_app.appbuilder
super().__init__(**kwargs)
class RoleModelView(ModelView):
route_base = "/roles"
list_title = lazy_gettext("List Roles")
show_title = lazy_gettext("Show Role")
add_title = lazy_gettext("Add Role")
edit_title = lazy_gettext("Edit Role")
list_widget = RoleListWidget
show_widget = RoleShowWidget
label_columns = {
"name": lazy_gettext("Name"),
"permissions": lazy_gettext("Permissions"),
}
list_columns = ["name", "permissions"]
show_columns = ["name", "permissions"]
edit_columns = ["name", "permissions"]
add_columns = edit_columns
order_columns = ["name"]
@action(
"copyrole",
lazy_gettext("Copy Role"),
lazy_gettext("Copy the selected roles?"),
icon="fa-copy",
single=False,
)
def copy_role(self, items):
self.update_redirect()
for item in items:
new_role = item.__class__()
new_role.name = item.name
new_role.permissions = item.permissions
new_role.name = new_role.name + " copy"
self.datamodel.add(new_role)
return redirect(self.get_redirect())
class RegisterUserModelView(ModelView):
route_base = "/registeruser"
base_permissions = ["can_list", "can_show", "can_delete"]
list_title = lazy_gettext("List of Registration Requests")
show_title = lazy_gettext("Show Registration")
list_columns = ["username", "registration_date", "email"]
show_exclude_columns = ["password"]
search_exclude_columns = ["password"]
class AuthView(BaseView):
route_base = ""
login_template = ""
invalid_login_message = lazy_gettext("Invalid login. Please try again.")
title = lazy_gettext("Sign In")
@expose("/login/", methods=["GET", "POST"])
def login(self):
pass
@expose("/logout/")
def logout(self):
logout_user()
return redirect(self.appbuilder.get_url_for_index)
class AuthDBView(AuthView):
login_template = "appbuilder/general/security/login_db.html"
@expose("/login/", methods=["GET", "POST"])
def login(self):
if g.user is not None and g.user.is_authenticated:
return redirect(self.appbuilder.get_url_for_index)
form = LoginForm_db()
if form.validate_on_submit():
user = self.appbuilder.sm.auth_user_db(
form.username.data, form.password.data
)
if not user:
flash(as_unicode(self.invalid_login_message), "warning")
return redirect(self.appbuilder.get_url_for_login)
login_user(user, remember=False)
next_url = request.args.get('next', '')
if not next_url:
next_url = self.appbuilder.get_url_for_index
return redirect(next_url)
return self.render_template(
self.login_template, title=self.title, form=form, appbuilder=self.appbuilder
)
class AuthLDAPView(AuthView):
login_template = "appbuilder/general/security/login_ldap.html"
@expose("/login/", methods=["GET", "POST"])
def login(self):
if g.user is not None and g.user.is_authenticated:
return redirect(self.appbuilder.get_url_for_index)
form = LoginForm_db()
if form.validate_on_submit():
user = self.appbuilder.sm.auth_user_ldap(
form.username.data, form.password.data
)
if not user:
flash(as_unicode(self.invalid_login_message), "warning")
return redirect(self.appbuilder.get_url_for_login)
login_user(user, remember=False)
next_url = request.args.get('next', '')
if not next_url:
next_url = self.appbuilder.get_url_for_index
return redirect(next_url)
return self.render_template(
self.login_template, title=self.title, form=form, appbuilder=self.appbuilder
)
"""
For Future Use, API Auth, must check howto keep REST stateless
"""
"""
@expose_api(name='auth',url='/api/auth')
def auth(self):
if g.user is not None and g.user.is_authenticated:
http_return_code = 401
response = make_response(
jsonify(
{
'message': 'Login Failed already authenticated',
'severity': 'critical'
}
),
http_return_code
)
username = str(request.args.get('username'))
password = str(request.args.get('password'))
user = self.appbuilder.sm.auth_user_ldap(username, password)
if not user:
http_return_code = 401
response = make_response(
jsonify(
{
'message': 'Login Failed',
'severity': 'critical'
}
),
http_return_code
)
else:
login_user(user, remember=False)
http_return_code = 201
response = make_response(
jsonify(
{
'message': 'Login Success',
'severity': 'info'
}
),
http_return_code
)
return response
"""
class AuthOIDView(AuthView):
login_template = "appbuilder/general/security/login_oid.html"
oid_ask_for = ["email"]
oid_ask_for_optional = []
def __init__(self):
super(AuthOIDView, self).__init__()
@expose("/login/", methods=["GET", "POST"])
def login(self, flag=True):
@self.appbuilder.sm.oid.loginhandler
def login_handler(self):
if g.user is not None and g.user.is_authenticated:
return redirect(self.appbuilder.get_url_for_index)
form = LoginForm_oid()
if form.validate_on_submit():
session["remember_me"] = form.remember_me.data
return self.appbuilder.sm.oid.try_login(
form.openid.data,
ask_for=self.oid_ask_for,
ask_for_optional=self.oid_ask_for_optional,
)
return self.render_template(
self.login_template,
title=self.title,
form=form,
providers=self.appbuilder.sm.openid_providers,
appbuilder=self.appbuilder,
)
@self.appbuilder.sm.oid.after_login
def after_login(resp):
if resp.email is None or resp.email == "":
flash(as_unicode(self.invalid_login_message), "warning")
return redirect("login")
user = self.appbuilder.sm.auth_user_oid(resp.email)
if user is None:
flash(as_unicode(self.invalid_login_message), "warning")
return redirect("login")
remember_me = False
if "remember_me" in session:
remember_me = session["remember_me"]
session.pop("remember_me", None)
login_user(user, remember=remember_me)
return redirect(self.appbuilder.get_url_for_index)
return login_handler(self)
class AuthOAuthView(AuthView):
login_template = "appbuilder/general/security/login_oauth.html"
@expose("/login/")
@expose("/login/<provider>")
@expose("/login/<provider>/<register>")
def login(self, provider=None, register=None):
log.debug("Provider: {0}".format(provider))
if g.user is not None and g.user.is_authenticated:
log.debug("Already authenticated {0}".format(g.user))
return redirect(self.appbuilder.get_url_for_index)
if provider is None:
return self.render_template(
self.login_template,
providers=self.appbuilder.sm.oauth_providers,
title=self.title,
appbuilder=self.appbuilder,
)
else:
log.debug("Going to call authorize for: {0}".format(provider))
state = jwt.encode(
request.args.to_dict(flat=False),
self.appbuilder.app.config["SECRET_KEY"],
algorithm="HS256",
)
try:
if register:
log.debug("Login to Register")
session["register"] = True
if provider == "twitter":
return self.appbuilder.sm.oauth_remotes[provider].authorize(
callback=url_for(
".oauth_authorized",
provider=provider,
_external=True,
state=state,
)
)
else:
return self.appbuilder.sm.oauth_remotes[provider].authorize(
callback=url_for(
".oauth_authorized", provider=provider, _external=True
),
state=state,
)
except Exception as e:
log.error("Error on OAuth authorize: {0}".format(e))
flash(as_unicode(self.invalid_login_message), "warning")
return redirect(self.appbuilder.get_url_for_index)
@expose("/oauth-authorized/<provider>")
def oauth_authorized(self, provider):
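        """
        OAuth callback: exchange the authorization response for user info,
        enforce the optional per-provider email whitelist, log the user
        in, and redirect to the ``next`` URL recovered from the signed
        ``state`` token.
        """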
log.debug("Authorized init")
resp = self.appbuilder.sm.oauth_remotes[provider].authorized_response()
if resp is None:
flash(u"You denied the request to sign in.", "warning")
return redirect("login")
log.debug("OAUTH Authorized resp: {0}".format(resp))
        # Retrieve this user's info from the OAuth provider
try:
self.appbuilder.sm.set_oauth_session(provider, resp)
userinfo = self.appbuilder.sm.oauth_user_info(provider, resp)
except Exception as e:
log.error("Error returning OAuth user info: {0}".format(e))
user = None
else:
log.debug("User info retrieved from {0}: {1}".format(provider, userinfo))
            # Enforce the optional per-provider email whitelist: reject the
            # login if the user's email matches none of its patterns
if provider in self.appbuilder.sm.oauth_whitelists:
whitelist = self.appbuilder.sm.oauth_whitelists[provider]
allow = False
for e in whitelist:
if re.search(e, userinfo["email"]):
allow = True
break
if not allow:
flash(u"You are not authorized.", "warning")
return redirect("login")
else:
log.debug("No whitelist for OAuth provider")
user = self.appbuilder.sm.auth_user_oauth(userinfo)
if user is None:
flash(as_unicode(self.invalid_login_message), "warning")
return redirect("login")
else:
login_user(user)
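        # Recover the pre-login destination from the signed state token;
        # an invalid signature indicates a tampered callback.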
try:
state = jwt.decode(
request.args["state"],
self.appbuilder.app.config["SECRET_KEY"],
algorithms=["HS256"],
)
except jwt.InvalidTokenError:
raise Exception("State signature is not valid!")
try:
next_url = state["next"][0] or self.appbuilder.get_url_for_index
except (KeyError, IndexError):
next_url = self.appbuilder.get_url_for_index
return redirect(next_url)
class AuthRemoteUserView(AuthView):
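    """
    Authentication view for the REMOTE_USER pattern: an upstream web
    server authenticates the request and exposes the username in the
    ``REMOTE_USER`` WSGI environment variable, so no login form is shown.
    """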
login_template = ""
@expose("/login/")
def login(self):
username = request.environ.get("REMOTE_USER")
if g.user is not None and g.user.is_authenticated:
return redirect(self.appbuilder.get_url_for_index)
if username:
user = self.appbuilder.sm.auth_user_remote_user(username)
if user is None:
flash(as_unicode(self.invalid_login_message), "warning")
else:
login_user(user)
else:
flash(as_unicode(self.invalid_login_message), "warning")
return redirect(self.appbuilder.get_url_for_index) | PypiClean |
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/bower_components/webcomponentsjs/ShadowDOM.min.js | "undefined"==typeof WeakMap&&!function(){var e=Object.defineProperty,t=Date.now()%1e9,n=function(){this.name="__st"+(1e9*Math.random()>>>0)+(t++ +"__")};n.prototype={set:function(t,n){var r=t[this.name];return r&&r[0]===t?r[1]=n:e(t,this.name,{value:[t,n],writable:!0}),this},get:function(e){var t;return(t=e[this.name])&&t[0]===e?t[1]:void 0},"delete":function(e){var t=e[this.name];return!(!t||t[0]!==e)&&(t[0]=t[1]=void 0,!0)},has:function(e){var t=e[this.name];return!!t&&t[0]===e}},window.WeakMap=n}(),window.ShadowDOMPolyfill={},function(e){"use strict";function t(){if("undefined"!=typeof chrome&&chrome.app&&chrome.app.runtime)return!1;if(navigator.getDeviceStorage)return!1;try{var e=new Function("return true;");return e()}catch(t){return!1}}function n(e){if(!e)throw new Error("Assertion failed")}function r(e,t){for(var n=k(t),r=0;r<n.length;r++){var o=n[r];A(e,o,F(t,o))}return e}function o(e,t){for(var n=k(t),r=0;r<n.length;r++){var o=n[r];switch(o){case"arguments":case"caller":case"length":case"name":case"prototype":case"toString":continue}A(e,o,F(t,o))}return e}function i(e,t){for(var n=0;n<t.length;n++)if(t[n]in e)return t[n]}function a(e,t,n){B.value=n,A(e,t,B)}function s(e,t){var n=e.__proto__||Object.getPrototypeOf(e);if(U)try{k(n)}catch(r){n=n.__proto__}var o=R.get(n);if(o)return o;var i=s(n),a=E(i);return g(n,a,t),a}function c(e,t){m(e,t,!0)}function u(e,t){m(t,e,!1)}function l(e){return/^on[a-z]+$/.test(e)}function p(e){return/^[a-zA-Z_$][a-zA-Z_$0-9]*$/.test(e)}function d(e){return I&&p(e)?new Function("return this.__impl4cf1e782hg__."+e):function(){return this.__impl4cf1e782hg__[e]}}function f(e){return I&&p(e)?new Function("v","this.__impl4cf1e782hg__."+e+" = v"):function(t){this.__impl4cf1e782hg__[e]=t}}function h(e){return I&&p(e)?new Function("return this.__impl4cf1e782hg__."+e+".apply(this.__impl4cf1e782hg__, arguments)"):function(){return this.__impl4cf1e782hg__[e].apply(this.__impl4cf1e782hg__,arguments)}}function w(e,t){try{return e===window&&"showModalDialog"===t?q:Object.getOwnPropertyDescriptor(e,t)}catch(n){return q}}function m(t,n,r,o){for(var i=k(t),a=0;a<i.length;a++){var s=i[a];if("polymerBlackList_"!==s&&!(s in n||t.polymerBlackList_&&t.polymerBlackList_[s])){U&&t.__lookupGetter__(s);var c,u,p=w(t,s);if("function"!=typeof p.value){var m=l(s);c=m?e.getEventHandlerGetter(s):d(s),(p.writable||p.set||V)&&(u=m?e.getEventHandlerSetter(s):f(s));var v=V||p.configurable;A(n,s,{get:c,set:u,configurable:v,enumerable:p.enumerable})}else r&&(n[s]=h(s))}}}function v(e,t,n){if(null!=e){var r=e.prototype;g(r,t,n),o(t,e)}}function g(e,t,r){var o=t.prototype;n(void 0===R.get(e)),R.set(e,t),P.set(o,e),c(e,o),r&&u(o,r),a(o,"constructor",t),t.prototype=o}function b(e,t){return R.get(t.prototype)===e}function y(e){var t=Object.getPrototypeOf(e),n=s(t),r=E(n);return g(t,r,e),r}function E(e){function t(t){e.call(this,t)}var n=Object.create(e.prototype);return n.constructor=t,t.prototype=n,t}function S(e){return e&&e.__impl4cf1e782hg__}function M(e){return!S(e)}function T(e){if(null===e)return null;n(M(e));var t=e.__wrapper8e3dd93a60__;return null!=t?t:e.__wrapper8e3dd93a60__=new(s(e,e))(e)}function O(e){return null===e?null:(n(S(e)),e.__impl4cf1e782hg__)}function N(e){return e.__impl4cf1e782hg__}function j(e,t){t.__impl4cf1e782hg__=e,e.__wrapper8e3dd93a60__=t}function L(e){return e&&S(e)?O(e):e}function _(e){return e&&!S(e)?T(e):e}function D(e,t){null!==t&&(n(M(e)),n(void 
0===t||S(t)),e.__wrapper8e3dd93a60__=t)}function C(e,t,n){G.get=n,A(e.prototype,t,G)}function H(e,t){C(e,t,function(){return T(this.__impl4cf1e782hg__[t])})}function x(e,t){e.forEach(function(e){t.forEach(function(t){e.prototype[t]=function(){var e=_(this);return e[t].apply(e,arguments)}})})}var R=new WeakMap,P=new WeakMap,W=Object.create(null),I=t(),A=Object.defineProperty,k=Object.getOwnPropertyNames,F=Object.getOwnPropertyDescriptor,B={value:void 0,configurable:!0,enumerable:!1,writable:!0};k(window);var U=/Firefox/.test(navigator.userAgent),q={get:function(){},set:function(e){},configurable:!0,enumerable:!0},V=function(){var e=Object.getOwnPropertyDescriptor(Node.prototype,"nodeType");return e&&!e.get&&!e.set}(),G={get:void 0,configurable:!0,enumerable:!0};e.addForwardingProperties=c,e.assert=n,e.constructorTable=R,e.defineGetter=C,e.defineWrapGetter=H,e.forwardMethodsToWrapper=x,e.isIdentifierName=p,e.isWrapper=S,e.isWrapperFor=b,e.mixin=r,e.nativePrototypeTable=P,e.oneOf=i,e.registerObject=y,e.registerWrapper=v,e.rewrap=D,e.setWrapper=j,e.unsafeUnwrap=N,e.unwrap=O,e.unwrapIfNeeded=L,e.wrap=T,e.wrapIfNeeded=_,e.wrappers=W}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e,t,n){return{index:e,removed:t,addedCount:n}}function n(){}var r=0,o=1,i=2,a=3;n.prototype={calcEditDistances:function(e,t,n,r,o,i){for(var a=i-o+1,s=n-t+1,c=new Array(a),u=0;u<a;u++)c[u]=new Array(s),c[u][0]=u;for(var l=0;l<s;l++)c[0][l]=l;for(var u=1;u<a;u++)for(var l=1;l<s;l++)if(this.equals(e[t+l-1],r[o+u-1]))c[u][l]=c[u-1][l-1];else{var p=c[u-1][l]+1,d=c[u][l-1]+1;c[u][l]=p<d?p:d}return c},spliceOperationsFromEditDistances:function(e){for(var t=e.length-1,n=e[0].length-1,s=e[t][n],c=[];t>0||n>0;)if(0!=t)if(0!=n){var u,l=e[t-1][n-1],p=e[t-1][n],d=e[t][n-1];u=p<d?p<l?p:l:d<l?d:l,u==l?(l==s?c.push(r):(c.push(o),s=l),t--,n--):u==p?(c.push(a),t--,s=p):(c.push(i),n--,s=d)}else c.push(a),t--;else c.push(i),n--;return c.reverse(),c},calcSplices:function(e,n,s,c,u,l){var p=0,d=0,f=Math.min(s-n,l-u);if(0==n&&0==u&&(p=this.sharedPrefix(e,c,f)),s==e.length&&l==c.length&&(d=this.sharedSuffix(e,c,f-p)),n+=p,u+=p,s-=d,l-=d,s-n==0&&l-u==0)return[];if(n==s){for(var h=t(n,[],0);u<l;)h.removed.push(c[u++]);return[h]}if(u==l)return[t(n,[],s-n)];for(var w=this.spliceOperationsFromEditDistances(this.calcEditDistances(e,n,s,c,u,l)),h=void 0,m=[],v=n,g=u,b=0;b<w.length;b++)switch(w[b]){case r:h&&(m.push(h),h=void 0),v++,g++;break;case o:h||(h=t(v,[],0)),h.addedCount++,v++,h.removed.push(c[g]),g++;break;case i:h||(h=t(v,[],0)),h.addedCount++,v++;break;case a:h||(h=t(v,[],0)),h.removed.push(c[g]),g++}return h&&m.push(h),m},sharedPrefix:function(e,t,n){for(var r=0;r<n;r++)if(!this.equals(e[r],t[r]))return r;return n},sharedSuffix:function(e,t,n){for(var r=e.length,o=t.length,i=0;i<n&&this.equals(e[--r],t[--o]);)i++;return i},calculateSplices:function(e,t){return this.calcSplices(e,0,e.length,t,0,t.length)},equals:function(e,t){return e===t}},e.ArraySplice=n}(window.ShadowDOMPolyfill),function(e){"use strict";function t(){a=!1;var e=i.slice(0);i=[];for(var t=0;t<e.length;t++)(0,e[t])()}function n(e){i.push(e),a||(a=!0,r(t,0))}var r,o=window.MutationObserver,i=[],a=!1;if(o){var s=1,c=new o(t),u=document.createTextNode(s);c.observe(u,{characterData:!0}),r=function(){s=(s+1)%2,u.data=s}}else r=window.setTimeout;e.setEndOfMicrotask=n}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){e.scheduled_||(e.scheduled_=!0,h.push(e),w||(l(n),w=!0))}function n(){for(w=!1;h.length;){var e=h;h=[],e.sort(function(e,t){return 
e.uid_-t.uid_});for(var t=0;t<e.length;t++){var n=e[t];n.scheduled_=!1;var r=n.takeRecords();i(n),r.length&&n.callback_(r,n)}}}function r(e,t){this.type=e,this.target=t,this.addedNodes=new d.NodeList,this.removedNodes=new d.NodeList,this.previousSibling=null,this.nextSibling=null,this.attributeName=null,this.attributeNamespace=null,this.oldValue=null}function o(e,t){for(;e;e=e.parentNode){var n=f.get(e);if(n)for(var r=0;r<n.length;r++){var o=n[r];o.options.subtree&&o.addTransientObserver(t)}}}function i(e){for(var t=0;t<e.nodes_.length;t++){var n=e.nodes_[t],r=f.get(n);if(!r)return;for(var o=0;o<r.length;o++){var i=r[o];i.observer===e&&i.removeTransientObservers()}}}function a(e,n,o){for(var i=Object.create(null),a=Object.create(null),s=e;s;s=s.parentNode){var c=f.get(s);if(c)for(var u=0;u<c.length;u++){var l=c[u],p=l.options;if((s===e||p.subtree)&&("attributes"!==n||p.attributes)&&("attributes"!==n||!p.attributeFilter||null===o.namespace&&p.attributeFilter.indexOf(o.name)!==-1)&&("characterData"!==n||p.characterData)&&("childList"!==n||p.childList)){var d=l.observer;i[d.uid_]=d,("attributes"===n&&p.attributeOldValue||"characterData"===n&&p.characterDataOldValue)&&(a[d.uid_]=o.oldValue)}}}for(var h in i){var d=i[h],w=new r(n,e);"name"in o&&"namespace"in o&&(w.attributeName=o.name,w.attributeNamespace=o.namespace),o.addedNodes&&(w.addedNodes=o.addedNodes),o.removedNodes&&(w.removedNodes=o.removedNodes),o.previousSibling&&(w.previousSibling=o.previousSibling),o.nextSibling&&(w.nextSibling=o.nextSibling),void 0!==a[h]&&(w.oldValue=a[h]),t(d),d.records_.push(w)}}function s(e){if(this.childList=!!e.childList,this.subtree=!!e.subtree,"attributes"in e||!("attributeOldValue"in e||"attributeFilter"in e)?this.attributes=!!e.attributes:this.attributes=!0,"characterDataOldValue"in e&&!("characterData"in e)?this.characterData=!0:this.characterData=!!e.characterData,!this.attributes&&(e.attributeOldValue||"attributeFilter"in e)||!this.characterData&&e.characterDataOldValue)throw new TypeError;if(this.characterData=!!e.characterData,this.attributeOldValue=!!e.attributeOldValue,this.characterDataOldValue=!!e.characterDataOldValue,"attributeFilter"in e){if(null==e.attributeFilter||"object"!=typeof e.attributeFilter)throw new TypeError;this.attributeFilter=m.call(e.attributeFilter)}else this.attributeFilter=null}function c(e){this.callback_=e,this.nodes_=[],this.records_=[],this.uid_=++v,this.scheduled_=!1}function u(e,t,n){this.observer=e,this.target=t,this.options=n,this.transientObservedNodes=[]}var l=e.setEndOfMicrotask,p=e.wrapIfNeeded,d=e.wrappers,f=new WeakMap,h=[],w=!1,m=Array.prototype.slice,v=0;c.prototype={constructor:c,observe:function(e,t){e=p(e);var n,r=new s(t),o=f.get(e);o||f.set(e,o=[]);for(var i=0;i<o.length;i++)o[i].observer===this&&(n=o[i],n.removeTransientObservers(),n.options=r);n||(n=new u(this,e,r),o.push(n),this.nodes_.push(e))},disconnect:function(){this.nodes_.forEach(function(e){for(var t=f.get(e),n=0;n<t.length;n++){var r=t[n];if(r.observer===this){t.splice(n,1);break}}},this),this.records_=[]},takeRecords:function(){var e=this.records_;return this.records_=[],e}},u.prototype={addTransientObserver:function(e){if(e!==this.target){t(this.observer),this.transientObservedNodes.push(e);var n=f.get(e);n||f.set(e,n=[]),n.push(this)}},removeTransientObservers:function(){var e=this.transientObservedNodes;this.transientObservedNodes=[];for(var t=0;t<e.length;t++)for(var 
n=e[t],r=f.get(n),o=0;o<r.length;o++)if(r[o]===this){r.splice(o,1);break}}},e.enqueueMutation=a,e.registerTransientObservers=o,e.wrappers.MutationObserver=c,e.wrappers.MutationRecord=r}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e,t){this.root=e,this.parent=t}function n(e,t){if(e.treeScope_!==t){e.treeScope_=t;for(var r=e.shadowRoot;r;r=r.olderShadowRoot)r.treeScope_.parent=t;for(var o=e.firstChild;o;o=o.nextSibling)n(o,t)}}function r(n){if(n instanceof e.wrappers.Window,n.treeScope_)return n.treeScope_;var o,i=n.parentNode;return o=i?r(i):new t(n,null),n.treeScope_=o}t.prototype={get renderer(){return this.root instanceof e.wrappers.ShadowRoot?e.getRendererForHost(this.root.host):null},contains:function(e){for(;e;e=e.parent)if(e===this)return!0;return!1}},e.TreeScope=t,e.getTreeScope=r,e.setTreeScope=n}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){return e instanceof G.ShadowRoot}function n(e){return A(e).root}function r(e,r){var s=[],c=e;for(s.push(c);c;){var u=a(c);if(u&&u.length>0){for(var l=0;l<u.length;l++){var d=u[l];if(i(d)){var f=n(d),h=f.olderShadowRoot;h&&s.push(h)}s.push(d)}c=u[u.length-1]}else if(t(c)){if(p(e,c)&&o(r))break;c=c.host,s.push(c)}else c=c.parentNode,c&&s.push(c)}return s}function o(e){if(!e)return!1;switch(e.type){case"abort":case"error":case"select":case"change":case"load":case"reset":case"resize":case"scroll":case"selectstart":return!0}return!1}function i(e){return e instanceof HTMLShadowElement}function a(t){return e.getDestinationInsertionPoints(t)}function s(e,t){if(0===e.length)return t;t instanceof G.Window&&(t=t.document);for(var n=A(t),r=e[0],o=A(r),i=u(n,o),a=0;a<e.length;a++){var s=e[a];if(A(s)===i)return s}return e[e.length-1]}function c(e){for(var t=[];e;e=e.parent)t.push(e);return t}function u(e,t){for(var n=c(e),r=c(t),o=null;n.length>0&&r.length>0;){var i=n.pop(),a=r.pop();if(i!==a)break;o=i}return o}function l(e,t,n){t instanceof G.Window&&(t=t.document);var o,i=A(t),a=A(n),s=r(n,e),o=u(i,a);o||(o=a.root);for(var c=o;c;c=c.parent)for(var l=0;l<s.length;l++){var p=s[l];if(A(p)===c)return p}return null}function p(e,t){return A(e)===A(t)}function d(e){if(!X.get(e)&&(X.set(e,!0),h(V(e),V(e.target)),W)){var t=W;throw W=null,t}}function f(e){switch(e.type){case"load":case"beforeunload":case"unload":return!0}return!1}function h(t,n){if(K.get(t))throw new Error("InvalidStateError");K.set(t,!0),e.renderAllPending();var o,i,a;if(f(t)&&!t.bubbles){var s=n;s instanceof G.Document&&(a=s.defaultView)&&(i=s,o=[])}if(!o)if(n instanceof G.Window)a=n,o=[];else if(o=r(n,t),!f(t)){var s=o[o.length-1];s instanceof G.Document&&(a=s.defaultView)}return ne.set(t,o),w(t,o,a,i)&&m(t,o,a,i)&&v(t,o,a,i),J.set(t,re),$["delete"](t,null),K["delete"](t),t.defaultPrevented}function w(e,t,n,r){var o=oe;if(n&&!g(n,e,o,t,r))return!1;for(var i=t.length-1;i>0;i--)if(!g(t[i],e,o,t,r))return!1;return!0}function m(e,t,n,r){var o=ie,i=t[0]||n;return g(i,e,o,t,r)}function v(e,t,n,r){for(var o=ae,i=1;i<t.length;i++)if(!g(t[i],e,o,t,r))return;n&&t.length>0&&g(n,e,o,t,r)}function g(e,t,n,r,o){var i=z.get(e);if(!i)return!0;var a=o||s(r,e);if(a===e){if(n===oe)return!0;n===ae&&(n=ie)}else if(n===ae&&!t.bubbles)return!0;if("relatedTarget"in t){var c=q(t),u=c.relatedTarget;if(u){if(u instanceof Object&&u.addEventListener){var p=V(u),d=l(t,e,p);if(d===a)return!0}else d=null;Z.set(t,d)}}J.set(t,n);var f=t.type,h=!1;Y.set(t,a),$.set(t,e),i.depth++;for(var w=0,m=i.length;w<m;w++){var v=i[w];if(v.removed)h=!0;else 
if(!(v.type!==f||!v.capture&&n===oe||v.capture&&n===ae))try{if("function"==typeof v.handler?v.handler.call(e,t):v.handler.handleEvent(t),ee.get(t))return!1}catch(g){W||(W=g)}}if(i.depth--,h&&0===i.depth){var b=i.slice();i.length=0;for(var w=0;w<b.length;w++)b[w].removed||i.push(b[w])}return!Q.get(t)}function b(e,t,n){this.type=e,this.handler=t,this.capture=Boolean(n)}function y(e,t){if(!(e instanceof se))return V(T(se,"Event",e,t));var n=e;return be||"beforeunload"!==n.type||this instanceof O?void B(n,this):new O(n)}function E(e){return e&&e.relatedTarget?Object.create(e,{relatedTarget:{value:q(e.relatedTarget)}}):e}function S(e,t,n){var r=window[e],o=function(t,n){return t instanceof r?void B(t,this):V(T(r,e,t,n))};if(o.prototype=Object.create(t.prototype),n&&k(o.prototype,n),r)try{F(r,o,new r("temp"))}catch(i){F(r,o,document.createEvent(e))}return o}function M(e,t){return function(){arguments[t]=q(arguments[t]);var n=q(this);n[e].apply(n,arguments)}}function T(e,t,n,r){if(ve)return new e(n,E(r));var o=q(document.createEvent(t)),i=me[t],a=[n];return Object.keys(i).forEach(function(e){var t=null!=r&&e in r?r[e]:i[e];"relatedTarget"===e&&(t=q(t)),a.push(t)}),o["init"+t].apply(o,a),o}function O(e){y.call(this,e)}function N(e){return"function"==typeof e||e&&e.handleEvent}function j(e){switch(e){case"DOMAttrModified":case"DOMAttributeNameChanged":case"DOMCharacterDataModified":case"DOMElementNameChanged":case"DOMNodeInserted":case"DOMNodeInsertedIntoDocument":case"DOMNodeRemoved":case"DOMNodeRemovedFromDocument":case"DOMSubtreeModified":return!0}return!1}function L(e){B(e,this)}function _(e){return e instanceof G.ShadowRoot&&(e=e.host),q(e)}function D(e,t){var n=z.get(e);if(n)for(var r=0;r<n.length;r++)if(!n[r].removed&&n[r].type===t)return!0;return!1}function C(e,t){for(var n=q(e);n;n=n.parentNode)if(D(V(n),t))return!0;return!1}function H(e){I(e,Ee)}function x(t,n,o,i){e.renderAllPending();var a=V(Se.call(U(n),o,i));if(!a)return null;var c=r(a,null),u=c.lastIndexOf(t);return u==-1?null:(c=c.slice(0,u),s(c,t))}function R(e){return function(){var t=te.get(this);return t&&t[e]&&t[e].value||null}}function P(e){var t=e.slice(2);return function(n){var r=te.get(this);r||(r=Object.create(null),te.set(this,r));var o=r[e];if(o&&this.removeEventListener(t,o.wrapped,!1),"function"==typeof n){var i=function(t){var r=n.call(this,t);r===!1?t.preventDefault():"onbeforeunload"===e&&"string"==typeof r&&(t.returnValue=r)};this.addEventListener(t,i,!1),r[e]={value:n,wrapped:i}}}}var W,I=e.forwardMethodsToWrapper,A=e.getTreeScope,k=e.mixin,F=e.registerWrapper,B=e.setWrapper,U=e.unsafeUnwrap,q=e.unwrap,V=e.wrap,G=e.wrappers,z=(new WeakMap,new WeakMap),X=new WeakMap,K=new WeakMap,Y=new WeakMap,$=new WeakMap,Z=new WeakMap,J=new WeakMap,Q=new WeakMap,ee=new WeakMap,te=new WeakMap,ne=new WeakMap,re=0,oe=1,ie=2,ae=3;b.prototype={equals:function(e){return this.handler===e.handler&&this.type===e.type&&this.capture===e.capture},get removed(){return null===this.handler},remove:function(){this.handler=null}};var se=window.Event;se.prototype.polymerBlackList_={returnValue:!0,keyLocation:!0},y.prototype={get target(){return Y.get(this)},get currentTarget(){return $.get(this)},get eventPhase(){return J.get(this)},get path(){var e=ne.get(this);return e?e.slice():[]},stopPropagation:function(){Q.set(this,!0)},stopImmediatePropagation:function(){Q.set(this,!0),ee.set(this,!0)}};var ce=function(){var e=document.createEvent("Event");return 
e.initEvent("test",!0,!0),e.preventDefault(),e.defaultPrevented}();ce||(y.prototype.preventDefault=function(){this.cancelable&&(U(this).preventDefault(),Object.defineProperty(this,"defaultPrevented",{get:function(){return!0},configurable:!0}))}),F(se,y,document.createEvent("Event"));var ue=S("UIEvent",y),le=S("CustomEvent",y),pe={get relatedTarget(){var e=Z.get(this);return void 0!==e?e:V(q(this).relatedTarget)}},de=k({initMouseEvent:M("initMouseEvent",14)},pe),fe=k({initFocusEvent:M("initFocusEvent",5)},pe),he=S("MouseEvent",ue,de),we=S("FocusEvent",ue,fe),me=Object.create(null),ve=function(){try{new window.FocusEvent("focus")}catch(e){return!1}return!0}();if(!ve){var ge=function(e,t,n){if(n){var r=me[n];t=k(k({},r),t)}me[e]=t};ge("Event",{bubbles:!1,cancelable:!1}),ge("CustomEvent",{detail:null},"Event"),ge("UIEvent",{view:null,detail:0},"Event"),ge("MouseEvent",{screenX:0,screenY:0,clientX:0,clientY:0,ctrlKey:!1,altKey:!1,shiftKey:!1,metaKey:!1,button:0,relatedTarget:null},"UIEvent"),ge("FocusEvent",{relatedTarget:null},"UIEvent")}var be=window.BeforeUnloadEvent;O.prototype=Object.create(y.prototype),k(O.prototype,{get returnValue(){return U(this).returnValue},set returnValue(e){U(this).returnValue=e}}),be&&F(be,O);var ye=window.EventTarget,Ee=["addEventListener","removeEventListener","dispatchEvent"];[Node,Window].forEach(function(e){var t=e.prototype;Ee.forEach(function(e){Object.defineProperty(t,e+"_",{value:t[e]})})}),L.prototype={addEventListener:function(e,t,n){if(N(t)&&!j(e)){var r=new b(e,t,n),o=z.get(this);if(o){for(var i=0;i<o.length;i++)if(r.equals(o[i]))return}else o=[],o.depth=0,z.set(this,o);o.push(r);var a=_(this);a.addEventListener_(e,d,!0)}},removeEventListener:function(e,t,n){n=Boolean(n);var r=z.get(this);if(r){for(var o=0,i=!1,a=0;a<r.length;a++)r[a].type===e&&r[a].capture===n&&(o++,r[a].handler===t&&(i=!0,r[a].remove()));if(i&&1===o){var s=_(this);s.removeEventListener_(e,d,!0)}}},dispatchEvent:function(t){var n=q(t),r=n.type;X.set(n,!1),e.renderAllPending();var o;C(this,r)||(o=function(){},this.addEventListener(r,o,!0));try{return q(this).dispatchEvent_(n)}finally{o&&this.removeEventListener(r,o,!0)}}},ye&&F(ye,L);var Se=document.elementFromPoint;e.elementFromPoint=x,e.getEventHandlerGetter=R,e.getEventHandlerSetter=P,e.wrapEventTargetMethods=H,e.wrappers.BeforeUnloadEvent=O,e.wrappers.CustomEvent=le,e.wrappers.Event=y,e.wrappers.EventTarget=L,e.wrappers.FocusEvent=we,e.wrappers.MouseEvent=he,e.wrappers.UIEvent=ue}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e,t){Object.defineProperty(e,t,w)}function n(e){u(e,this)}function r(){this.length=0,t(this,"length")}function o(e){for(var t=new r,o=0;o<e.length;o++)t[o]=new n(e[o]);return t.length=o,t}function i(e){a.call(this,e)}var a=e.wrappers.UIEvent,s=e.mixin,c=e.registerWrapper,u=e.setWrapper,l=e.unsafeUnwrap,p=e.wrap,d=window.TouchEvent;if(d){var f;try{f=document.createEvent("TouchEvent")}catch(h){return}var w={enumerable:!1};n.prototype={get target(){return p(l(this).target)}};var m={configurable:!0,enumerable:!0,get:null};["clientX","clientY","screenX","screenY","pageX","pageY","identifier","webkitRadiusX","webkitRadiusY","webkitRotationAngle","webkitForce"].forEach(function(e){m.get=function(){return l(this)[e]},Object.defineProperty(n.prototype,e,m)}),r.prototype={item:function(e){return this[e]}},i.prototype=Object.create(a.prototype),s(i.prototype,{get touches(){return o(l(this).touches)},get targetTouches(){return o(l(this).targetTouches)},get changedTouches(){return 
o(l(this).changedTouches)},initTouchEvent:function(){throw new Error("Not implemented")}}),c(d,i,f),e.wrappers.Touch=n,e.wrappers.TouchEvent=i,e.wrappers.TouchList=r}}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e,t){Object.defineProperty(e,t,s)}function n(){this.length=0,t(this,"length")}function r(e){if(null==e)return e;for(var t=new n,r=0,o=e.length;r<o;r++)t[r]=a(e[r]);return t.length=o,t}function o(e,t){e.prototype[t]=function(){return r(i(this)[t].apply(i(this),arguments))}}var i=e.unsafeUnwrap,a=e.wrap,s={enumerable:!1};n.prototype={item:function(e){return this[e]}},t(n.prototype,"item"),e.wrappers.NodeList=n,e.addWrapNodeListMethod=o,e.wrapNodeList=r}(window.ShadowDOMPolyfill),function(e){"use strict";e.wrapHTMLCollection=e.wrapNodeList,e.wrappers.HTMLCollection=e.wrappers.NodeList}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){N(e instanceof S)}function n(e){var t=new T;return t[0]=e,t.length=1,t}function r(e,t,n){L(t,"childList",{removedNodes:n,previousSibling:e.previousSibling,nextSibling:e.nextSibling})}function o(e,t){L(e,"childList",{removedNodes:t})}function i(e,t,r,o){if(e instanceof DocumentFragment){var i=s(e);B=!0;for(var a=i.length-1;a>=0;a--)e.removeChild(i[a]),i[a].parentNode_=t;B=!1;for(var a=0;a<i.length;a++)i[a].previousSibling_=i[a-1]||r,i[a].nextSibling_=i[a+1]||o;return r&&(r.nextSibling_=i[0]),o&&(o.previousSibling_=i[i.length-1]),i}var i=n(e),c=e.parentNode;return c&&c.removeChild(e),e.parentNode_=t,e.previousSibling_=r,e.nextSibling_=o,r&&(r.nextSibling_=e),o&&(o.previousSibling_=e),i}function a(e){if(e instanceof DocumentFragment)return s(e);var t=n(e),o=e.parentNode;return o&&r(e,o,t),t}function s(e){for(var t=new T,n=0,r=e.firstChild;r;r=r.nextSibling)t[n++]=r;return t.length=n,o(e,t),t}function c(e){return e}function u(e,t){R(e,t),e.nodeIsInserted_()}function l(e,t){for(var n=_(t),r=0;r<e.length;r++)u(e[r],n)}function p(e){R(e,new O(e,null))}function d(e){for(var t=0;t<e.length;t++)p(e[t])}function f(e,t){var n=e.nodeType===S.DOCUMENT_NODE?e:e.ownerDocument;n!==t.ownerDocument&&n.adoptNode(t)}function h(t,n){if(n.length){var r=t.ownerDocument;if(r!==n[0].ownerDocument)for(var o=0;o<n.length;o++)e.adoptNodeNoRemove(n[o],r)}}function w(e,t){h(e,t);var n=t.length;if(1===n)return W(t[0]);for(var r=W(e.ownerDocument.createDocumentFragment()),o=0;o<n;o++)r.appendChild(W(t[o]));return r}function m(e){if(void 0!==e.firstChild_)for(var t=e.firstChild_;t;){var n=t;t=t.nextSibling_,n.parentNode_=n.previousSibling_=n.nextSibling_=void 0}e.firstChild_=e.lastChild_=void 0}function v(e){if(e.invalidateShadowRenderer()){for(var t=e.firstChild;t;){N(t.parentNode===e);var n=t.nextSibling,r=W(t),o=r.parentNode;o&&Y.call(o,r),t.previousSibling_=t.nextSibling_=t.parentNode_=null,t=n}e.firstChild_=e.lastChild_=null}else for(var n,i=W(e),a=i.firstChild;a;)n=a.nextSibling,Y.call(i,a),a=n}function g(e){var t=e.parentNode;return t&&t.invalidateShadowRenderer()}function b(e){for(var t,n=0;n<e.length;n++)t=e[n],t.parentNode.removeChild(t)}function y(e,t,n){var r;if(r=A(n?U.call(n,P(e),!1):q.call(P(e),!1)),t){for(var o=e.firstChild;o;o=o.nextSibling)r.appendChild(y(o,!0,n));if(e instanceof F.HTMLTemplateElement)for(var i=r.content,o=e.content.firstChild;o;o=o.nextSibling)i.appendChild(y(o,!0,n))}return r}function E(e,t){if(!t||_(e)!==_(t))return!1;for(var n=t;n;n=n.parentNode)if(n===e)return!0;return!1}function S(e){N(e instanceof V),M.call(this,e),this.parentNode_=void 0,this.firstChild_=void 0,this.lastChild_=void 0,this.nextSibling_=void 
0,this.previousSibling_=void 0,this.treeScope_=void 0}var M=e.wrappers.EventTarget,T=e.wrappers.NodeList,O=e.TreeScope,N=e.assert,j=e.defineWrapGetter,L=e.enqueueMutation,_=e.getTreeScope,D=e.isWrapper,C=e.mixin,H=e.registerTransientObservers,x=e.registerWrapper,R=e.setTreeScope,P=e.unsafeUnwrap,W=e.unwrap,I=e.unwrapIfNeeded,A=e.wrap,k=e.wrapIfNeeded,F=e.wrappers,B=!1,U=document.importNode,q=window.Node.prototype.cloneNode,V=window.Node,G=window.DocumentFragment,z=(V.prototype.appendChild,V.prototype.compareDocumentPosition),X=V.prototype.isEqualNode,K=V.prototype.insertBefore,Y=V.prototype.removeChild,$=V.prototype.replaceChild,Z=/Trident|Edge/.test(navigator.userAgent),J=Z?function(e,t){try{Y.call(e,t)}catch(n){if(!(e instanceof G))throw n}}:function(e,t){Y.call(e,t)};S.prototype=Object.create(M.prototype),C(S.prototype,{appendChild:function(e){return this.insertBefore(e,null)},insertBefore:function(e,n){t(e);var r;n?D(n)?r=W(n):(r=n,n=A(r)):(n=null,r=null),n&&N(n.parentNode===this);var o,s=n?n.previousSibling:this.lastChild,c=!this.invalidateShadowRenderer()&&!g(e);if(o=c?a(e):i(e,this,s,n),c)f(this,e),m(this),K.call(P(this),W(e),r);else{s||(this.firstChild_=o[0]),n||(this.lastChild_=o[o.length-1],void 0===this.firstChild_&&(this.firstChild_=this.firstChild));var u=r?r.parentNode:P(this);u?K.call(u,w(this,o),r):h(this,o)}return L(this,"childList",{addedNodes:o,nextSibling:n,previousSibling:s}),l(o,this),e},removeChild:function(e){if(t(e),e.parentNode!==this){for(var r=!1,o=(this.childNodes,this.firstChild);o;o=o.nextSibling)if(o===e){r=!0;break}if(!r)throw new Error("NotFoundError")}var i=W(e),a=e.nextSibling,s=e.previousSibling;if(this.invalidateShadowRenderer()){var c=this.firstChild,u=this.lastChild,l=i.parentNode;l&&J(l,i),c===e&&(this.firstChild_=a),u===e&&(this.lastChild_=s),s&&(s.nextSibling_=a),a&&(a.previousSibling_=s),e.previousSibling_=e.nextSibling_=e.parentNode_=void 0}else m(this),J(P(this),i);return B||L(this,"childList",{removedNodes:n(e),nextSibling:a,previousSibling:s}),H(this,e),e},replaceChild:function(e,r){t(e);var o;if(D(r)?o=W(r):(o=r,r=A(o)),r.parentNode!==this)throw new Error("NotFoundError");var s,c=r.nextSibling,u=r.previousSibling,d=!this.invalidateShadowRenderer()&&!g(e);return d?s=a(e):(c===e&&(c=e.nextSibling),s=i(e,this,u,c)),d?(f(this,e),m(this),$.call(P(this),W(e),o)):(this.firstChild===r&&(this.firstChild_=s[0]),this.lastChild===r&&(this.lastChild_=s[s.length-1]),r.previousSibling_=r.nextSibling_=r.parentNode_=void 0,o.parentNode&&$.call(o.parentNode,w(this,s),o)),L(this,"childList",{addedNodes:s,removedNodes:n(r),nextSibling:c,previousSibling:u}),p(r),l(s,this),r},nodeIsInserted_:function(){for(var e=this.firstChild;e;e=e.nextSibling)e.nodeIsInserted_()},hasChildNodes:function(){return null!==this.firstChild},get parentNode(){return void 0!==this.parentNode_?this.parentNode_:A(P(this).parentNode)},get firstChild(){return void 0!==this.firstChild_?this.firstChild_:A(P(this).firstChild)},get lastChild(){return void 0!==this.lastChild_?this.lastChild_:A(P(this).lastChild)},get nextSibling(){return void 0!==this.nextSibling_?this.nextSibling_:A(P(this).nextSibling)},get previousSibling(){return void 0!==this.previousSibling_?this.previousSibling_:A(P(this).previousSibling)},get parentElement(){for(var e=this.parentNode;e&&e.nodeType!==S.ELEMENT_NODE;)e=e.parentNode;return e},get textContent(){for(var e="",t=this.firstChild;t;t=t.nextSibling)t.nodeType!=S.COMMENT_NODE&&(e+=t.textContent);return e},set textContent(e){null==e&&(e="");var 
t=c(this.childNodes);if(this.invalidateShadowRenderer()){if(v(this),""!==e){var n=P(this).ownerDocument.createTextNode(e);this.appendChild(n)}}else m(this),P(this).textContent=e;var r=c(this.childNodes);L(this,"childList",{addedNodes:r,removedNodes:t}),d(t),l(r,this)},get childNodes(){for(var e=new T,t=0,n=this.firstChild;n;n=n.nextSibling)e[t++]=n;return e.length=t,e},cloneNode:function(e){return y(this,e)},contains:function(e){return E(this,k(e))},compareDocumentPosition:function(e){return z.call(P(this),I(e))},isEqualNode:function(e){return X.call(P(this),I(e))},normalize:function(){for(var e,t,n=c(this.childNodes),r=[],o="",i=0;i<n.length;i++)t=n[i],t.nodeType===S.TEXT_NODE?e||t.data.length?e?(o+=t.data,r.push(t)):e=t:this.removeChild(t):(e&&r.length&&(e.data+=o,b(r)),r=[],o="",e=null,t.childNodes.length&&t.normalize());e&&r.length&&(e.data+=o,b(r))}}),j(S,"ownerDocument"),x(V,S,document.createDocumentFragment()),delete S.prototype.querySelector,delete S.prototype.querySelectorAll,S.prototype=C(Object.create(M.prototype),S.prototype),e.cloneNode=y,e.nodeWasAdded=u,e.nodeWasRemoved=p,e.nodesWereAdded=l,e.nodesWereRemoved=d,e.originalInsertBefore=K,e.originalRemoveChild=Y,e.snapshotNodeList=c,e.wrappers.Node=S}(window.ShadowDOMPolyfill),function(e){"use strict";function t(t,n,r,o){for(var i=null,a=null,s=0,c=t.length;s<c;s++)i=b(t[s]),!o&&(a=v(i).root)&&a instanceof e.wrappers.ShadowRoot||(r[n++]=i);return n}function n(e){return String(e).replace(/\/deep\/|::shadow|>>>/g," ")}function r(e){return String(e).replace(/:host\(([^\s]+)\)/g,"$1").replace(/([^\s]):host/g,"$1").replace(":host","*").replace(/\^|\/shadow\/|\/shadow-deep\/|::shadow|\/deep\/|::content|>>>/g," ")}function o(e,t){for(var n,r=e.firstElementChild;r;){if(r.matches(t))return r;if(n=o(r,t))return n;r=r.nextElementSibling}return null}function i(e,t){return e.matches(t)}function a(e,t,n){var r=e.localName;return r===t||r===n&&e.namespaceURI===D}function s(){return!0}function c(e,t,n){return e.localName===n}function u(e,t){return e.namespaceURI===t}function l(e,t,n){return e.namespaceURI===t&&e.localName===n}function p(e,t,n,r,o,i){for(var a=e.firstElementChild;a;)r(a,o,i)&&(n[t++]=a),t=p(a,t,n,r,o,i),a=a.nextElementSibling;return t}function d(n,r,o,i,a){var s,c=g(this),u=v(this).root;if(u instanceof e.wrappers.ShadowRoot)return p(this,r,o,n,i,null);if(c instanceof L)s=M.call(c,i);else{if(!(c instanceof _))return p(this,r,o,n,i,null);s=S.call(c,i)}return t(s,r,o,a)}function f(n,r,o,i,a){var s,c=g(this),u=v(this).root;if(u instanceof e.wrappers.ShadowRoot)return p(this,r,o,n,i,a);if(c instanceof L)s=O.call(c,i,a);else{if(!(c instanceof _))return p(this,r,o,n,i,a);s=T.call(c,i,a)}return t(s,r,o,!1)}function h(n,r,o,i,a){var s,c=g(this),u=v(this).root;if(u instanceof e.wrappers.ShadowRoot)return p(this,r,o,n,i,a);if(c instanceof L)s=j.call(c,i,a);else{if(!(c instanceof _))return p(this,r,o,n,i,a);s=N.call(c,i,a)}return t(s,r,o,!1)}var w=e.wrappers.HTMLCollection,m=e.wrappers.NodeList,v=e.getTreeScope,g=e.unsafeUnwrap,b=e.wrap,y=document.querySelector,E=document.documentElement.querySelector,S=document.querySelectorAll,M=document.documentElement.querySelectorAll,T=document.getElementsByTagName,O=document.documentElement.getElementsByTagName,N=document.getElementsByTagNameNS,j=document.documentElement.getElementsByTagNameNS,L=window.Element,_=window.HTMLDocument||window.Document,D="http://www.w3.org/1999/xhtml",C={querySelector:function(t){var r=n(t),i=r!==t;t=r;var a,s=g(this),c=v(this).root;if(c instanceof 
e.wrappers.ShadowRoot)return o(this,t);if(s instanceof L)a=b(E.call(s,t));else{if(!(s instanceof _))return o(this,t);a=b(y.call(s,t))}return a&&!i&&(c=v(a).root)&&c instanceof e.wrappers.ShadowRoot?o(this,t):a},querySelectorAll:function(e){var t=n(e),r=t!==e;e=t;var o=new m;return o.length=d.call(this,i,0,o,e,r),o}},H={matches:function(t){return t=r(t),e.originalMatches.call(g(this),t)}},x={getElementsByTagName:function(e){var t=new w,n="*"===e?s:a;return t.length=f.call(this,n,0,t,e,e.toLowerCase()),
t},getElementsByClassName:function(e){return this.querySelectorAll("."+e)},getElementsByTagNameNS:function(e,t){var n=new w,r=null;return r="*"===e?"*"===t?s:c:"*"===t?u:l,n.length=h.call(this,r,0,n,e||null,t),n}};e.GetElementsByInterface=x,e.SelectorsInterface=C,e.MatchesInterface=H}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){for(;e&&e.nodeType!==Node.ELEMENT_NODE;)e=e.nextSibling;return e}function n(e){for(;e&&e.nodeType!==Node.ELEMENT_NODE;)e=e.previousSibling;return e}var r=e.wrappers.NodeList,o={get firstElementChild(){return t(this.firstChild)},get lastElementChild(){return n(this.lastChild)},get childElementCount(){for(var e=0,t=this.firstElementChild;t;t=t.nextElementSibling)e++;return e},get children(){for(var e=new r,t=0,n=this.firstElementChild;n;n=n.nextElementSibling)e[t++]=n;return e.length=t,e},remove:function(){var e=this.parentNode;e&&e.removeChild(this)}},i={get nextElementSibling(){return t(this.nextSibling)},get previousElementSibling(){return n(this.previousSibling)}},a={getElementById:function(e){return/[ \t\n\r\f]/.test(e)?null:this.querySelector('[id="'+e+'"]')}};e.ChildNodeInterface=i,e.NonElementParentNodeInterface=a,e.ParentNodeInterface=o}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){r.call(this,e)}var n=e.ChildNodeInterface,r=e.wrappers.Node,o=e.enqueueMutation,i=e.mixin,a=e.registerWrapper,s=e.unsafeUnwrap,c=window.CharacterData;t.prototype=Object.create(r.prototype),i(t.prototype,{get nodeValue(){return this.data},set nodeValue(e){this.data=e},get textContent(){return this.data},set textContent(e){this.data=e},get data(){return s(this).data},set data(e){var t=s(this).data;o(this,"characterData",{oldValue:t}),s(this).data=e}}),i(t.prototype,n),a(c,t,document.createTextNode("")),e.wrappers.CharacterData=t}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){return e>>>0}function n(e){r.call(this,e)}var r=e.wrappers.CharacterData,o=(e.enqueueMutation,e.mixin),i=e.registerWrapper,a=window.Text;n.prototype=Object.create(r.prototype),o(n.prototype,{splitText:function(e){e=t(e);var n=this.data;if(e>n.length)throw new Error("IndexSizeError");var r=n.slice(0,e),o=n.slice(e);this.data=r;var i=this.ownerDocument.createTextNode(o);return this.parentNode&&this.parentNode.insertBefore(i,this.nextSibling),i}}),i(a,n,document.createTextNode("")),e.wrappers.Text=n}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){return i(e).getAttribute("class")}function n(e,t){a(e,"attributes",{name:"class",namespace:null,oldValue:t})}function r(t){e.invalidateRendererBasedOnAttribute(t,"class")}function o(e,o,i){var a=e.ownerElement_;if(null==a)return o.apply(e,i);var s=t(a),c=o.apply(e,i);return t(a)!==s&&(n(a,s),r(a)),c}if(!window.DOMTokenList)return void console.warn("Missing DOMTokenList prototype, please include a compatible classList polyfill such as http://goo.gl/uTcepH.");var i=e.unsafeUnwrap,a=e.enqueueMutation,s=DOMTokenList.prototype.add;DOMTokenList.prototype.add=function(){o(this,s,arguments)};var c=DOMTokenList.prototype.remove;DOMTokenList.prototype.remove=function(){o(this,c,arguments)};var u=DOMTokenList.prototype.toggle;DOMTokenList.prototype.toggle=function(){return o(this,u,arguments)}}(window.ShadowDOMPolyfill),function(e){"use strict";function t(t,n){var r=t.parentNode;if(r&&r.shadowRoot){var o=e.getRendererForHost(r);o.dependsOnAttribute(n)&&o.invalidate()}}function n(e,t,n){l(e,"attributes",{name:t,namespace:null,oldValue:n})}function r(e){a.call(this,e)}var 
o=e.ChildNodeInterface,i=e.GetElementsByInterface,a=e.wrappers.Node,s=e.ParentNodeInterface,c=e.SelectorsInterface,u=e.MatchesInterface,l=(e.addWrapNodeListMethod,e.enqueueMutation),p=e.mixin,d=(e.oneOf,e.registerWrapper),f=e.unsafeUnwrap,h=e.wrappers,w=window.Element,m=["matches","mozMatchesSelector","msMatchesSelector","webkitMatchesSelector"].filter(function(e){return w.prototype[e]}),v=m[0],g=w.prototype[v],b=new WeakMap;r.prototype=Object.create(a.prototype),p(r.prototype,{createShadowRoot:function(){var t=new h.ShadowRoot(this);f(this).polymerShadowRoot_=t;var n=e.getRendererForHost(this);return n.invalidate(),t},get shadowRoot(){return f(this).polymerShadowRoot_||null},setAttribute:function(e,r){var o=f(this).getAttribute(e);f(this).setAttribute(e,r),n(this,e,o),t(this,e)},removeAttribute:function(e){var r=f(this).getAttribute(e);f(this).removeAttribute(e),n(this,e,r),t(this,e)},get classList(){var e=b.get(this);if(!e){if(e=f(this).classList,!e)return;e.ownerElement_=this,b.set(this,e)}return e},get className(){return f(this).className},set className(e){this.setAttribute("class",e)},get id(){return f(this).id},set id(e){this.setAttribute("id",e)}}),m.forEach(function(e){"matches"!==e&&(r.prototype[e]=function(e){return this.matches(e)})}),w.prototype.webkitCreateShadowRoot&&(r.prototype.webkitCreateShadowRoot=r.prototype.createShadowRoot),p(r.prototype,o),p(r.prototype,i),p(r.prototype,s),p(r.prototype,c),p(r.prototype,u),d(w,r,document.createElementNS(null,"x")),e.invalidateRendererBasedOnAttribute=t,e.matchesNames=m,e.originalMatches=g,e.wrappers.Element=r}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){switch(e){case"&":return"&";case"<":return"<";case">":return">";case'"':return""";case" ":return" "}}function n(e){return e.replace(j,t)}function r(e){return e.replace(L,t)}function o(e){for(var t={},n=0;n<e.length;n++)t[e[n]]=!0;return t}function i(e){if(e.namespaceURI!==C)return!0;var t=e.ownerDocument.doctype;return t&&t.publicId&&t.systemId}function a(e,t){switch(e.nodeType){case Node.ELEMENT_NODE:for(var o,a=e.tagName.toLowerCase(),c="<"+a,u=e.attributes,l=0;o=u[l];l++)c+=" "+o.name+'="'+n(o.value)+'"';return _[a]?(i(e)&&(c+="/"),c+">"):c+">"+s(e)+"</"+a+">";case Node.TEXT_NODE:var p=e.data;return t&&D[t.localName]?p:r(p);case Node.COMMENT_NODE:return"<!--"+e.data+"-->";default:throw console.error(e),new Error("not implemented")}}function s(e){e instanceof N.HTMLTemplateElement&&(e=e.content);for(var t="",n=e.firstChild;n;n=n.nextSibling)t+=a(n,e);return t}function c(e,t,n){var r=n||"div";e.textContent="";var o=T(e.ownerDocument.createElement(r));o.innerHTML=t;for(var i;i=o.firstChild;)e.appendChild(O(i))}function u(e){w.call(this,e)}function l(e,t){var n=T(e.cloneNode(!1));n.innerHTML=t;for(var r,o=T(document.createDocumentFragment());r=n.firstChild;)o.appendChild(r);return O(o)}function p(t){return function(){return e.renderAllPending(),M(this)[t]}}function d(e){m(u,e,p(e))}function f(t){Object.defineProperty(u.prototype,t,{get:p(t),set:function(n){e.renderAllPending(),M(this)[t]=n},configurable:!0,enumerable:!0})}function h(t){Object.defineProperty(u.prototype,t,{value:function(){return e.renderAllPending(),M(this)[t].apply(M(this),arguments)},configurable:!0,enumerable:!0})}var 
w=e.wrappers.Element,m=e.defineGetter,v=e.enqueueMutation,g=e.mixin,b=e.nodesWereAdded,y=e.nodesWereRemoved,E=e.registerWrapper,S=e.snapshotNodeList,M=e.unsafeUnwrap,T=e.unwrap,O=e.wrap,N=e.wrappers,j=/[&\u00A0"]/g,L=/[&\u00A0<>]/g,_=o(["area","base","br","col","command","embed","hr","img","input","keygen","link","meta","param","source","track","wbr"]),D=o(["style","script","xmp","iframe","noembed","noframes","plaintext","noscript"]),C="http://www.w3.org/1999/xhtml",H=/MSIE/.test(navigator.userAgent),x=window.HTMLElement,R=window.HTMLTemplateElement;u.prototype=Object.create(w.prototype),g(u.prototype,{get innerHTML(){return s(this)},set innerHTML(e){if(H&&D[this.localName])return void(this.textContent=e);var t=S(this.childNodes);this.invalidateShadowRenderer()?this instanceof N.HTMLTemplateElement?c(this.content,e):c(this,e,this.tagName):!R&&this instanceof N.HTMLTemplateElement?c(this.content,e):M(this).innerHTML=e;var n=S(this.childNodes);v(this,"childList",{addedNodes:n,removedNodes:t}),y(t),b(n,this)},get outerHTML(){return a(this,this.parentNode)},set outerHTML(e){var t=this.parentNode;if(t){t.invalidateShadowRenderer();var n=l(t,e);t.replaceChild(n,this)}},insertAdjacentHTML:function(e,t){var n,r;switch(String(e).toLowerCase()){case"beforebegin":n=this.parentNode,r=this;break;case"afterend":n=this.parentNode,r=this.nextSibling;break;case"afterbegin":n=this,r=this.firstChild;break;case"beforeend":n=this,r=null;break;default:return}var o=l(n,t);n.insertBefore(o,r)},get hidden(){return this.hasAttribute("hidden")},set hidden(e){e?this.setAttribute("hidden",""):this.removeAttribute("hidden")}}),["clientHeight","clientLeft","clientTop","clientWidth","offsetHeight","offsetLeft","offsetTop","offsetWidth","scrollHeight","scrollWidth"].forEach(d),["scrollLeft","scrollTop"].forEach(f),["focus","getBoundingClientRect","getClientRects","scrollIntoView"].forEach(h),E(x,u,document.createElement("b")),e.wrappers.HTMLElement=u,e.getInnerHTML=s,e.setInnerHTML=c}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){n.call(this,e)}var n=e.wrappers.HTMLElement,r=e.mixin,o=e.registerWrapper,i=e.unsafeUnwrap,a=e.wrap,s=window.HTMLCanvasElement;t.prototype=Object.create(n.prototype),r(t.prototype,{getContext:function(){var e=i(this).getContext.apply(i(this),arguments);return e&&a(e)}}),o(s,t,document.createElement("canvas")),e.wrappers.HTMLCanvasElement=t}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){n.call(this,e)}var n=e.wrappers.HTMLElement,r=e.mixin,o=e.registerWrapper,i=window.HTMLContentElement;t.prototype=Object.create(n.prototype),r(t.prototype,{constructor:t,get select(){return this.getAttribute("select")},set select(e){this.setAttribute("select",e)},setAttribute:function(e,t){n.prototype.setAttribute.call(this,e,t),"select"===String(e).toLowerCase()&&this.invalidateShadowRenderer(!0)}}),i&&o(i,t),e.wrappers.HTMLContentElement=t}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){n.call(this,e)}var n=e.wrappers.HTMLElement,r=e.mixin,o=e.registerWrapper,i=e.wrapHTMLCollection,a=e.unwrap,s=window.HTMLFormElement;t.prototype=Object.create(n.prototype),r(t.prototype,{get elements(){return i(a(this).elements)}}),o(s,t,document.createElement("form")),e.wrappers.HTMLFormElement=t}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){r.call(this,e)}function n(e,t){if(!(this instanceof n))throw new TypeError("DOM object constructor cannot be called as a function.");var o=i(document.createElement("img"));r.call(this,o),a(o,this),void 
0!==e&&(o.width=e),void 0!==t&&(o.height=t)}var r=e.wrappers.HTMLElement,o=e.registerWrapper,i=e.unwrap,a=e.rewrap,s=window.HTMLImageElement;t.prototype=Object.create(r.prototype),o(s,t,document.createElement("img")),n.prototype=t.prototype,e.wrappers.HTMLImageElement=t,e.wrappers.Image=n}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){n.call(this,e)}var n=e.wrappers.HTMLElement,r=(e.mixin,e.wrappers.NodeList,e.registerWrapper),o=window.HTMLShadowElement;t.prototype=Object.create(n.prototype),t.prototype.constructor=t,o&&r(o,t),e.wrappers.HTMLShadowElement=t}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){if(!e.defaultView)return e;var t=p.get(e);if(!t){for(t=e.implementation.createHTMLDocument("");t.lastChild;)t.removeChild(t.lastChild);p.set(e,t)}return t}function n(e){for(var n,r=t(e.ownerDocument),o=c(r.createDocumentFragment());n=e.firstChild;)o.appendChild(n);return o}function r(e){if(o.call(this,e),!d){var t=n(e);l.set(this,u(t))}}var o=e.wrappers.HTMLElement,i=e.mixin,a=e.registerWrapper,s=e.unsafeUnwrap,c=e.unwrap,u=e.wrap,l=new WeakMap,p=new WeakMap,d=window.HTMLTemplateElement;r.prototype=Object.create(o.prototype),i(r.prototype,{constructor:r,get content(){return d?u(s(this).content):l.get(this)}}),d&&a(d,r),e.wrappers.HTMLTemplateElement=r}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){n.call(this,e)}var n=e.wrappers.HTMLElement,r=e.registerWrapper,o=window.HTMLMediaElement;o&&(t.prototype=Object.create(n.prototype),r(o,t,document.createElement("audio")),e.wrappers.HTMLMediaElement=t)}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){r.call(this,e)}function n(e){if(!(this instanceof n))throw new TypeError("DOM object constructor cannot be called as a function.");var t=i(document.createElement("audio"));r.call(this,t),a(t,this),t.setAttribute("preload","auto"),void 0!==e&&t.setAttribute("src",e)}var r=e.wrappers.HTMLMediaElement,o=e.registerWrapper,i=e.unwrap,a=e.rewrap,s=window.HTMLAudioElement;s&&(t.prototype=Object.create(r.prototype),o(s,t,document.createElement("audio")),n.prototype=t.prototype,e.wrappers.HTMLAudioElement=t,e.wrappers.Audio=n)}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){return e.replace(/\s+/g," ").trim()}function n(e){o.call(this,e)}function r(e,t,n,i){if(!(this instanceof r))throw new TypeError("DOM object constructor cannot be called as a function.");var a=c(document.createElement("option"));o.call(this,a),s(a,this),void 0!==e&&(a.text=e),void 0!==t&&a.setAttribute("value",t),n===!0&&a.setAttribute("selected",""),a.selected=i===!0}var o=e.wrappers.HTMLElement,i=e.mixin,a=e.registerWrapper,s=e.rewrap,c=e.unwrap,u=e.wrap,l=window.HTMLOptionElement;n.prototype=Object.create(o.prototype),i(n.prototype,{get text(){return t(this.textContent)},set text(e){this.textContent=t(String(e))},get form(){return u(c(this).form)}}),a(l,n,document.createElement("option")),r.prototype=n.prototype,e.wrappers.HTMLOptionElement=n,e.wrappers.Option=r}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){n.call(this,e)}var n=e.wrappers.HTMLElement,r=e.mixin,o=e.registerWrapper,i=e.unwrap,a=e.wrap,s=window.HTMLSelectElement;t.prototype=Object.create(n.prototype),r(t.prototype,{add:function(e,t){"object"==typeof t&&(t=i(t)),i(this).add(i(e),t)},remove:function(e){return void 0===e?void n.prototype.remove.call(this):("object"==typeof e&&(e=i(e)),void i(this).remove(e))},get form(){return 
a(i(this).form)}}),o(s,t,document.createElement("select")),e.wrappers.HTMLSelectElement=t}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){n.call(this,e)}var n=e.wrappers.HTMLElement,r=e.mixin,o=e.registerWrapper,i=e.unwrap,a=e.wrap,s=e.wrapHTMLCollection,c=window.HTMLTableElement;t.prototype=Object.create(n.prototype),r(t.prototype,{get caption(){return a(i(this).caption)},createCaption:function(){return a(i(this).createCaption())},get tHead(){return a(i(this).tHead)},createTHead:function(){return a(i(this).createTHead())},createTFoot:function(){return a(i(this).createTFoot())},get tFoot(){return a(i(this).tFoot)},get tBodies(){return s(i(this).tBodies)},createTBody:function(){return a(i(this).createTBody())},get rows(){return s(i(this).rows)},insertRow:function(e){return a(i(this).insertRow(e))}}),o(c,t,document.createElement("table")),e.wrappers.HTMLTableElement=t}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){n.call(this,e)}var n=e.wrappers.HTMLElement,r=e.mixin,o=e.registerWrapper,i=e.wrapHTMLCollection,a=e.unwrap,s=e.wrap,c=window.HTMLTableSectionElement;t.prototype=Object.create(n.prototype),r(t.prototype,{constructor:t,get rows(){return i(a(this).rows)},insertRow:function(e){return s(a(this).insertRow(e))}}),o(c,t,document.createElement("thead")),e.wrappers.HTMLTableSectionElement=t}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){n.call(this,e)}var n=e.wrappers.HTMLElement,r=e.mixin,o=e.registerWrapper,i=e.wrapHTMLCollection,a=e.unwrap,s=e.wrap,c=window.HTMLTableRowElement;t.prototype=Object.create(n.prototype),r(t.prototype,{get cells(){return i(a(this).cells)},insertCell:function(e){return s(a(this).insertCell(e))}}),o(c,t,document.createElement("tr")),e.wrappers.HTMLTableRowElement=t}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){switch(e.localName){case"content":return new n(e);case"shadow":return new o(e);case"template":return new i(e)}r.call(this,e)}var n=e.wrappers.HTMLContentElement,r=e.wrappers.HTMLElement,o=e.wrappers.HTMLShadowElement,i=e.wrappers.HTMLTemplateElement,a=(e.mixin,e.registerWrapper),s=window.HTMLUnknownElement;t.prototype=Object.create(r.prototype),a(s,t),e.wrappers.HTMLUnknownElement=t}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){n.call(this,e)}var n=e.wrappers.Element,r=e.wrappers.HTMLElement,o=e.registerWrapper,i=(e.defineWrapGetter,e.unsafeUnwrap),a=e.wrap,s=e.mixin,c="http://www.w3.org/2000/svg",u=window.SVGElement,l=document.createElementNS(c,"title");if(!("classList"in l)){var p=Object.getOwnPropertyDescriptor(n.prototype,"classList");Object.defineProperty(r.prototype,"classList",p),delete n.prototype.classList}t.prototype=Object.create(n.prototype),s(t.prototype,{get ownerSVGElement(){return a(i(this).ownerSVGElement)}}),o(u,t,document.createElementNS(c,"title")),e.wrappers.SVGElement=t}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){d.call(this,e)}var n=e.mixin,r=e.registerWrapper,o=e.unwrap,i=e.wrap,a=window.SVGUseElement,s="http://www.w3.org/2000/svg",c=i(document.createElementNS(s,"g")),u=document.createElementNS(s,"use"),l=c.constructor,p=Object.getPrototypeOf(l.prototype),d=p.constructor;t.prototype=Object.create(p),"instanceRoot"in u&&n(t.prototype,{get instanceRoot(){return i(o(this).instanceRoot)},get animatedInstanceRoot(){return i(o(this).animatedInstanceRoot)}}),r(a,t,u),e.wrappers.SVGUseElement=t}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){n.call(this,e)}var 
n=e.wrappers.EventTarget,r=e.mixin,o=e.registerWrapper,i=e.unsafeUnwrap,a=e.wrap,s=window.SVGElementInstance;s&&(t.prototype=Object.create(n.prototype),r(t.prototype,{get correspondingElement(){return a(i(this).correspondingElement)},get correspondingUseElement(){return a(i(this).correspondingUseElement)},get parentNode(){return a(i(this).parentNode)},get childNodes(){throw new Error("Not implemented")},get firstChild(){return a(i(this).firstChild)},get lastChild(){return a(i(this).lastChild)},get previousSibling(){return a(i(this).previousSibling)},get nextSibling(){return a(i(this).nextSibling)}}),o(s,t),e.wrappers.SVGElementInstance=t)}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){o(e,this)}var n=e.mixin,r=e.registerWrapper,o=e.setWrapper,i=e.unsafeUnwrap,a=e.unwrap,s=e.unwrapIfNeeded,c=e.wrap,u=window.CanvasRenderingContext2D;n(t.prototype,{get canvas(){return c(i(this).canvas)},drawImage:function(){arguments[0]=s(arguments[0]),i(this).drawImage.apply(i(this),arguments)},createPattern:function(){return arguments[0]=a(arguments[0]),i(this).createPattern.apply(i(this),arguments)}}),r(u,t,document.createElement("canvas").getContext("2d")),e.wrappers.CanvasRenderingContext2D=t}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){i(e,this)}var n=e.addForwardingProperties,r=e.mixin,o=e.registerWrapper,i=e.setWrapper,a=e.unsafeUnwrap,s=e.unwrapIfNeeded,c=e.wrap,u=window.WebGLRenderingContext;if(u){r(t.prototype,{get canvas(){return c(a(this).canvas)},texImage2D:function(){arguments[5]=s(arguments[5]),a(this).texImage2D.apply(a(this),arguments)},texSubImage2D:function(){arguments[6]=s(arguments[6]),a(this).texSubImage2D.apply(a(this),arguments)}});var l=Object.getPrototypeOf(u.prototype);l!==Object.prototype&&n(l,t.prototype);var p=/WebKit/.test(navigator.userAgent)?{drawingBufferHeight:null,drawingBufferWidth:null}:{};o(u,t,p),e.wrappers.WebGLRenderingContext=t}}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){n.call(this,e)}var n=e.wrappers.Node,r=e.GetElementsByInterface,o=e.NonElementParentNodeInterface,i=e.ParentNodeInterface,a=e.SelectorsInterface,s=e.mixin,c=e.registerObject,u=e.registerWrapper,l=window.DocumentFragment;t.prototype=Object.create(n.prototype),s(t.prototype,i),s(t.prototype,a),s(t.prototype,r),s(t.prototype,o),u(l,t,document.createDocumentFragment()),e.wrappers.DocumentFragment=t;var p=c(document.createComment(""));e.wrappers.Comment=p}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){var t=p(l(e).ownerDocument.createDocumentFragment());n.call(this,t),c(t,this);var o=e.shadowRoot;h.set(this,o),this.treeScope_=new r(this,a(o||e)),f.set(this,e)}var n=e.wrappers.DocumentFragment,r=e.TreeScope,o=e.elementFromPoint,i=e.getInnerHTML,a=e.getTreeScope,s=e.mixin,c=e.rewrap,u=e.setInnerHTML,l=e.unsafeUnwrap,p=e.unwrap,d=e.wrap,f=new WeakMap,h=new WeakMap;t.prototype=Object.create(n.prototype),s(t.prototype,{constructor:t,get innerHTML(){return i(this)},set innerHTML(e){u(this,e),this.invalidateShadowRenderer()},get olderShadowRoot(){return h.get(this)||null},get host(){return f.get(this)||null},invalidateShadowRenderer:function(){return f.get(this).invalidateShadowRenderer()},elementFromPoint:function(e,t){return o(this,this.ownerDocument,e,t)},getSelection:function(){return document.getSelection()},get activeElement(){var e=p(this).ownerDocument.activeElement;if(!e||!e.nodeType)return null;for(var t=d(e);!this.contains(t);){for(;t.parentNode;)t=t.parentNode;if(!t.host)return null;t=t.host}return 
t}}),e.wrappers.ShadowRoot=t}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){var t=p(e).root;return t instanceof f?t.host:null}function n(t,n){if(t.shadowRoot){n=Math.min(t.childNodes.length-1,n);var r=t.childNodes[n];if(r){var o=e.getDestinationInsertionPoints(r);if(o.length>0){var i=o[0].parentNode;i.nodeType==Node.ELEMENT_NODE&&(t=i)}}}return t}function r(e){return e=l(e),t(e)||e}function o(e){a(e,this)}var i=e.registerWrapper,a=e.setWrapper,s=e.unsafeUnwrap,c=e.unwrap,u=e.unwrapIfNeeded,l=e.wrap,p=e.getTreeScope,d=window.Range,f=e.wrappers.ShadowRoot;o.prototype={get startContainer(){return r(s(this).startContainer)},get endContainer(){return r(s(this).endContainer)},get commonAncestorContainer(){return r(s(this).commonAncestorContainer)},setStart:function(e,t){e=n(e,t),s(this).setStart(u(e),t)},setEnd:function(e,t){e=n(e,t),s(this).setEnd(u(e),t)},setStartBefore:function(e){s(this).setStartBefore(u(e))},setStartAfter:function(e){s(this).setStartAfter(u(e))},setEndBefore:function(e){s(this).setEndBefore(u(e))},setEndAfter:function(e){s(this).setEndAfter(u(e))},selectNode:function(e){s(this).selectNode(u(e))},selectNodeContents:function(e){s(this).selectNodeContents(u(e))},compareBoundaryPoints:function(e,t){return s(this).compareBoundaryPoints(e,c(t))},extractContents:function(){return l(s(this).extractContents())},cloneContents:function(){return l(s(this).cloneContents())},insertNode:function(e){s(this).insertNode(u(e))},surroundContents:function(e){s(this).surroundContents(u(e))},cloneRange:function(){return l(s(this).cloneRange())},isPointInRange:function(e,t){return s(this).isPointInRange(u(e),t)},comparePoint:function(e,t){return s(this).comparePoint(u(e),t)},intersectsNode:function(e){return s(this).intersectsNode(u(e))},toString:function(){return s(this).toString()}},d.prototype.createContextualFragment&&(o.prototype.createContextualFragment=function(e){return l(s(this).createContextualFragment(e))}),i(window.Range,o,document.createRange()),e.wrappers.Range=o}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){e.previousSibling_=e.previousSibling,e.nextSibling_=e.nextSibling,e.parentNode_=e.parentNode}function n(n,o,i){var a=x(n),s=x(o),c=i?x(i):null;if(r(o),t(o),i)n.firstChild===i&&(n.firstChild_=i),i.previousSibling_=i.previousSibling;else{n.lastChild_=n.lastChild,n.lastChild===n.firstChild&&(n.firstChild_=n.firstChild);var u=R(a.lastChild);u&&(u.nextSibling_=u.nextSibling)}e.originalInsertBefore.call(a,s,c)}function r(n){var r=x(n),o=r.parentNode;if(o){var i=R(o);t(n),n.previousSibling&&(n.previousSibling.nextSibling_=n),n.nextSibling&&(n.nextSibling.previousSibling_=n),i.lastChild===n&&(i.lastChild_=n),i.firstChild===n&&(i.firstChild_=n),e.originalRemoveChild.call(o,r)}}function o(e){W.set(e,[])}function i(e){var t=W.get(e);return t||W.set(e,t=[]),t}function a(e){for(var t=[],n=0,r=e.firstChild;r;r=r.nextSibling)t[n++]=r;return t}function s(){for(var e=0;e<F.length;e++){var t=F[e],n=t.parentRenderer;n&&n.dirty||t.render()}F=[]}function c(){T=null,s()}function u(e){var t=A.get(e);return t||(t=new f(e),A.set(e,t)),t}function l(e){var t=D(e).root;return t instanceof _?t:null}function p(e){return u(e.host)}function d(e){this.skip=!1,this.node=e,this.childNodes=[]}function f(e){this.host=e,this.dirty=!1,this.invalidateAttributes(),this.associateNode(e)}function h(e){for(var t=[],n=e.firstChild;n;n=n.nextSibling)E(n)?t.push.apply(t,i(n)):t.push(n);return t}function w(e){if(e instanceof j)return e;if(e instanceof N)return null;for(var 
t=e.firstChild;t;t=t.nextSibling){var n=w(t);if(n)return n}return null}function m(e,t){i(t).push(e);var n=I.get(e);n?n.push(t):I.set(e,[t])}function v(e){return I.get(e)}function g(e){I.set(e,void 0)}function b(e,t){var n=t.getAttribute("select");if(!n)return!0;if(n=n.trim(),!n)return!0;if(!(e instanceof O))return!1;if(!U.test(n))return!1;try{return e.matches(n)}catch(r){return!1}}function y(e,t){var n=v(t);return n&&n[n.length-1]===e}function E(e){return e instanceof N||e instanceof j}function S(e){return e.shadowRoot}function M(e){for(var t=[],n=e.shadowRoot;n;n=n.olderShadowRoot)t.push(n);return t}var T,O=e.wrappers.Element,N=e.wrappers.HTMLContentElement,j=e.wrappers.HTMLShadowElement,L=e.wrappers.Node,_=e.wrappers.ShadowRoot,D=(e.assert,e.getTreeScope),C=(e.mixin,e.oneOf),H=e.unsafeUnwrap,x=e.unwrap,R=e.wrap,P=e.ArraySplice,W=new WeakMap,I=new WeakMap,A=new WeakMap,k=C(window,["requestAnimationFrame","mozRequestAnimationFrame","webkitRequestAnimationFrame","setTimeout"]),F=[],B=new P;B.equals=function(e,t){return x(e.node)===t},d.prototype={append:function(e){var t=new d(e);return this.childNodes.push(t),t},sync:function(e){if(!this.skip){for(var t=this.node,o=this.childNodes,i=a(x(t)),s=e||new WeakMap,c=B.calculateSplices(o,i),u=0,l=0,p=0,d=0;d<c.length;d++){for(var f=c[d];p<f.index;p++)l++,o[u++].sync(s);for(var h=f.removed.length,w=0;w<h;w++){var m=R(i[l++]);s.get(m)||r(m)}for(var v=f.addedCount,g=i[l]&&R(i[l]),w=0;w<v;w++){var b=o[u++],y=b.node;n(t,y,g),s.set(y,!0),b.sync(s)}p+=v}for(var d=p;d<o.length;d++)o[d].sync(s)}}},f.prototype={render:function(e){if(this.dirty){this.invalidateAttributes();var t=this.host;this.distribution(t);var n=e||new d(t);this.buildRenderTree(n,t);var r=!e;r&&n.sync(),this.dirty=!1}},get parentRenderer(){return D(this.host).renderer},invalidate:function(){if(!this.dirty){this.dirty=!0;var e=this.parentRenderer;if(e&&e.invalidate(),F.push(this),T)return;T=window[k](c,0)}},distribution:function(e){this.resetAllSubtrees(e),this.distributionResolution(e)},resetAll:function(e){E(e)?o(e):g(e),this.resetAllSubtrees(e)},resetAllSubtrees:function(e){for(var t=e.firstChild;t;t=t.nextSibling)this.resetAll(t);e.shadowRoot&&this.resetAll(e.shadowRoot),e.olderShadowRoot&&this.resetAll(e.olderShadowRoot)},distributionResolution:function(e){if(S(e)){for(var t=e,n=h(t),r=M(t),o=0;o<r.length;o++)this.poolDistribution(r[o],n);for(var o=r.length-1;o>=0;o--){var i=r[o],a=w(i);if(a){var s=i.olderShadowRoot;s&&(n=h(s));for(var c=0;c<n.length;c++)m(n[c],a)}this.distributionResolution(i)}}for(var u=e.firstChild;u;u=u.nextSibling)this.distributionResolution(u)},poolDistribution:function(e,t){if(!(e instanceof j))if(e instanceof N){var n=e;this.updateDependentAttributes(n.getAttribute("select"));for(var r=!1,o=0;o<t.length;o++){var e=t[o];e&&b(e,n)&&(m(e,n),t[o]=void 0,r=!0)}if(!r)for(var i=n.firstChild;i;i=i.nextSibling)m(i,n)}else for(var i=e.firstChild;i;i=i.nextSibling)this.poolDistribution(i,t)},buildRenderTree:function(e,t){for(var n=this.compose(t),r=0;r<n.length;r++){var o=n[r],i=e.append(o);this.buildRenderTree(i,o)}if(S(t)){var a=u(t);a.dirty=!1}},compose:function(e){for(var t=[],n=e.shadowRoot||e,r=n.firstChild;r;r=r.nextSibling)if(E(r)){this.associateNode(n);for(var o=i(r),a=0;a<o.length;a++){var s=o[a];y(r,s)&&t.push(s)}}else t.push(r);return t},invalidateAttributes:function(){this.attributes=Object.create(null)},updateDependentAttributes:function(e){if(e){var 
t=this.attributes;/\.\w+/.test(e)&&(t["class"]=!0),/#\w+/.test(e)&&(t.id=!0),e.replace(/\[\s*([^\s=\|~\]]+)/g,function(e,n){t[n]=!0})}},dependsOnAttribute:function(e){return this.attributes[e]},associateNode:function(e){H(e).polymerShadowRenderer_=this}};var U=/^(:not\()?[*.#[a-zA-Z_|]/;L.prototype.invalidateShadowRenderer=function(e){var t=H(this).polymerShadowRenderer_;return!!t&&(t.invalidate(),!0)},N.prototype.getDistributedNodes=j.prototype.getDistributedNodes=function(){return s(),i(this)},O.prototype.getDestinationInsertionPoints=function(){return s(),v(this)||[]},N.prototype.nodeIsInserted_=j.prototype.nodeIsInserted_=function(){this.invalidateShadowRenderer();var e,t=l(this);t&&(e=p(t)),H(this).polymerShadowRenderer_=e,e&&e.invalidate()},e.getRendererForHost=u,e.getShadowTrees=M,e.renderAllPending=s,e.getDestinationInsertionPoints=v,e.visual={insertBefore:n,remove:r}}(window.ShadowDOMPolyfill),function(e){"use strict";function t(t){if(window[t]){r(!e.wrappers[t]);var c=function(e){n.call(this,e)};c.prototype=Object.create(n.prototype),o(c.prototype,{get form(){return s(a(this).form)}}),i(window[t],c,document.createElement(t.slice(4,-7))),e.wrappers[t]=c}}var n=e.wrappers.HTMLElement,r=e.assert,o=e.mixin,i=e.registerWrapper,a=e.unwrap,s=e.wrap,c=["HTMLButtonElement","HTMLFieldSetElement","HTMLInputElement","HTMLKeygenElement","HTMLLabelElement","HTMLLegendElement","HTMLObjectElement","HTMLOutputElement","HTMLTextAreaElement"];c.forEach(t)}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){r(e,this)}var n=e.registerWrapper,r=e.setWrapper,o=e.unsafeUnwrap,i=e.unwrap,a=e.unwrapIfNeeded,s=e.wrap,c=window.Selection;t.prototype={get anchorNode(){return s(o(this).anchorNode)},get focusNode(){return s(o(this).focusNode)},addRange:function(e){o(this).addRange(a(e))},collapse:function(e,t){o(this).collapse(a(e),t)},containsNode:function(e,t){return o(this).containsNode(a(e),t)},getRangeAt:function(e){return s(o(this).getRangeAt(e))},removeRange:function(e){o(this).removeRange(i(e))},selectAllChildren:function(e){o(this).selectAllChildren(e instanceof ShadowRoot?o(e.host):a(e))},toString:function(){return o(this).toString()}},c.prototype.extend&&(t.prototype.extend=function(e,t){o(this).extend(a(e),t)}),n(window.Selection,t,window.getSelection()),e.wrappers.Selection=t}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){r(e,this)}var n=e.registerWrapper,r=e.setWrapper,o=e.unsafeUnwrap,i=e.unwrapIfNeeded,a=e.wrap,s=window.TreeWalker;t.prototype={get root(){return a(o(this).root)},get currentNode(){return a(o(this).currentNode)},set currentNode(e){o(this).currentNode=i(e)},get filter(){return o(this).filter},parentNode:function(){return a(o(this).parentNode())},firstChild:function(){return a(o(this).firstChild())},lastChild:function(){return a(o(this).lastChild())},previousSibling:function(){return a(o(this).previousSibling())},previousNode:function(){return a(o(this).previousNode())},nextNode:function(){return a(o(this).nextNode())}},n(s,t),e.wrappers.TreeWalker=t}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){l.call(this,e),this.treeScope_=new m(this,null)}function n(e){var n=document[e];t.prototype[e]=function(){return D(n.apply(L(this),arguments))}}function r(e,t){x.call(L(t),_(e)),o(e,t)}function o(e,t){e.shadowRoot&&t.adoptNode(e.shadowRoot),e instanceof w&&i(e,t);for(var n=e.firstChild;n;n=n.nextSibling)o(n,t)}function i(e,t){var n=e.olderShadowRoot;n&&t.adoptNode(n)}function a(e){j(e,this)}function s(e,t){var 
n=document.implementation[t];e.prototype[t]=function(){return D(n.apply(L(this),arguments))}}function c(e,t){var n=document.implementation[t];e.prototype[t]=function(){return n.apply(L(this),arguments)}}var u=e.GetElementsByInterface,l=e.wrappers.Node,p=e.ParentNodeInterface,d=e.NonElementParentNodeInterface,f=e.wrappers.Selection,h=e.SelectorsInterface,w=e.wrappers.ShadowRoot,m=e.TreeScope,v=e.cloneNode,g=e.defineGetter,b=e.defineWrapGetter,y=e.elementFromPoint,E=e.forwardMethodsToWrapper,S=e.matchesNames,M=e.mixin,T=e.registerWrapper,O=e.renderAllPending,N=e.rewrap,j=e.setWrapper,L=e.unsafeUnwrap,_=e.unwrap,D=e.wrap,C=e.wrapEventTargetMethods,H=(e.wrapNodeList,
new WeakMap);t.prototype=Object.create(l.prototype),b(t,"documentElement"),b(t,"body"),b(t,"head"),g(t,"activeElement",function(){var e=_(this).activeElement;if(!e||!e.nodeType)return null;for(var t=D(e);!this.contains(t);){for(;t.parentNode;)t=t.parentNode;if(!t.host)return null;t=t.host}return t}),["createComment","createDocumentFragment","createElement","createElementNS","createEvent","createEventNS","createRange","createTextNode"].forEach(n);var x=document.adoptNode,R=document.getSelection;M(t.prototype,{adoptNode:function(e){return e.parentNode&&e.parentNode.removeChild(e),r(e,this),e},elementFromPoint:function(e,t){return y(this,this,e,t)},importNode:function(e,t){return v(e,t,L(this))},getSelection:function(){return O(),new f(R.call(_(this)))},getElementsByName:function(e){return h.querySelectorAll.call(this,"[name="+JSON.stringify(String(e))+"]")}});var P=document.createTreeWalker,W=e.wrappers.TreeWalker;if(t.prototype.createTreeWalker=function(e,t,n,r){var o=null;return n&&(n.acceptNode&&"function"==typeof n.acceptNode?o={acceptNode:function(e){return n.acceptNode(D(e))}}:"function"==typeof n&&(o=function(e){return n(D(e))})),new W(P.call(_(this),_(e),t,o,r))},document.registerElement){var I=document.registerElement;t.prototype.registerElement=function(t,n){function r(e){return e?void j(e,this):i?document.createElement(i,t):document.createElement(t)}var o,i;if(void 0!==n&&(o=n.prototype,i=n["extends"]),o||(o=Object.create(HTMLElement.prototype)),e.nativePrototypeTable.get(o))throw new Error("NotSupportedError");for(var a,s=Object.getPrototypeOf(o),c=[];s&&!(a=e.nativePrototypeTable.get(s));)c.push(s),s=Object.getPrototypeOf(s);if(!a)throw new Error("NotSupportedError");for(var u=Object.create(a),l=c.length-1;l>=0;l--)u=Object.create(u);["createdCallback","attachedCallback","detachedCallback","attributeChangedCallback"].forEach(function(e){var t=o[e];t&&(u[e]=function(){D(this)instanceof r||N(this),t.apply(D(this),arguments)})});var p={prototype:u};i&&(p["extends"]=i),r.prototype=o,r.prototype.constructor=r,e.constructorTable.set(u,r),e.nativePrototypeTable.set(o,u);I.call(_(this),t,p);return r},E([window.HTMLDocument||window.Document],["registerElement"])}E([window.HTMLBodyElement,window.HTMLDocument||window.Document,window.HTMLHeadElement,window.HTMLHtmlElement],["appendChild","compareDocumentPosition","contains","getElementsByClassName","getElementsByTagName","getElementsByTagNameNS","insertBefore","querySelector","querySelectorAll","removeChild","replaceChild"]),E([window.HTMLBodyElement,window.HTMLHeadElement,window.HTMLHtmlElement],S),E([window.HTMLDocument||window.Document],["adoptNode","importNode","contains","createComment","createDocumentFragment","createElement","createElementNS","createEvent","createEventNS","createRange","createTextNode","createTreeWalker","elementFromPoint","getElementById","getElementsByName","getSelection"]),M(t.prototype,u),M(t.prototype,p),M(t.prototype,h),M(t.prototype,d),M(t.prototype,{get implementation(){var e=H.get(this);return e?e:(e=new a(_(this).implementation),H.set(this,e),e)},get defaultView(){return D(_(this).defaultView)}}),T(window.Document,t,document.implementation.createHTMLDocument("")),window.HTMLDocument&&T(window.HTMLDocument,t),C([window.HTMLBodyElement,window.HTMLDocument||window.Document,window.HTMLHeadElement]);var A=document.implementation.createDocument;a.prototype.createDocument=function(){return 
arguments[2]=_(arguments[2]),D(A.apply(L(this),arguments))},s(a,"createDocumentType"),s(a,"createHTMLDocument"),c(a,"hasFeature"),T(window.DOMImplementation,a),E([window.DOMImplementation],["createDocument","createDocumentType","createHTMLDocument","hasFeature"]),e.adoptNodeNoRemove=r,e.wrappers.DOMImplementation=a,e.wrappers.Document=t}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){n.call(this,e)}var n=e.wrappers.EventTarget,r=e.wrappers.Selection,o=e.mixin,i=e.registerWrapper,a=e.renderAllPending,s=e.unwrap,c=e.unwrapIfNeeded,u=e.wrap,l=window.Window,p=window.getComputedStyle,d=window.getDefaultComputedStyle,f=window.getSelection;t.prototype=Object.create(n.prototype),l.prototype.getComputedStyle=function(e,t){return u(this||window).getComputedStyle(c(e),t)},d&&(l.prototype.getDefaultComputedStyle=function(e,t){return u(this||window).getDefaultComputedStyle(c(e),t)}),l.prototype.getSelection=function(){return u(this||window).getSelection()},delete window.getComputedStyle,delete window.getDefaultComputedStyle,delete window.getSelection,["addEventListener","removeEventListener","dispatchEvent"].forEach(function(e){l.prototype[e]=function(){var t=u(this||window);return t[e].apply(t,arguments)},delete window[e]}),o(t.prototype,{getComputedStyle:function(e,t){return a(),p.call(s(this),c(e),t)},getSelection:function(){return a(),new r(f.call(s(this)))},get document(){return u(s(this).document)}}),d&&(t.prototype.getDefaultComputedStyle=function(e,t){return a(),d.call(s(this),c(e),t)}),i(l,t,window),e.wrappers.Window=t}(window.ShadowDOMPolyfill),function(e){"use strict";var t=e.unwrap,n=window.DataTransfer||window.Clipboard,r=n.prototype.setDragImage;r&&(n.prototype.setDragImage=function(e,n,o){r.call(this,t(e),n,o)})}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){var t;t=e instanceof i?e:new i(e&&o(e)),r(t,this)}var n=e.registerWrapper,r=e.setWrapper,o=e.unwrap,i=window.FormData;i&&(n(i,t,new i),e.wrappers.FormData=t)}(window.ShadowDOMPolyfill),function(e){"use strict";var t=e.unwrapIfNeeded,n=XMLHttpRequest.prototype.send;XMLHttpRequest.prototype.send=function(e){return n.call(this,t(e))}}(window.ShadowDOMPolyfill),function(e){"use strict";function t(e){var t=n[e],r=window[t];if(r){var o=document.createElement(e),i=o.constructor;window[t]=i}}var 
n=(e.isWrapperFor,{a:"HTMLAnchorElement",area:"HTMLAreaElement",audio:"HTMLAudioElement",base:"HTMLBaseElement",body:"HTMLBodyElement",br:"HTMLBRElement",button:"HTMLButtonElement",canvas:"HTMLCanvasElement",caption:"HTMLTableCaptionElement",col:"HTMLTableColElement",content:"HTMLContentElement",data:"HTMLDataElement",datalist:"HTMLDataListElement",del:"HTMLModElement",dir:"HTMLDirectoryElement",div:"HTMLDivElement",dl:"HTMLDListElement",embed:"HTMLEmbedElement",fieldset:"HTMLFieldSetElement",font:"HTMLFontElement",form:"HTMLFormElement",frame:"HTMLFrameElement",frameset:"HTMLFrameSetElement",h1:"HTMLHeadingElement",head:"HTMLHeadElement",hr:"HTMLHRElement",html:"HTMLHtmlElement",iframe:"HTMLIFrameElement",img:"HTMLImageElement",input:"HTMLInputElement",keygen:"HTMLKeygenElement",label:"HTMLLabelElement",legend:"HTMLLegendElement",li:"HTMLLIElement",link:"HTMLLinkElement",map:"HTMLMapElement",marquee:"HTMLMarqueeElement",menu:"HTMLMenuElement",menuitem:"HTMLMenuItemElement",meta:"HTMLMetaElement",meter:"HTMLMeterElement",object:"HTMLObjectElement",ol:"HTMLOListElement",optgroup:"HTMLOptGroupElement",option:"HTMLOptionElement",output:"HTMLOutputElement",p:"HTMLParagraphElement",param:"HTMLParamElement",pre:"HTMLPreElement",progress:"HTMLProgressElement",q:"HTMLQuoteElement",script:"HTMLScriptElement",select:"HTMLSelectElement",shadow:"HTMLShadowElement",source:"HTMLSourceElement",span:"HTMLSpanElement",style:"HTMLStyleElement",table:"HTMLTableElement",tbody:"HTMLTableSectionElement",template:"HTMLTemplateElement",textarea:"HTMLTextAreaElement",thead:"HTMLTableSectionElement",time:"HTMLTimeElement",title:"HTMLTitleElement",tr:"HTMLTableRowElement",track:"HTMLTrackElement",ul:"HTMLUListElement",video:"HTMLVideoElement"});Object.keys(n).forEach(t),Object.getOwnPropertyNames(e.wrappers).forEach(function(t){window[t]=e.wrappers[t]})}(window.ShadowDOMPolyfill); | PypiClean |
/Bottlechest-0.7.1-cp34-cp34m-macosx_10_9_x86_64.whl/Bottlechest-0.7.1.dist-info/DESCRIPTION.rst | ===========
Bottlechest
===========
Introduction
============
Bottlechest is a fork of bottleneck (https://github.com/kwgoodman/bottleneck), specialized for use in Orange (https://github.com/biolab/orange3).
Moving window functions, several other functions, and all optimizations for 3d arrays were removed to reduce the size of the library. New functions are added as needed.
===================== =======================================================
NumPy/SciPy           ``median, nanmedian, rankdata, ss, nansum, nanmin,
                      nanmax, nanmean, nanstd, nanargmin, nanargmax``
Functions             ``nanrankdata, nanvar, replace, nn, anynan, allnan,
                      nanequal``
===================== =======================================================
For other documentation, including a simple example and comprehensive set of benchmarks, refer to the original project.
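
A minimal usage sketch (the import name ``bottlechest`` is assumed from the
package name; the commented values are illustrative)::

    import numpy as np
    import bottlechest as bn

    a = np.array([1.0, np.nan, 3.0])
    bn.nansum(a)   # 4.0
    bn.nanmean(a)  # 2.0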
License
=======
Bottlechest is distributed under a Simplified BSD license. Parts of Bottleneck,
NumPy, SciPy and numpydoc, all of which have BSD licenses, are included in
Bottlechest. See the LICENSE file, which is distributed with Bottlechest, for
details.
Install
=======
Requirements:
======================== ====================================================
Bottlechest              Python 2.6, 2.7, 3.2; NumPy 1.8
Unit tests               nose
Compile                  gcc or MinGW
Optional                 SciPy 0.8, 0.9, 0.10 (portions of benchmark)
======================== ====================================================
| PypiClean |
/JoUtil-1.3.3-py3-none-any.whl/JoTools/txkj/parseXml.py |
from ..utils.XmlUtil import XmlUtil
# FIXME: rewrite this class for better parsing speed
class ParseXml(object):
"""解析 xml 中的信息,将信息导出为 xml"""
def __init__(self):
self.__attrs = {"folder", "filename", "path", "segmented", "size", "source", "object"} # all supported attributes
self.__xml_info_dict = {} # xml info dict
self.__objects_info = []
self.__size_info = {}
self.__source_info = {}
def _parse_node(self, assign_node):
"""解析在字典中的关键字"""
node_name = assign_node.nodeName
element_info = XmlUtil.get_info_from_node(assign_node)
self.__xml_info_dict[node_name] = element_info['value']
def _parse_object(self, assign_node):
"""解析 object 中的数据"""
object_info = {}
for each_node in assign_node.childNodes:
node_name = each_node.nodeName
if node_name in ["name", "pose", "truncated", "difficult", "prob", "id"]:
object_info[node_name] = XmlUtil.get_info_from_node(each_node)['value']
elif node_name == "bndbox":
bndbox_info = {}
for each_node_2 in each_node.childNodes:
each_node_name = each_node_2.nodeName
if each_node_name in ["xmin", "ymin", "xmax", "ymax"]:
bndbox_info[each_node_name] = XmlUtil.get_info_from_node(each_node_2)['value']
object_info['bndbox'] = bndbox_info
self.__objects_info.append(object_info)
def _parse_size(self, assign_node):
"""解析 size 信息"""
for each_node in assign_node.childNodes:
node_name = each_node.nodeName
if node_name in ["width", "height", "depth"]:
self.__size_info[node_name] = XmlUtil.get_info_from_node(each_node)['value']
def _parse_source(self, assign_node):
"""解析 source 信息"""
for each_node in assign_node.childNodes:
node_name = each_node.nodeName
if node_name in ["database"]:
self.__source_info[node_name] = XmlUtil.get_info_from_node(each_node)['value']
def _parse_xml(self, xml_path):
"""解析 xml"""
root_node = XmlUtil.get_root_node(xml_path) # 得到根节点
# 遍历根节点下面的子节点
for each_node in root_node.childNodes:
node_name = each_node.nodeName
if node_name in ["folder", "filename", "path", "segmented"]:
self._parse_node(each_node)
elif node_name == "source":
self._parse_source(each_node)
elif node_name == "size":
self._parse_size(each_node)
elif node_name == "object":
self._parse_object(each_node)
def set_attr_info(self, attr, info):
"""设置属性值"""
if attr not in self.__attrs:
raise ValueError("""attr should in folder, filename, path, segmented, size, source, object""")
self.__xml_info_dict[attr] = info
def update_xml_info(self, up_info):
"""更新 xml 字典信息,up_info: dict"""
for each_attr in up_info:
if each_attr not in self.__attrs:
raise ValueError("""attr should in folder, filename, path, segmented, size, source, object""")
else:
self.__xml_info_dict[each_attr] = up_info[each_attr]
def get_xml_info(self, xml_path):
# parse the xml
self.__xml_info_dict = {"folder": None, "filename": None, "path": None, "segmented": None}
self._parse_xml(xml_path)
# collect and return the info gathered from the xml
self.__xml_info_dict['size'] = self.__size_info
self.__xml_info_dict['source'] = self.__source_info
self.__xml_info_dict['object'] = self.__objects_info
return self.__xml_info_dict
def save_to_xml(self, save_path, assign_xml_info=None):
"""将 xml_info 保存为 xml 形式"""
if assign_xml_info is None:
assign_xml_info = self.__xml_info_dict.copy()
# no content
if not assign_xml_info:
raise ValueError("xml info is empty")
# write the xml
root = XmlUtil.get_document()
xml_class_1 = XmlUtil.add_sub_node(root, root, 'annotation', '')
# add "folder", "filename", "path", "segmented"
for attr_name in ["folder", "filename", "path", "segmented"]:
XmlUtil.add_sub_node(root, xml_class_1, attr_name, assign_xml_info[attr_name])
# add source
source_node = XmlUtil.add_sub_node(root, xml_class_1, "source", '')
for each_node in assign_xml_info["source"]:
XmlUtil.add_sub_node(root, source_node, each_node, assign_xml_info["source"][each_node])
# add size
size_node = XmlUtil.add_sub_node(root, xml_class_1, "size", '')
for each_node in assign_xml_info["size"]:
XmlUtil.add_sub_node(root, size_node, each_node, assign_xml_info["size"][each_node])
# add object
for each_object in assign_xml_info["object"]:
object_node = XmlUtil.add_sub_node(root, xml_class_1, "object", '')
for each_node in each_object:
if each_node != "bndbox":
XmlUtil.add_sub_node(root, object_node, each_node, each_object[each_node])
else:
bndbox_node = XmlUtil.add_sub_node(root, object_node, "bndbox", "")
for each_bndbox in each_object["bndbox"]:
XmlUtil.add_sub_node(root, bndbox_node, each_bndbox, each_object["bndbox"][each_bndbox])
# save the xml to a file
XmlUtil.save_xml(root, save_path)
def parse_xml(xml_path):
"""简易的函数使用版本"""
a = ParseXml()
xml_info = a.get_xml_info(xml_path)
return xml_info
def save_to_xml(xml_info, xml_path):
"""保存为 xml"""
a = ParseXml()
a.save_to_xml(save_path=xml_path, assign_xml_info=xml_info)
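# A round-trip usage sketch (the file paths here are hypothetical):
#
#     info = parse_xml("sample.xml")
#     info["folder"] = "updated"
#     save_to_xml(info, "sample_new.xml")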
if __name__ == "__main__":
xml_path = r"C:\Users\14271\Desktop\merge_w_divice_h_1.8\110kV远布一线二线_#138塔_其它(导地线、光缆、基础、接地装置、杆号牌及附属设施等)_地线_DJI_0585.xml"
xmlInfo = parse_xml(xml_path)
for each in xmlInfo['object']:
print(each['name']) | PypiClean |
/certfrtracker-1.6.3.tar.gz/certfrtracker-1.6.3/src/certfrtracker/html_parser.py | from bs4 import BeautifulSoup
from re import sub
import logging
logger = logging.getLogger(__name__)
def systems_parser(file_content) -> str:
"""
Returns a string containing the content under the "Systèmes affectés" header, with entries separated by "|".
:param file_content: str - html content
:return: str
"""
logging.debug(f"parsing \"Systèmes affectés\"")
content = ""
soup = BeautifulSoup(file_content, 'html.parser')
for header_title in soup.find_all('h2'):
if str(header_title) in ('<h2>Systèmes affectés</h2>', '<h2>Systèmes affecté(s)</h2>'):
# this "while" statement is the best way to get safely and only the content we need.
cache_header = header_title
while True:
try:
cache_header = cache_header.next_sibling
# if there isn't any "<ul>" tag in header_title, it will get the closest tag from header_title and
# insert it in database
except AttributeError:
content = sub('<.*?>', '', str(header_title.next_sibling))
break
if str(cache_header).startswith("<ul>"):
content = sub('</li> <li>', '|', str(cache_header)[9:-11])
break
return content.replace(u'\xa0', u' ')
def documentation_parser(file_content) -> ([str], [str]):
"""
Returns a tuple of two lists of strings (titles and links) holding the content under the "Documentation"
header, one entry per line.
:param file_content: str - html content
:return: ([str], [str])
"""
logging.debug(f"parsing \"Documentation\"")
titles = []
links = []
soup = BeautifulSoup(file_content, 'html.parser')
for header_title in soup.find_all('h2'):
if str(header_title) == '<h2>Documentation</h2>':
# the triple "next_element" might looks ugly,
# but it always get the content of the headers we need to filter
temp_header = header_title.next_element.next_element.next_element
for title in temp_header.select('ul > li'):
titles.append(str(title.next_element).replace(u'\xa0', u' '))
for link in temp_header.select('a'):
links.append(str(link.next_element).replace(u'\xa0', u' '))
return titles, links
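# Usage sketch (illustrative): pair each documentation title with its link
#
#     titles, links = documentation_parser(html_text)
#     for title, link in zip(titles, links):
#         print(title, link)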
def header_summary_parser(file_content) -> str:
"""
Returns a string containing the content under the "Résumé" header.
:param file_content: str - html content
:return: str
"""
logging.debug(f"parsing \"Résumé\"")
content = ""
soup = BeautifulSoup(file_content, 'html.parser')
for header_title in soup.find_all('h2'):
if str(header_title) == '<h2>Résumé</h2>':
# the triple "next_element" might look ugly,
# but it always gets the content of the headers we need to filter
content = sub('<.*?>', '', str(header_title.next_element.next_element.next_element))
return content.replace(u'\xa0', u' ')
def date_parser(file_content) -> str:
"""
Returns a string containing a date (YYYY-MM-DD).
:param file_content: str - html content
:return: str
"""
logging.debug(f"parsing \"Date de la dernière version\"")
soup = BeautifulSoup(file_content, 'html.parser')
table = soup.find("table", class_="table table-condensed")
all_values = table.find_all('td')
for index, text in enumerate(all_values):
if "Date de la dernière version" in text:
months = ['janvier', 'février', 'mars', 'avril', 'mai', 'juin', 'juillet', 'août', 'septembre', 'octobre',
'novembre', 'décembre']
date = sub('<.*?>', '', str(all_values[index + 1])).split()[:3]
date[0] = date[0].zfill(2)
for i, month in enumerate(months):
if month == date[1]:
date[1] = str(i + 1).zfill(2)
return '-'.join(date[::-1])
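# e.g. a cell holding "03 janvier 2022" is split into ['03', 'janvier', '2022'],
# 'janvier' is mapped to '01', and the reversed join returns '2022-01-03'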
def define_details(score, CVE, documentation_liens) -> str:
"""
Returns a string containing a URL to an external resource about the CVE.
If there isn't any CVE, it returns the first link of "documentation_liens", as it should be the most useful.
:param score: float
:param CVE: str - CVE-YYYY-NNNN
:param documentation_liens: [str]
"""
logging.debug(f"parsing \"all links to get the most interesting\"")
if score > 0.0:
return ("https://nvd.nist.gov/vuln/detail/" + CVE).replace(u'\xa0', u' ')
if CVE != "":
return ("http://cve.mitre.org/cgi-bin/cvename.cgi?name=" + CVE).replace(u'\xa0', u' ')
if len(documentation_liens) == 0:
return "No further details".replace(u'\xa0', u' ')
else:
return documentation_liens[0].replace(u'\xa0', u' ') | PypiClean |
/CodeIntel-2.0.0b19-cp34-cp34m-macosx_10_12_x86_64.whl/codeintel/codeintel2/lib_srcs/node.js/5.9/dgram.js | var dgram = {};
/**
* The dgram.Socket object is an [EventEmitter][] that encapsulates the
* datagram functionality.
* @constructor
*/
dgram.Socket = function() {}
dgram.Socket.prototype = new events.EventEmitter();
/**
* Tells the kernel to join a multicast group at the given multicastAddress
* using the IP_ADD_MEMBERSHIP socket option. If the multicastInterface
* argument is not specified, the operating system will try to add
* membership to all valid networking interfaces.
* @param multicastAddress {String}
* @param multicastInterface {String}
*/
dgram.Socket.prototype.addMembership = function(multicastAddress, multicastInterface) {}
/**
* For UDP sockets, causes the dgram.Socket to listen for datagram messages
* on a named port and optional address. If port is not specified, the
* operating system will attempt to bind to a random port. If address is
* not specified, the operating system will attempt to listen on all
* addresses. Once binding is complete, a 'listening' event is
* emitted and the optional callback function is called.
* @param port {Number}
* @param address {String}
* @param callback {Function}
*/
dgram.Socket.prototype.bind = function(port, address, callback) {}
/**
* For UDP sockets, causes the dgram.Socket to listen for datagram messages
* on a named port and optional address that are passed as properties of an
* options object passed as the first argument. If port is not specified,
* the operating system will attempt to bind to a random port. If address
* is not specified, the operating system will attempt to listen on all
* addresses. Once binding is complete, a 'listening' event is
* emitted and the optional callback function is called.
* @param options {Object}
* @param callback {Function}
*/
dgram.Socket.prototype.bind = function(options, callback) {}
/**
* Sets the IP_MULTICAST_TTL socket option. While TTL generally stands for
* "Time to Live", in this context it specifies the number of IP hops that
* a packet is allowed to travel through, specifically for multicast
* traffic. Each router or gateway that forwards a packet decrements the
* TTL. If the TTL is decremented to 0 by a router, it will not be
* forwarded.
* @param ttl {Number}
*/
dgram.Socket.prototype.setMulticastTTL = function(ttl) {}
/**
* Broadcasts a datagram on the socket. The destination port and address
* must be specified.
* @param msg {Buffer|String|Array}
* @param offset {Number}
* @param length {Number}
* @param port {Number}
* @param address {String}
* @param callback {Function}
*/
dgram.Socket.prototype.send = function(msg, offset, length, port, address, callback) {}
/**
* Sets or clears the IP_MULTICAST_LOOP socket option. When set to true,
* multicast packets will also be received on the local interface.
* @param flag {Boolean}
*/
dgram.Socket.prototype.setMulticastLoopback = function(flag) {}
/**
* Sets the IP_TTL socket option. While TTL generally stands for "Time to
* Live", in this context it specifies the number of IP hops that a packet
* is allowed to travel through. Each router or gateway that forwards a
* packet decrements the TTL. If the TTL is decremented to 0 by a router,
* it will not be forwarded.
* @param ttl {Number}
*/
dgram.Socket.prototype.setTTL = function(ttl) {}
/**
* Sets or clears the SO_BROADCAST socket option. When set to true, UDP
* packets may be sent to a local interface's broadcast address.
* @param flag {Boolean}
*/
dgram.Socket.prototype.setBroadcast = function(flag) {}
/**
* Returns an object containing the address information for a socket.
* @returns an object containing the address information for a socket
*/
dgram.Socket.prototype.address = function() {}
/**
* Close the underlying socket and stop listening for data on it. If a
* callback is provided, it is added as a listener for the
* ['close'][] event.
* @param callback
*/
dgram.Socket.prototype.close = function(callback) {}
/**
* Instructs the kernel to leave a multicast group at multicastAddress
* using the IP_DROP_MEMBERSHIP socket option. This method is automatically
* called by the kernel when the socket is closed or the process
* terminates, so most apps will never have reason to call this.
* @param multicastAddress {String}
* @param multicastInterface {String}
*/
dgram.Socket.prototype.dropMembership = function(multicastAddress, multicastInterface) {}
/**
* By default, binding a socket will cause it to block the Node.js process
* from exiting as long as the socket is open. The socket.unref() method
* can be used to exclude the socket from the reference counting that keeps
* the Node.js process active. The socket.ref() method adds the socket back
* to the reference counting and restores the default behavior.
*/
dgram.Socket.prototype.ref = function() {}
/**
* By default, binding a socket will cause it to block the Node.js process
* from exiting as long as the socket is open. The socket.unref() method
* can be used to exclude the socket from the reference counting that keeps
* the Node.js process active, allowing the process to exit even if the
* socket is still listening.
*/
dgram.Socket.prototype.unref = function() {}
/** @__local__ */ dgram.Socket.__events__ = {};
/**
* The 'close' event is emitted after a socket is closed with
* [close()][]. Once triggered, no new 'message' events will be
* emitted on this socket.
*/
dgram.Socket.__events__.close = function() {};
/**
* The 'error' event is emitted whenever any error occurs. The
* event handler function is passed a single Error object.
* @param exception {Error}
*/
dgram.Socket.__events__.error = function(exception) {};
/**
* The 'listening' event is emitted whenever a socket begins
* listening for datagram messages. This occurs as soon as UDP sockets are
* created.
*/
dgram.Socket.__events__.listening = function() {};
/**
* The 'message' event is emitted when a new datagram is available
* on a socket. The event handler function is passed two arguments: msg and
* rinfo. The msg argument is a [Buffer][] and rinfo is an object with the
* sender's address information provided by the address, family and
* port properties:
* @param msg {buffer.Buffer}
* @param rinfo {Object}
*/
dgram.Socket.__events__.message = function(msg, rinfo) {};
var events = require("events");
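/**
 * A minimal usage sketch (illustrative; `dgram.createSocket` is part of the
 * real Node.js API but is not declared in these completion stubs):
 *
 *   var server = dgram.createSocket('udp4');
 *   server.on('message', function(msg, rinfo) {
 *     console.log('got ' + msg.length + ' bytes from ' + rinfo.address + ':' + rinfo.port);
 *   });
 *   server.bind(41234);
 */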
exports = dgram; | PypiClean |
/DisplaceNet-0.1.tar.gz/DisplaceNet-0.1/engine/object_detection_branch/retina_net/keras_retinanet/models/resnet.py | import keras
import keras_resnet
import keras_resnet.models
from . import Backbone
from . import retinanet
class ResNetBackbone(Backbone):
""" Describes backbone information and provides utility functions.
"""
def __init__(self, backbone):
super(ResNetBackbone, self).__init__(backbone)
self.custom_objects.update(keras_resnet.custom_objects)
def retinanet(self, *args, **kwargs):
""" Returns a retinanet model using the correct backbone.
"""
return resnet_retinanet(*args, backbone=self.backbone, **kwargs)
def download_imagenet(self):
""" Downloads ImageNet weights and returns path to weights file.
"""
resnet_filename = 'ResNet-{}-model.keras.h5'
resnet_resource = 'https://github.com/fizyr/keras-models/releases/download/v0.0.1/{}'.format(resnet_filename)
depth = int(self.backbone.replace('resnet', ''))
filename = resnet_filename.format(depth)
resource = resnet_resource.format(depth)
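# e.g. for 'resnet50' this resolves to 'ResNet-50-model.keras.h5' fetched
# from the fizyr keras-models v0.0.1 release URL above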
if depth == 50:
checksum = '3e9f4e4f77bbe2c9bec13b53ee1c2319'
elif depth == 101:
checksum = '05dc86924389e5b401a9ea0348a3213c'
elif depth == 152:
checksum = '6ee11ef2b135592f8031058820bb9e71'
else:
raise ValueError('Unknown ResNet depth ({}); no known checksum.'.format(depth))
return keras.applications.imagenet_utils.get_file(
filename,
resource,
cache_subdir='models',
md5_hash=checksum
)
def validate(self):
""" Checks whether the backbone string is correct.
"""
allowed_backbones = ['resnet50', 'resnet101', 'resnet152']
backbone = self.backbone.split('_')[0]
if backbone not in allowed_backbones:
raise ValueError('Backbone (\'{}\') not in allowed backbones ({}).'.format(backbone, allowed_backbones))
def resnet_retinanet(num_classes, backbone='resnet50', inputs=None, modifier=None, **kwargs):
""" Constructs a retinanet model using a resnet backbone.
Args
num_classes: Number of classes to predict.
backbone: Which backbone to use (one of ('resnet50', 'resnet101', 'resnet152')).
inputs: The inputs to the network (defaults to a Tensor of shape (None, None, 3)).
modifier: A function handler which can modify the backbone before using it in retinanet (this can be used to freeze backbone layers for example).
Returns
RetinaNet model with a ResNet backbone.
"""
# choose default input
if inputs is None:
inputs = keras.layers.Input(shape=(None, None, 3))
# create the resnet backbone
if backbone == 'resnet50':
resnet = keras_resnet.models.ResNet50(inputs, include_top=False, freeze_bn=True)
elif backbone == 'resnet101':
resnet = keras_resnet.models.ResNet101(inputs, include_top=False, freeze_bn=True)
elif backbone == 'resnet152':
resnet = keras_resnet.models.ResNet152(inputs, include_top=False, freeze_bn=True)
else:
raise ValueError('Backbone (\'{}\') is invalid.'.format(backbone))
# invoke modifier if given
if modifier:
resnet = modifier(resnet)
# create the full model
return retinanet.retinanet(inputs=inputs, num_classes=num_classes, backbone_layers=resnet.outputs[1:], **kwargs)
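# A minimal construction sketch (illustrative values):
#
#     model = resnet_retinanet(num_classes=80, backbone='resnet50')
#     model.summary()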
def resnet50_retinanet(num_classes, inputs=None, **kwargs):
return resnet_retinanet(num_classes=num_classes, backbone='resnet50', inputs=inputs, **kwargs)
def resnet101_retinanet(num_classes, inputs=None, **kwargs):
return resnet_retinanet(num_classes=num_classes, backbone='resnet101', inputs=inputs, **kwargs)
def resnet152_retinanet(num_classes, inputs=None, **kwargs):
return resnet_retinanet(num_classes=num_classes, backbone='resnet152', inputs=inputs, **kwargs) | PypiClean |
/KoiLang-1.2.0a0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl/kola/klvm/koilang.py | import os
import sys
from contextlib import contextmanager, suppress
from functools import partial
from threading import Lock
from types import TracebackType, new_class
from typing import (Any, Callable, Dict, Generator, List, Optional, Tuple,
Type, TypeVar, Union, overload)
from typing_extensions import Literal, Self
from ..exception import KoiLangError
from ..lexer import BaseLexer, FileLexer, StringLexer
from ..parser import Parser
from .command import Command
from .commandset import CommandSet, CommandSetMeta
from .environment import Environment
_T_EnvCls = TypeVar("_T_EnvCls", bound=Type[Environment])
_T_Handler = TypeVar("_T_Handler", bound=Type["AbstractHandler"])
class KoiLangMeta(CommandSetMeta):
"""
metaclass for KoiLang class
Provide encoding and command threshold support.
"""
__text_encoding__: str
__text_lstrip__: bool
__command_threshold__: int
__command_handlers__: List[Type["AbstractHandler"]]
def __new__(
cls,
name: str,
bases: Tuple[type, ...],
attr: Dict[str, Any],
command_threshold: int = 0,
encoding: Optional[str] = None,
lstrip_text: Optional[bool] = None,
**kwds: Any
) -> Self:
"""
create a top-level language class
:param name: class name
:type name: str
:param bases: class bases
:type bases: Tuple[type, ...]
:param attr: class namespace
:type attr: Dict[str, Any]
:param command_threshold: the `#` prefix length of commands, defaults to 0
:type command_threshold: int, optional
:param encoding: encoding for file parsing, defaults to None
:type encoding: Optional[str], optional
:param lstrip_text: whether to remove text indentation, defaults to None (treated as True for a new top-level language)
:type lstrip_text: Optional[bool], optional
:return: new class
:rtype: KoiLangMeta
"""
# if not base KoiLang class, set a default value
if not any(isinstance(i, cls) for i in bases):
command_threshold = command_threshold or 1
encoding = encoding or "utf-8"
lstrip_text = lstrip_text if lstrip_text is not None else True
if "__command_handlers__" not in attr:
attr["__command_handlers__"] = []
if command_threshold:
assert command_threshold >= 0
attr["__command_threshold__"] = command_threshold
if lstrip_text is not None:
attr["__text_lstrip__"] = lstrip_text
if encoding:
attr["__text_encoding__"] = encoding
return super().__new__(cls, name, bases, attr, **kwds)
def register_environment(self, env_class: _T_EnvCls) -> _T_EnvCls:
self.__command_field__.add(env_class)
return env_class
def register_handler(self, handler: _T_Handler) -> _T_Handler:
if "__command_handlers__" not in self.__dict__:
# copy the handler list to avoid changing the base classes
self.__command_handlers__ = self.__command_handlers__.copy()
self.__command_handlers__.append(handler)
return handler
@property
def writer(self) -> Type:
"""writer class for KoiLang file building
:return: the writer class, which is a subclass of KoiLangWriter and current KoiLang class
:rtype: Type[KoiLang, Self]
"""
# sourcery skip: inline-immediately-returned-variable
cache = None
with suppress(AttributeError):
cache = self.__writer_class
return cache
cache = new_class(f"{self.__qualname__}.writer", (KoiLangWriter, self))
self.__writer_class = cache
return cache
class KoiLang(CommandSet, metaclass=KoiLangMeta):
"""
main class for the KoiLang virtual machine
`KoiLang` class is the top-level interface of the 'kola' package.
Just create a subclass to define your own markup language based on KoiLang.
"""
__slots__ = ["_handler", "_lock", "__top", "__exec_level"]
def __init__(self) -> None:
super().__init__()
self._lock = Lock()
self.__top = self
self.__exec_level = 0
self._handler = build_handlers(self.__class__.__command_handlers__, self)
def push_prepare(self, __env_type: Union[Type[Environment], Environment]) -> Environment:
if not isinstance(__env_type, Environment):
__env_type = __env_type(self.__top)
__env_type.set_up(self.__top)
return __env_type
def push_apply(self, __env_cache: Environment) -> None:
assert __env_cache.back is self.__top
with self._lock:
self.__top = __env_cache
def pop_prepare(self, __env_type: Optional[Type[Environment]] = None) -> Environment:
top = self.__top
if top is self: # pragma: no cover
raise ValueError('cannot pop the inital environment')
if __env_type is None:
assert isinstance(top, Environment)
else:
while isinstance(top, Environment) and top.__class__.__env_autopop__:
if isinstance(top, __env_type):
break
top = top.back
else:
if not isinstance(top, __env_type): # pragma: no cover
raise ValueError("unmatched environment")
return top
def pop_apply(self, __env_cache: Environment) -> None:
with self._lock:
top = self.__top
self.__top = __env_cache.back
while isinstance(top, Environment):
top.tear_down(self.__top)
if top is __env_cache:
break
top = top.back
else:
raise ValueError('cannot pop the initial environment')
def add_handler(self, handler: Union[Type["AbstractHandler"], "AbstractHandler"]) -> "AbstractHandler":
if isinstance(handler, type):
handler = handler(self)
self._handler = self._handler.insert(handler)
return handler
def remove_handler(self, handler: "AbstractHandler") -> None:
hdl = self._handler.remove(handler)
if hdl is None: # pragma: no cover
raise ValueError("cannot remove all handlers")
self._handler = hdl
def __parse(self, __lexer: BaseLexer, *, close_lexer: bool = True) -> None:
parser = Parser(__lexer, self)
try:
with self.exec_block():
while True:
try:
# Parser.exec() is a fast C level loop
parser.exec()
except KoiLangError:
if not self.on_exception(*sys.exc_info()):
raise
else:
break
finally:
if close_lexer:
__lexer.close()
def __parse_and_ret(self, __lexer: BaseLexer, *, close_lexer: bool = True) -> Generator[Any, None, None]:
parser = Parser(__lexer, self)
try:
with self.exec_block():
while True:
try:
yield from parser
except KoiLangError:
if not self.on_exception(*sys.exc_info()):
raise
else:
break
finally:
if close_lexer:
__lexer.close()
@overload
def parse(self, lexer: Union[BaseLexer, str], *, with_ret: Literal[False] = False, close_lexer: bool = True) -> None: ...
@overload # noqa: E301
def parse(
self,
lexer: Union[BaseLexer, str],
*,
with_ret: Literal[True],
close_lexer: bool = True
) -> Generator[Any, None, None]: ...
def parse(self, lexer: Union[BaseLexer, str], *, with_ret: bool = False, close_lexer: bool = True) -> Any:
"""parse kola text
:param lexer: Lexer object or legal KoiLang string
:type lexer: Union[BaseLexer, str]
:param with_ret: if true, return a generator yielding the return values of commands, defaults to False
:type with_ret: bool, optional
:param close_lexer: whether or not to close the lexer, defaults to True
:type close_lexer: bool, optional
:raises ValueError: when a KoiLang string is given but close_lexer is False
:return: return a generator if `with_ret` set
:rtype: Generator[Any, None, None] or None
"""
if isinstance(lexer, str):
if not close_lexer: # pragma: no cover
raise ValueError("inner string lexer must be closed at the end of parsing")
lexer = StringLexer(
lexer,
encoding=self.__class__.__text_encoding__,
command_threshold=self.__class__.__command_threshold__,
no_lstrip=not self.__class__.__text_lstrip__
)
if with_ret:
return self.__parse_and_ret(lexer, close_lexer=close_lexer)
else:
self.__parse(lexer, close_lexer=close_lexer)
def parse_file(self, path: Union[str, bytes, os.PathLike], *, encoding: Optional[str] = None, **kwds: Any) -> Any:
"""
parse a kola file.
"""
return self.parse(
FileLexer(
path, encoding=encoding or self.__class__.__text_encoding__,
command_threshold=self.__class__.__command_threshold__,
no_lstrip=not self.__class__.__text_lstrip__
), **kwds
)
@contextmanager
def exec_block(self) -> Generator[Self, None, None]:
if not self.__exec_level:
self.at_start()
with self._lock:
self.__exec_level += 1
try:
yield self
finally:
with self._lock:
self.__exec_level -= 1
if not self.__exec_level:
self.at_end()
def __getitem__(self, __key: str) -> Callable:
if self.__top is self:
return super().__getitem__(__key)
return self.__top[__key]
def __kola_caller__(self, command: Command, args: tuple, kwargs: Dict[str, Any], **kwds: Any) -> Any:
return self._handler(command, args, kwargs, **kwds)
@property
def top(self) -> CommandSet:
return self.__top
@property
def home(self) -> Self:
return self
@property
def handlers(self) -> "HandlerSequence":
return HandlerSequence(self._handler)
@partial(Command, "@start", virtual=True)
def at_start(self) -> None:
"""
parser initialize command
It is called before parsing starts.
"""
@partial(Command, "@end", virtual=True)
def at_end(self) -> None:
"""
parser finalize command
It is called after parsing ends, and the return value
will be that of the 'parse' method.
"""
@partial(Command, "@exception", virtual=True)
def on_exception(self, exc_type: Type[KoiLangError], exc_ins: Optional[KoiLangError], traceback: TracebackType) -> None:
"""
exception handling command
It is called when a KoiLang error occurs.
If the command wishes to suppress the exception, it should return a true value.
"""
from .handler import AbstractHandler, HandlerSequence, build_handlers
from .writer import KoiLangWriter | PypiClean |
/Finance-Hermes-0.3.6.tar.gz/Finance-Hermes-0.3.6/hermes/factors/technical/core/momentum.py | import numpy as np
import pandas as pd
from hermes.factors.technical.core.utilities import *
def annealn(high, low, close, length=None, offset=None, **kwargs):
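# ATR-normalized momentum: the `length`-bar price change (plus a small
# epsilon to keep it nonzero) scaled by the average of the current and
# `length`-lagged ATR.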
length = int(length) if length and length > 0 else 14
offset = int(offset) if isinstance(offset, int) else 0
high_low_range = high - low
high_close_range = (high - close.shift()).abs()
low_close_range = (low - close.shift()).abs()
cond1 = high_low_range > high_close_range
high_close_range[cond1] = high_low_range[cond1]
cond2 = high_close_range > low_close_range
low_close_range[cond2] = high_close_range[cond2]
tr = low_close_range
atr = tr.rolling(length).mean()
ret = close - close.shift(length) + 0.00001
atr_adj = 2 * ret / (atr + atr.shift(length))
# Offset
if offset != 0:
atr_adj = atr_adj.shift(offset)
# Handle fills
if "fillna" in kwargs:
atr_adj.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
atr_adj.fillna(method=kwargs["fill_method"], inplace=True)
return atr_adj
def ao(high, low, fast=None, slow=None, offset=None, **kwargs):
from hermes.factors.technical.core.overlap import sma
fast = int(fast) if fast and fast > 0 else 5
slow = int(slow) if slow and slow > 0 else 34
if slow < fast:
fast, slow = slow, fast
offset = int(offset) if isinstance(offset, int) else 0
# Calculate Result
median_price = 0.5 * (high + low)
fast_sma = sma(median_price, fast)
slow_sma = sma(median_price, slow)
ao = fast_sma - slow_sma
# Offset
if offset != 0:
ao = ao.shift(offset)
# Handle fills
if "fillna" in kwargs:
ao.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
ao.fillna(method=kwargs["fill_method"], inplace=True)
return ao
def apo(close, fast=None, slow=None, offset=None, **kwargs):
from hermes.factors.technical.core.overlap import sma
fast = int(fast) if fast and fast > 0 else 12
slow = int(slow) if slow and slow > 0 else 26
if slow < fast:
fast, slow = slow, fast
offset = int(offset) if isinstance(offset, int) else 0
fastma = sma(close, length=fast, **kwargs)
slowma = sma(close, length=slow, **kwargs)
apo = fastma - slowma
# Offset
if offset != 0:
apo = apo.shift(offset)
# Handle fills
if "fillna" in kwargs:
apo.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
apo.fillna(method=kwargs["fill_method"], inplace=True)
return apo
def bias(close, length=None, offset=None, **kwargs):
from hermes.factors.technical.core.overlap import sma
length = int(length) if length and length > 0 else 26
offset = int(offset) if isinstance(offset, int) else 0
# Calculate Result
bma = sma(close, length=length, **kwargs)
bias = (close / bma) - 1
# Offset
if offset != 0:
bias = bias.shift(offset)
# Handle fills
if "fillna" in kwargs:
bias.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
bias.fillna(method=kwargs["fill_method"], inplace=True)
return bias
def bop(open, high, low, close, scalar=None, offset=None, **kwargs):
scalar = float(scalar) if scalar else 1
offset = int(offset) if isinstance(offset, int) else 0
high_low_range = non_zero_range(high, low)
close_open_range = non_zero_range(close, open)
bop = scalar * close_open_range / high_low_range
# Offset
if offset != 0:
bop = bop.shift(offset)
# Handle fills
if "fillna" in kwargs:
bop.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
bop.fillna(method=kwargs["fill_method"], inplace=True)
return bop
def brar(open,
high,
low,
close,
length=None,
scalar=None,
drift=None,
offset=None,
**kwargs):
length = int(length) if length and length > 0 else 26
scalar = float(scalar) if scalar else 100
high_open_range = non_zero_range(high, open)
open_low_range = non_zero_range(open, low)
drift = int(drift) if isinstance(drift, int) and drift != 0 else 1
offset = int(offset) if isinstance(offset, int) else 0
# Calculate Result
hcy = non_zero_range(high, close.shift(drift))
cyl = non_zero_range(close.shift(drift), low)
hcy[hcy < 0] = 0 # Zero negative values
cyl[cyl < 0] = 0 # ""
ar = scalar * high_open_range.rolling(length).sum()
ar /= open_low_range.rolling(length).sum()
br = scalar * hcy.rolling(length).sum()
br /= cyl.rolling(length).sum()
# Offset
if offset != 0:
ar = ar.shift(offset)
br = br.shift(offset)
# Handle fills
if "fillna" in kwargs:
ar.fillna(kwargs["fillna"], inplace=True)
br.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
ar.fillna(method=kwargs["fill_method"], inplace=True)
br.fillna(method=kwargs["fill_method"], inplace=True)
return ar, br
def chkbar(close, high, length=None, offset=None, **kwargs):
length = int(length) if length and length > 0 else 5
offset = int(offset) if isinstance(offset, int) else 0
chkbar = ((close - high) / high).rolling(length).mean()
# Offset
if offset != 0:
chkbar = chkbar.shift(offset)
# Handle fills
if "fillna" in kwargs:
chkbar.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
chkbar.fillna(method=kwargs["fill_method"], inplace=True)
return chkbar
def clkbar(close, low, length=None, offset=None, **kwargs):
length = int(length) if length and length > 0 else 5
offset = int(offset) if isinstance(offset, int) else 0
clkbar = ((close - low) / low).rolling(length).mean()
# Offset
if offset != 0:
clkbar = clkbar.shift(offset)
# Handle fills
if "fillna" in kwargs:
clkbar.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
clkbar.fillna(method=kwargs["fill_method"], inplace=True)
return clkbar
def cci(high, low, close, length=None, c=None, offset=None, **kwargs):
from hermes.factors.technical.core.overlap import sma, hlc3
from hermes.factors.technical.core.statistics import mad
length = int(length) if length and length > 0 else 14
c = float(c) if c and c > 0 else 0.015
offset = int(offset) if isinstance(offset, int) else 0
typical_price = hlc3(high=high, low=low, close=close)
mean_typical_price = sma(typical_price, length=length)
mad_typical_price = mad(typical_price, length=length)
cci = typical_price - mean_typical_price
cci /= c * mad_typical_price
# Offset
if offset != 0:
cci = cci.shift(offset)
# Handle fills
if "fillna" in kwargs:
cci.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
cci.fillna(method=kwargs["fill_method"], inplace=True)
return cci
def cg(close, length=None, offset=None, **kwargs):
length = int(length) if length and length > 0 else 10
offset = int(offset) if isinstance(offset, int) else 0
# Calculate Result
coefficients = [length - i for i in range(0, length)]
numerator = -close.rolling(length).apply(weights(coefficients), raw=True)
cg = numerator / close.rolling(length).sum()
# Offset
if offset != 0:
cg = cg.shift(offset)
# Handle fills
if "fillna" in kwargs:
cg.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
cg.fillna(method=kwargs["fill_method"], inplace=True)
return cg
def cmo(close, length=None, scalar=None, drift=None, offset=None, **kwargs):
length = int(length) if length and length > 0 else 14
scalar = float(scalar) if scalar else 100
drift = int(drift) if isinstance(drift, int) and drift != 0 else 1
offset = int(offset) if isinstance(offset, int) else 0
mom = close.diff(drift)
positive = mom.copy().clip(lower=0)
negative = mom.copy().clip(upper=0).abs()
pos_ = positive.rolling(length).sum()
neg_ = negative.rolling(length).sum()
cmo = scalar * (pos_ - neg_) / (pos_ + neg_)
# Offset
if offset != 0:
cmo = cmo.shift(offset)
# Handle fills
if "fillna" in kwargs:
cmo.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
cmo.fillna(method=kwargs["fill_method"], inplace=True)
return cmo
def coppock(close, length=None, fast=None, slow=None, offset=None, **kwargs):
from hermes.factors.technical.core.overlap import wma
length = int(length) if length and length > 0 else 10
fast = int(fast) if fast and fast > 0 else 11
slow = int(slow) if slow and slow > 0 else 14
offset = int(offset) if isinstance(offset, int) else 0
# Calculate Result
total_roc = roc(close, fast) + roc(close, slow)
coppock = wma(total_roc, length)
# Offset
if offset != 0:
coppock = coppock.shift(offset)
# Handle fills
if "fillna" in kwargs:
coppock.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
coppock.fillna(method=kwargs["fill_method"], inplace=True)
return coppock
def dnclvolatility(close, low, length=None, offset=None, **kwargs):
length = int(length) if length and length > 0 else 5
offset = int(offset) if isinstance(offset, int) else 0
pre_close = close.shift(1)
dnclvolatility = ((low - pre_close) / pre_close).rolling(length).mean()
# Offset
if offset != 0:
dnclvolatility = dnclvolatility.shift(offset)
# Handle fills
if "fillna" in kwargs:
dnclvolatility.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
dnclvolatility.fillna(method=kwargs["fill_method"], inplace=True)
return dnclvolatility
def dnllvolatility(low, length=None, offset=None, **kwargs):
length = int(length) if length and length > 0 else 5
offset = int(offset) if isinstance(offset, int) else 0
pre_low = low.shift(1)
dnllvolatility = ((low - pre_low) / pre_low).rolling(length).mean()
# Offset
if offset != 0:
dnllvolatility = dnllvolatility.shift(offset)
# Handle fills
if "fillna" in kwargs:
dnllvolatility.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
dnllvolatility.fillna(method=kwargs["fill_method"], inplace=True)
return dnllvolatility
def dm(high, low, length=None, drift=None, offset=None, **kwargs):
from hermes.factors.technical.core.overlap import rma
length = int(length) if length and length > 0 else 14
drift = int(drift) if isinstance(drift, int) and drift != 0 else 1
offset = int(offset) if isinstance(offset, int) else 0
up = high - high.shift(drift)
dn = low.shift(drift) - low
pos_ = ((up > dn) & (up > 0)) * up
neg_ = ((dn > up) & (dn > 0)) * dn
pos_[pos_.abs() < sflt.epsilon] = 0
neg_[neg_.abs() < sflt.epsilon] = 0
# Not the same values as TA Lib's -+DM (Good First Issue)
pos = rma(pos_, length=length)
neg = rma(neg_, length=length)
# Offset
if offset != 0:
pos = pos.shift(offset)
neg = neg.shift(offset)
# Handle fills
if "fillna" in kwargs:
pos.fillna(kwargs["fillna"], inplace=True)
neg.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
pos.fillna(method=kwargs["fill_method"], inplace=True)
neg.fillna(method=kwargs["fill_method"], inplace=True)
return pos, neg
def dnintraday(open, low, length=None, offset=None, **kwargs):
length = int(length) if length and length > 0 else 10
offset = int(offset) if isinstance(offset, int) else 0
dnintraday = ((low - open) / open).rolling(length).mean()
# Offset
if offset != 0:
dnintraday = dnintraday.shift(offset)
# Handle fills
if "fillna" in kwargs:
dnintraday.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
dnintraday.fillna(method=kwargs["fill_method"], inplace=True)
return dnintraday
def er(close, length=None, drift=None, offset=None, **kwargs):
length = int(length) if length and length > 0 else 10
drift = int(drift) if isinstance(drift, int) and drift != 0 else 1
offset = int(offset) if isinstance(offset, int) else 0
# Calculate Result
abs_diff = close.diff(length).abs()
abs_volatility = close.diff(drift).abs()
er = abs_diff
er /= abs_volatility.rolling(window=length).sum()
# Offset
if offset != 0:
er = er.shift(offset)
# Handle fills
if "fillna" in kwargs:
er.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
er.fillna(method=kwargs["fill_method"], inplace=True)
return er
def eri(high, low, close, length=None, offset=None, **kwargs):
from hermes.factors.technical.core.overlap import ema
length = int(length) if length and length > 0 else 13
offset = int(offset) if isinstance(offset, int) else 0
# Calculate Result
ema_ = ema(close, length)
bull = high - ema_
bear = low - ema_
# Offset
if offset != 0:
bull = bull.shift(offset)
bear = bear.shift(offset)
# Handle fills
if "fillna" in kwargs:
bull.fillna(kwargs["fillna"], inplace=True)
bear.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
bull.fillna(method=kwargs["fill_method"], inplace=True)
bear.fillna(method=kwargs["fill_method"], inplace=True)
return bull, bear
def effratio(close, length=None, offset=None, **kwargs):
length = int(length) if length and length > 0 else 14
offset = int(offset) if isinstance(offset, int) else 0
net_chg = close - close.shift(length)
for i in range(length):
net_chg.iloc[i] = close.iloc[i] - close.iloc[0]
temp_chg = abs(close - close.shift(1))
temp_chg.iloc[0] = temp_chg.iloc[1]
tot_chg = temp_chg.rolling(length).sum()
for i in range(length):
if i == 0:
tot_chg.iloc[i] = 0
else:
tot_chg.iloc[i] = temp_chg.iloc[:i + 1].sum() + (
length - i - 1) * temp_chg.iloc[0]
effratio = net_chg / tot_chg
# Offset
if offset != 0:
effratio = effratio.shift(offset)
# Handle fills
if "fillna" in kwargs:
effratio.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
effratio.fillna(method=kwargs["fill_method"], inplace=True)
return effratio
def intraday(open, close, length=None, offset=None, **kwargs):
length = int(length) if length and length > 0 else 5
offset = int(offset) if isinstance(offset, int) else 0
intraday = ((close - open) / open).rolling(length).mean()
# Offset
if offset != 0:
intraday = intraday.shift(offset)
# Handle fills
if "fillna" in kwargs:
intraday.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
intraday.fillna(method=kwargs["fill_method"], inplace=True)
return intraday
def kdj(high=None,
low=None,
close=None,
length=None,
signal=None,
offset=None,
**kwargs):
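    """KDJ stochastic: fast %K from the `length`-bar high/low range
    (default 9); K and D are RMA-smoothed over `signal` bars (default 3);
    J = 3*K - 2*D. Returns (k, d, j)."""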
from hermes.factors.technical.core.overlap import rma
length = int(length) if length and length > 0 else 9
signal = int(signal) if signal and signal > 0 else 3
offset = int(offset) if isinstance(offset, int) else 0
highest_high = high.rolling(length).max()
lowest_low = low.rolling(length).min()
fastk = 100 * (close - lowest_low) / non_zero_range(
highest_high, lowest_low)
k = rma(fastk, length=signal)
d = rma(k, length=signal)
j = 3 * k - 2 * d
# Offset
if offset != 0:
k = k.shift(offset)
d = d.shift(offset)
j = j.shift(offset)
# Handle fills
if "fillna" in kwargs:
k.fillna(kwargs["fillna"], inplace=True)
d.fillna(kwargs["fillna"], inplace=True)
j.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
k.fillna(method=kwargs["fill_method"], inplace=True)
d.fillna(method=kwargs["fill_method"], inplace=True)
j.fillna(method=kwargs["fill_method"], inplace=True)
return k, d, j
def kst(close,
roc1=None,
roc2=None,
roc3=None,
roc4=None,
sma1=None,
sma2=None,
sma3=None,
sma4=None,
signal=None,
drift=None,
offset=None,
**kwargs):
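    """Know Sure Thing: 100 times the 1-2-3-4 weighted sum of four
    SMA-smoothed rates of change; the signal line is an SMA of KST over
    `signal` bars (default 9). Returns (kst, kst_signal)."""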
roc1 = int(roc1) if roc1 and roc1 > 0 else 10
roc2 = int(roc2) if roc2 and roc2 > 0 else 15
roc3 = int(roc3) if roc3 and roc3 > 0 else 20
roc4 = int(roc4) if roc4 and roc4 > 0 else 30
sma1 = int(sma1) if sma1 and sma1 > 0 else 10
sma2 = int(sma2) if sma2 and sma2 > 0 else 10
sma3 = int(sma3) if sma3 and sma3 > 0 else 10
sma4 = int(sma4) if sma4 and sma4 > 0 else 15
signal = int(signal) if signal and signal > 0 else 9
drift = int(drift) if isinstance(drift, int) and drift != 0 else 1
offset = int(offset) if isinstance(offset, int) else 0
# Calculate Result
rocma1 = roc(close, roc1).rolling(sma1).mean()
rocma2 = roc(close, roc2).rolling(sma2).mean()
rocma3 = roc(close, roc3).rolling(sma3).mean()
rocma4 = roc(close, roc4).rolling(sma4).mean()
kst = 100 * (rocma1 + 2 * rocma2 + 3 * rocma3 + 4 * rocma4)
kst_signal = kst.rolling(signal).mean()
# Offset
if offset != 0:
kst = kst.shift(offset)
kst_signal = kst_signal.shift(offset)
# Handle fills
if "fillna" in kwargs:
kst.fillna(kwargs["fillna"], inplace=True)
kst_signal.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
kst.fillna(method=kwargs["fill_method"], inplace=True)
kst_signal.fillna(method=kwargs["fill_method"], inplace=True)
return kst, kst_signal
def macd(close, fast=None, slow=None, signal=None, offset=None, **kwargs):
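    """MACD: EMA(close, fast) - EMA(close, slow) with an EMA signal line and
    histogram = macd - signalma. With kwargs asmode=True the AS variant is
    returned (MACD minus its signal, re-signalled).
    Returns (macd, histogram, signalma)."""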
from hermes.factors.technical.core.overlap import ema
fast = int(fast) if fast and fast > 0 else 12
slow = int(slow) if slow and slow > 0 else 26
signal = int(signal) if signal and signal > 0 else 9
if slow < fast:
fast, slow = slow, fast
offset = int(offset) if isinstance(offset, int) else 0
as_mode = kwargs.setdefault("asmode", False)
fastma = ema(close, length=fast)
slowma = ema(close, length=slow)
macd = fastma - slowma
signalma = ema(close=macd.loc[macd.first_valid_index():, ], length=signal)
histogram = macd - signalma
if as_mode:
macd = macd - signalma
signalma = ema(close=macd.loc[macd.first_valid_index():, ],
length=signal)
histogram = macd - signalma
# Offset
if offset != 0:
macd = macd.shift(offset)
histogram = histogram.shift(offset)
signalma = signalma.shift(offset)
return macd, histogram, signalma
def mom(close, length=None, offset=None, **kwargs):
length = int(length) if length and length > 0 else 10
offset = int(offset) if isinstance(offset, int) else 0
mom = close.diff(length)
# Offset
if offset != 0:
mom = mom.shift(offset)
# Handle fills
if "fillna" in kwargs:
mom.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
mom.fillna(method=kwargs["fill_method"], inplace=True)
return mom
def overnight(open, close, length=None, offset=None, **kwargs):
length = int(length) if length and length > 0 else 10
offset = int(offset) if isinstance(offset, int) else 0
pre_close = close.shift(1)
overnight = ((open - pre_close) / pre_close).rolling(length).mean()
# Offset
if offset != 0:
overnight = overnight.shift(offset)
# Handle fills
if "fillna" in kwargs:
overnight.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
overnight.fillna(method=kwargs["fill_method"], inplace=True)
return overnight
def pgo(high, low, close, length=None, offset=None, **kwargs):
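    """Pretty Good Oscillator: (close - SMA(close, length)) divided by
    EMA(ATR(high, low, close, length), length), default length 10."""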
from hermes.factors.technical.core.volatility import atr
from hermes.factors.technical.core.overlap import sma, ema
length = int(length) if length and length > 0 else 10
offset = int(offset) if isinstance(offset, int) else 0
# Calculate Result
pgo = close - sma(close, length)
pgo /= ema(atr(high, low, close, length), length)
# Offset
if offset != 0:
pgo = pgo.shift(offset)
# Handle fills
if "fillna" in kwargs:
pgo.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
pgo.fillna(method=kwargs["fill_method"], inplace=True)
return pgo
def ppo(close,
fast=None,
slow=None,
signal=None,
scalar=None,
offset=None,
**kwargs):
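    """Percentage Price Oscillator: scalar * (fast SMA - slow SMA) / slow SMA
    (note: this variant uses SMAs), with an EMA signal line over `signal`
    bars. Returns (ppo, histogram, signalma)."""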
from hermes.factors.technical.core.overlap import sma, ema
fast = int(fast) if fast and fast > 0 else 12
slow = int(slow) if slow and slow > 0 else 26
signal = int(signal) if signal and signal > 0 else 9
scalar = float(scalar) if scalar else 100
if slow < fast:
fast, slow = slow, fast
offset = int(offset) if isinstance(offset, int) else 0
fastma = sma(close, length=fast)
slowma = sma(close, length=slow)
ppo = scalar * (fastma - slowma)
ppo /= slowma
signalma = ema(ppo, length=signal)
histogram = ppo - signalma
# Offset
if offset != 0:
ppo = ppo.shift(offset)
histogram = histogram.shift(offset)
signalma = signalma.shift(offset)
# Handle fills
if "fillna" in kwargs:
ppo.fillna(kwargs["fillna"], inplace=True)
histogram.fillna(kwargs["fillna"], inplace=True)
signalma.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
ppo.fillna(method=kwargs["fill_method"], inplace=True)
histogram.fillna(method=kwargs["fill_method"], inplace=True)
signalma.fillna(method=kwargs["fill_method"], inplace=True)
return ppo, histogram, signalma
def psl(close,
open,
length=None,
scalar=None,
drift=None,
offset=None,
**kwargs):
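    """Psychological Line: the share of the last `length` bars (default 12)
    that closed above their open, scaled by `scalar` (default 100)."""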
length = int(length) if length and length > 0 else 12
scalar = float(scalar) if scalar and scalar > 0 else 100
offset = int(offset) if isinstance(offset, int) else 0
drift = int(drift) if isinstance(drift, int) and drift != 0 else 1
diff = np.sign(close - open)
diff.fillna(0, inplace=True)
diff[diff <= 0] = 0 # Zero negative values
psl = scalar * diff.rolling(length).sum()
psl /= length
# Offset
if offset != 0:
psl = psl.shift(offset)
# Handle fills
if "fillna" in kwargs:
psl.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
psl.fillna(method=kwargs["fill_method"], inplace=True)
return psl
def pvo(volume,
fast=None,
slow=None,
signal=None,
scalar=None,
offset=None,
**kwargs):
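    """Percentage Volume Oscillator: scalar * (EMA(volume, fast) -
    EMA(volume, slow)) / EMA(volume, slow), with an EMA signal line.
    Returns (pvo, histogram, signalma)."""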
from hermes.factors.technical.core.overlap import ema
fast = int(fast) if fast and fast > 0 else 12
slow = int(slow) if slow and slow > 0 else 26
signal = int(signal) if signal and signal > 0 else 9
scalar = float(scalar) if scalar else 100
if slow < fast:
fast, slow = slow, fast
offset = int(offset) if isinstance(offset, int) else 0
# Calculate Result
fastma = ema(volume, length=fast)
slowma = ema(volume, length=slow)
pvo = scalar * (fastma - slowma)
pvo /= slowma
signalma = ema(pvo, length=signal)
histogram = pvo - signalma
# Offset
if offset != 0:
pvo = pvo.shift(offset)
histogram = histogram.shift(offset)
signalma = signalma.shift(offset)
# Handle fills
if "fillna" in kwargs:
pvo.fillna(kwargs["fillna"], inplace=True)
histogram.fillna(kwargs["fillna"], inplace=True)
signalma.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
pvo.fillna(method=kwargs["fill_method"], inplace=True)
histogram.fillna(method=kwargs["fill_method"], inplace=True)
signalma.fillna(method=kwargs["fill_method"], inplace=True)
return pvo, histogram, signalma
def roc(close, length=None, scalar=None, offset=None, **kwargs):
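    """Rate of Change: scalar * (close - close.shift(length)) /
    close.shift(length), default length 10 and scalar 100."""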
length = int(length) if length and length > 0 else 10
scalar = float(scalar) if scalar and scalar > 0 else 100
offset = int(offset) if isinstance(offset, int) else 0
roc = scalar * mom(close=close, length=length) / close.shift(length)
# Offset
if offset != 0:
roc = roc.shift(offset)
# Handle fills
if "fillna" in kwargs:
roc.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
roc.fillna(method=kwargs["fill_method"], inplace=True)
return roc
def rsi(close, length=None, scalar=None, drift=None, offset=None, **kwargs):
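    """Wilder's RSI: scalar * avg_gain / (avg_gain + avg_loss), where gains
    and losses are RMA-smoothed over `length` (default 14); bounded 0-100
    for the default scalar."""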
from hermes.factors.technical.core.overlap import rma
length = int(length) if length and length > 0 else 14
scalar = float(scalar) if scalar else 100
drift = int(drift) if isinstance(drift, int) and drift != 0 else 1
offset = int(offset) if isinstance(offset, int) else 0
negative = close.diff(drift)
positive = negative.copy()
    positive[positive < 0] = 0  # Make negatives 0 for the positive series
    negative[negative > 0] = 0  # Make positives 0 for the negative series
positive_avg = rma(positive, length=length)
negative_avg = rma(negative, length=length)
rsi = scalar * positive_avg / (positive_avg + negative_avg.abs())
# Offset
if offset != 0:
rsi = rsi.shift(offset)
# Handle fills
if "fillna" in kwargs:
rsi.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
rsi.fillna(method=kwargs["fill_method"], inplace=True)
return rsi
def rvgi(open_,
high,
low,
close,
length=None,
swma_length=None,
offset=None,
**kwargs):
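    """Relative Vigor Index: SWMA(close - open) summed over `length`
    (default 14) divided by SWMA(high - low) summed over `length`; the
    signal line is a SWMA of the RVGI. Returns (rvgi, signal)."""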
from hermes.factors.technical.core.overlap import swma
high_low_range = non_zero_range(high, low)
close_open_range = non_zero_range(close, open_)
length = int(length) if length and length > 0 else 14
swma_length = int(swma_length) if swma_length and swma_length > 0 else 4
offset = int(offset) if isinstance(offset, int) else 0
# Calculate Result
numerator = swma(close_open_range,
length=swma_length).rolling(length).sum()
denominator = swma(high_low_range,
length=swma_length).rolling(length).sum()
rvgi = numerator / denominator
signal = swma(rvgi, length=swma_length)
# Offset
if offset != 0:
rvgi = rvgi.shift(offset)
signal = signal.shift(offset)
# Handle fills
if "fillna" in kwargs:
rvgi.fillna(kwargs["fillna"], inplace=True)
signal.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
rvgi.fillna(method=kwargs["fill_method"], inplace=True)
signal.fillna(method=kwargs["fill_method"], inplace=True)
return rvgi, signal
def slope(close,
length=None,
as_angle=None,
to_degrees=None,
vertical=None,
offset=None,
**kwargs):
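    """Per-bar slope close.diff(length) / length (default length 1);
    optionally converted to an angle via arctan (`as_angle`) and to
    degrees (`to_degrees`)."""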
length = int(length) if length and length > 0 else 1
    as_angle = as_angle if isinstance(as_angle, bool) else False
    to_degrees = to_degrees if isinstance(to_degrees, bool) else False
offset = int(offset) if isinstance(offset, int) else 0
slope = close.diff(length) / length
if as_angle:
slope = slope.apply(np.arctan)
if to_degrees:
slope *= 180 / np.pi
# Offset
if offset != 0:
slope = slope.shift(offset)
# Handle fills
if "fillna" in kwargs:
slope.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
slope.fillna(method=kwargs["fill_method"], inplace=True)
return slope
def stoch(high,
low,
close,
k=None,
d=None,
smooth_k=None,
offset=None,
**kwargs):
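    """Stochastic oscillator: raw %K = 100 * (close - LL(k)) / (HH(k) -
    LL(k)) with defaults k=14; %K is an SMA of the raw value over `smooth_k`
    and %D an SMA of %K over `d` (both default 3).
    Returns (stoch_k, stoch_d)."""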
from hermes.factors.technical.core.overlap import sma
k = k if k and k > 0 else 14
d = d if d and d > 0 else 3
smooth_k = smooth_k if smooth_k and smooth_k > 0 else 3
offset = int(offset) if isinstance(offset, int) else 0
# Calculate Result
lowest_low = low.rolling(k).min()
highest_high = high.rolling(k).max()
stoch = 100 * (close - lowest_low)
stoch /= non_zero_range(highest_high, lowest_low)
stoch_k = sma(stoch.loc[stoch.first_valid_index():, ], length=smooth_k)
stoch_d = sma(stoch_k.loc[stoch_k.first_valid_index():, ], length=d)
# Offset
if offset != 0:
stoch_k = stoch_k.shift(offset)
stoch_d = stoch_d.shift(offset)
# Handle fills
if "fillna" in kwargs:
stoch_k.fillna(kwargs["fillna"], inplace=True)
stoch_d.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
stoch_k.fillna(method=kwargs["fill_method"], inplace=True)
stoch_d.fillna(method=kwargs["fill_method"], inplace=True)
return stoch_k, stoch_d
def stochrsi(close,
length=None,
rsi_length=None,
k=None,
d=None,
offset=None,
**kwargs):
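    """Stochastic RSI: the stochastic formula applied to RSI values,
    100 * (RSI - min RSI) / (max RSI - min RSI) over `length`, with SMA
    smoothing for K and D. Returns (stochrsi_k, stochrsi_d)."""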
from hermes.factors.technical.core.overlap import sma
length = length if length and length > 0 else 14
rsi_length = rsi_length if rsi_length and rsi_length > 0 else 14
k = k if k and k > 0 else 3
d = d if d and d > 0 else 3
offset = int(offset) if isinstance(offset, int) else 0
# Calculate Result
rsi_ = rsi(close, length=rsi_length)
lowest_rsi = rsi_.rolling(length).min()
highest_rsi = rsi_.rolling(length).max()
stoch = 100 * (rsi_ - lowest_rsi)
stoch /= non_zero_range(highest_rsi, lowest_rsi)
stochrsi_k = sma(stoch, length=k)
stochrsi_d = sma(stochrsi_k, length=d)
# Offset
if offset != 0:
stochrsi_k = stochrsi_k.shift(offset)
stochrsi_d = stochrsi_d.shift(offset)
# Handle fills
if "fillna" in kwargs:
stochrsi_k.fillna(kwargs["fillna"], inplace=True)
stochrsi_d.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
stochrsi_k.fillna(method=kwargs["fill_method"], inplace=True)
stochrsi_d.fillna(method=kwargs["fill_method"], inplace=True)
return stochrsi_k, stochrsi_d
def trix(close,
length=None,
signal=None,
scalar=None,
drift=None,
offset=None,
**kwargs):
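    """TRIX: scalar times the `drift`-bar rate of change of a triple-smoothed
    EMA of close over `length` (default 30); the signal line is an SMA over
    `signal` bars. Returns (trix, trix_signal)."""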
from hermes.factors.technical.core.overlap import ema
length = int(length) if length and length > 0 else 30
signal = int(signal) if signal and signal > 0 else 9
scalar = float(scalar) if scalar else 100
drift = int(drift) if isinstance(drift, int) and drift != 0 else 1
offset = int(offset) if isinstance(offset, int) else 0
# Calculate Result
ema1 = ema(close=close, length=length, **kwargs)
ema2 = ema(close=ema1, length=length, **kwargs)
ema3 = ema(close=ema2, length=length, **kwargs)
trix = scalar * ema3.pct_change(drift)
trix_signal = trix.rolling(signal).mean()
# Offset
if offset != 0:
trix = trix.shift(offset)
trix_signal = trix_signal.shift(offset)
# Handle fills
if "fillna" in kwargs:
trix.fillna(kwargs["fillna"], inplace=True)
trix_signal.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
trix.fillna(method=kwargs["fill_method"], inplace=True)
trix_signal.fillna(method=kwargs["fill_method"], inplace=True)
return trix, trix_signal
def tsi(close,
fast=None,
slow=None,
signal=None,
scalar=None,
drift=None,
offset=None,
**kwargs):
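    """True Strength Index: scalar * EMA(EMA(close.diff(drift), slow), fast)
    / EMA(EMA(|close.diff(drift)|, slow), fast), with an EMA signal line.
    Returns (tsi, tsi_signal)."""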
from hermes.factors.technical.core.overlap import ema
fast = int(fast) if fast and fast > 0 else 13
slow = int(slow) if slow and slow > 0 else 25
signal = int(signal) if signal and signal > 0 else 13
scalar = float(scalar) if scalar else 100
drift = int(drift) if isinstance(drift, int) and drift != 0 else 1
offset = int(offset) if isinstance(offset, int) else 0
# Calculate Result
diff = close.diff(drift)
slow_ema = ema(close=diff, length=slow, **kwargs)
fast_slow_ema = ema(close=slow_ema, length=fast, **kwargs)
abs_diff = diff.abs()
abs_slow_ema = ema(close=abs_diff, length=slow, **kwargs)
abs_fast_slow_ema = ema(close=abs_slow_ema, length=fast, **kwargs)
tsi = scalar * fast_slow_ema / abs_fast_slow_ema
tsi_signal = ema(tsi, length=signal)
# Offset
if offset != 0:
tsi = tsi.shift(offset)
tsi_signal = tsi_signal.shift(offset)
# Handle fills
if "fillna" in kwargs:
tsi.fillna(kwargs["fillna"], inplace=True)
tsi_signal.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
tsi.fillna(method=kwargs["fill_method"], inplace=True)
tsi_signal.fillna(method=kwargs["fill_method"], inplace=True)
return tsi, tsi_signal
def upintraday(open, high, length=None, offset=None, **kwargs):
length = int(length) if length and length > 0 else 5
offset = int(offset) if isinstance(offset, int) else 0
upintraday = ((high - open) / open).rolling(length).mean()
# Offset
if offset != 0:
upintraday = upintraday.shift(offset)
# Handle fills
if "fillna" in kwargs:
upintraday.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
upintraday.fillna(method=kwargs["fill_method"], inplace=True)
return upintraday
def dnintraday(open, low, length=None, offset=None, **kwargs):
length = int(length) if length and length > 0 else 5
offset = int(offset) if isinstance(offset, int) else 0
dnintraday = ((low - open) / open).rolling(length).mean()
# Offset
if offset != 0:
dnintraday = dnintraday.shift(offset)
# Handle fills
if "fillna" in kwargs:
dnintraday.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
dnintraday.fillna(method=kwargs["fill_method"], inplace=True)
return dnintraday
def upchvolatility(close, high, length=None, offset=None, **kwargs):
length = int(length) if length and length > 0 else 5
offset = int(offset) if isinstance(offset, int) else 0
pre_close = close.shift(1)
upchvolatility = ((high - pre_close) / pre_close).rolling(length).mean()
# Offset
if offset != 0:
upchvolatility = upchvolatility.shift(offset)
# Handle fills
if "fillna" in kwargs:
upchvolatility.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
upchvolatility.fillna(method=kwargs["fill_method"], inplace=True)
return upchvolatility
def uphhvolatility(high, length=None, offset=None, **kwargs):
length = int(length) if length and length > 0 else 5
offset = int(offset) if isinstance(offset, int) else 0
pre_high = high.shift(1)
uphhvolatility = ((high - pre_high) / pre_high).rolling(length).mean()
# Offset
if offset != 0:
uphhvolatility = uphhvolatility.shift(offset)
# Handle fills
if "fillna" in kwargs:
uphhvolatility.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
uphhvolatility.fillna(method=kwargs["fill_method"], inplace=True)
return uphhvolatility
def willr(high, low, close, length=None, talib=None, offset=None, **kwargs):
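    """Williams %R: 100 * ((close - lowest low) / (highest high - lowest low)
    - 1) over `length` bars (default 14); ranges from -100 to 0."""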
length = int(length) if length and length > 0 else 14
    min_periods = (int(kwargs["min_periods"])
                   if kwargs.get("min_periods") is not None else length)
offset = int(offset) if isinstance(offset, int) else 0
lowest_low = low.rolling(length, min_periods=min_periods).min()
highest_high = high.rolling(length, min_periods=min_periods).max()
willr = 100 * ((close - lowest_low) / (highest_high - lowest_low) - 1)
# Offset
if offset != 0:
willr = willr.shift(offset)
# Handle fills
if "fillna" in kwargs:
willr.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
willr.fillna(method=kwargs["fill_method"], inplace=True)
return willr | PypiClean |
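# Usage sketch (illustrative only, not part of the original module): assumes
# the module's top-level imports provide pandas, and `df` is a hypothetical
# OHLCV DataFrame with "open"/"high"/"low"/"close"/"volume" columns:
#
#     rsi_14 = rsi(df["close"], length=14)
#     macd_line, histogram, signal_line = macd(df["close"], fast=12, slow=26, signal=9)
#     k, d = stoch(df["high"], df["low"], df["close"], k=14, d=3, smooth_k=3)
#     pct_r = willr(df["high"], df["low"], df["close"], length=14)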
/BioMine-0.9.5.zip/BioMine-0.9.5/biomine/webapi/entrez/entrezapi.py |
import xml.etree.ElementTree as ET
from biomine.webapi.webapi import webapi
from biomine.variant.variant import variant
from biomine.variant.mafvariant import mafvariant
from biomine.variant.clinvarvariant import clinvarvariant
import re
class entrezapi(webapi):
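	"""Thin client for the NCBI Entrez E-utilities (esearch, esummary, elink,
	epost, efetch), with helpers for querying ClinVar and parsing variant,
	trait and clinical-significance records."""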
endpoint = "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/"
esearch = "esearch.fcgi?"
esummary = "esummary.fcgi?"
elink = "elink.fcgi?"
epost = "epost.fcgi?"
efetch = "efetch.fcgi?"
pubmed = "PubMed"
clinvar = "ClinVar"
protein = "Protein"
gene = "Gene"
dbsnp = "dbSNP"
dbvar = "dbVar"
omim = "OMIM"
defaultGroup = "grabBag"
largestBatchSize = 500
requestsPerWindow = 3
timeWindow = 1 #in seconds
searchBatchSize = 50
summaryBatchSize = 500
def __init__(self,**kwargs):
subset = kwargs.get("subset",'')
if not subset:
super(entrezapi,self).__init__(entrezapi.endpoint,entrezapi.esearch)
else:
if ( subset == entrezapi.esearch or subset == entrezapi.esummary or
subset == entrezapi.elink or subset == entrezapi.epost or
subset == entrezapi.efetch ):
super(entrezapi,self).__init__(entrezapi.endpoint,subset)
			else:
				# print "biomine ERROR: bad subset. entrezapi.subset initializing to esearch"
				super(entrezapi,self).__init__(entrezapi.endpoint,entrezapi.esearch)
self.database = kwargs.get("database",entrezapi.clinvar)
self.queries = { entrezapi.defaultGroup : "" }
self.uids = []
#self.linkname = ""
#self.databaseFrom = ""
#self.command = ""
self.query_key = ""
self.web_env = ""
self.retmax = ""
self.retstart = ""
self.rettype = ""
self.retmode = ""
self.usehistory = ""
self.assembly = "GRCh37"
self.setRequestLimits()
def addQuery( self , term , **kwargs ):
field = kwargs.get( "field" , "" )
group = kwargs.get( "group" , entrezapi.defaultGroup )
query = ""
#print "Adding query: " + term + " " + field + " " + group ,
		if group in self.queries and self.queries[group]: #the group already has a query going
#print "\tGroup=" + group + " exists!"
if group != entrezapi.defaultGroup: #not the default group
#print "\t\tCurrent query is=" + self.queries[group]
query = self.queries[group] + "+AND+"
else: #anything goes in the default group
#print "\t\tCurrent query is=" + self.queries[group]
query = self.queries[group] + "+OR+"
#print "\tcurrrent query is: " + query + "."
query += term
if field:
query += "[" + field + "]"
#print "\tNew query for group=" + group + " is: " + query
tempq = { group : query }
self.queries.update( tempq )
def buildQuery( self ):
query = ""
for group in self.queries:
if self.queries[group]:
query += "(" + self.queries[group] + ")+OR+"
return query
def prepQuery( self , userVariants ): #expects a list of variants
self.resetQuery()
for var in userVariants:
thisGroup = var.uniqueVar()
self.addQuery( str(var.gene) , field="gene" , group=thisGroup )
self.addQuery( str(var.chromosome) , field="chr" , group=thisGroup )
self.addQuery( str(var.start) + ":" + str(var.stop) , field="chrpos37" , group=thisGroup )
self.addQuery( "human" , field="orgn" , group=thisGroup )
#self.addQuery( var.variantClass , "vartype" )
#self.addQuery( var.referencePeptide + var.positionPeptide + var.alternatePeptide , "Variant name" )
#var.referencePeptide , var.positionPeptide , var.alternatePeptide
def addID( self , uid ):
self.uids.append( uid )
def resetQuery( self ):
self.queries = {}
def resetID( self ):
self.uids = []
def buildSearchAction( self ):
query = self.buildQuery()
self.action = '&'.join( [ "db=" + self.database , "term=" + query ] )
self.actionReturnParameters( )
return self.action
def buildWebEnvAction( self ):
try:
self.action = '&'.join( [ "db=" + self.database , "WebEnv=" + self.web_env ,
"query_key=" + self.query_key ] )
self.usehistory = ""
self.actionReturnParameters( )
return self.action
except:
print "entrez Error: can't use WebEnv"
pass
def buildSummaryAction( self , ids ):
self.uid = ','.join( ids )
self.action = "db=" + self.database + "&id=" + self.uid
return self.action
def actionReturnParameters( self ):
if self.rettype:
self.action += "&rettype=" + self.rettype
if self.retmode:
self.action += "&retmode=" + self.retmode
if self.retstart:
self.action += "&retstart=" + str(self.retstart)
if self.retmax:
self.action += "&retmax=" + str(self.retmax)
if self.usehistory:
self.action += "&usehistory=" + self.usehistory
return self.action
def setRetmax( self , total , **kwargs ):
summaryBatchSize = kwargs.get( 'summaryBatchSize' , entrezapi.largestBatchSize )
if total > summaryBatchSize:
# print str(total) + " > " + str(summaryBatchSize)
self.retmax = summaryBatchSize
else:
# print str(total) + " <= " + str(summaryBatchSize)
self.retmax = total
def doBatch( self , summaryBatchSize ):
self.usehistory = "y"
action = self.buildSearchAction( )
url = self.buildURL()
# print url
response = self.submit()
root = self.getXMLRoot()
variants = {}
if root != "<None/>":
self.web_env = self.getEntry( root , 'WebEnv' )
self.query_key = self.getEntry( root , 'QueryKey' )
totalRecords = 0
totalRecords = self.getEntry( root , 'Count' )
try:
if int(totalRecords) > 0:
# print totalRecords + "records found"
self.setRetmax( int(totalRecords) , summaryBatchSize=summaryBatchSize )
self.subset = entrezapi.esummary
for self.retstart in range( 0 , int(totalRecords) , self.retmax ):
self.action = self.buildWebEnvAction()
url = self.buildURL()
# print url
summaryResponse = self.submit()
variants.update( self.getClinVarEntry() )
#print "These are the ClinVar variants: "
#print variants
except:
print "CharGer Warning: No ClinVar records in this batch\n"
return variants
def parseClinVarTitle( self , DocumentSummary ):
# print "biomine::webapi::entrez::entrezapi::parseClinVarTitle - " ,
title = self.getEntry( DocumentSummary , 'title' )
# print title
codonPos = ""
peptideRef = ""
peptidePos = ""
peptideAlt = ""
var = mafvariant()
residueMatches = re.search( "\((p\.\w+)\)" , title )
# print "peptide variant: " ,
# print residueMatches
if residueMatches:
hgvsp = residueMatches.groups()[-1]
# print hgvsp
[ peptideRef , peptidePos , peptideAlt ] = var.splitHGVSp( hgvsp )
codonMatches = re.search( "(c\.\d+)" , title )
# print "codon variant: " ,
# print codonMatches
if codonMatches:
hgvsc = codonMatches.groups()[-1]
# print hgvsc
[ codonRef , codonPos , codonAlt ] = var.splitHGVSc( hgvsc )
return { "title" : title , \
"referencePeptide" : peptideRef , \
"positionPeptide" : peptidePos , \
"alternatePeptide" : peptideAlt , \
"positionCodon" : codonPos }
def getClinVarEntry( self ):
# print "\twebapi::entrez::entrezapi::getClinVarEntry"
root = self.getXMLRoot()
variants = {}
for DocumentSummary in root.iter( 'DocumentSummary' ):
uid = DocumentSummary.attrib["uid"]
var = clinvarvariant( uid=uid )
self.getClinVarVariantEntry( var , DocumentSummary )
self.getClinVarTraitEntry( var , DocumentSummary )
self.getClinVarClinicalEntry( var , DocumentSummary )
if var.genomicVar():
variants[var.genomicVar()] = var
else:
variants["null"] = var
print "entrez Warning: could not set ClinVar variant " + uid
return variants
def getClinVarVariantEntry( self , var , DocumentSummary ):
# print "\twebapi::entrez::entrezapi::getClinVarVariantEntry"
titleDetails = self.parseClinVarTitle( DocumentSummary )
# print titleDetails
var.referencePeptide = titleDetails["referencePeptide"]
var.positionPeptide = titleDetails["positionPeptide"]
var.alternatePeptide = titleDetails["alternatePeptide"]
var.positionCodon = titleDetails["positionCodon"]
for variation in DocumentSummary.iter( 'variation' ):
for variation_xref in DocumentSummary.iter( 'variation_xref' ):
dbs = self.getEntry( variation_xref , 'db_source' )
if dbs == entrezapi.dbsnp:
var.dbSNP = self.getEntry( variation_xref , 'db_id' )
#print "dbSNP rs" + var.dbSNP
#if dbs == entrezapi.omim:
#var.omim = self.getEntry( variation_xref , 'db_id' )
#print "OMIM " + var.omim
for assembly_set in variation.iter( 'assembly_set' ):
assembly_name = self.getEntry( assembly_set , 'assembly_name' )
if assembly_name == self.assembly:
var.chromosome = assembly_set.find( 'chr' ).text
var.start = assembly_set.find( 'start' ).text
var.stop = assembly_set.find( 'stop' ).text
var.alternate = assembly_set.find( 'alt' ).text
var.reference = assembly_set.find( 'ref' ).text
#assembly_acc_ver = assembly_set.find( 'assembly_acc_ver' )
for gene in DocumentSummary.iter( 'gene' ):
var.gene = self.getEntry( gene , 'symbol' )
var.strand = self.getEntry( gene , 'strand' )
if not var.genomicVar():
print "entrez Warning: no ClinVar variant entry"
def getClinVarTraitEntry( self , var , DocumentSummary ):
# print "\twebapi::entrez::entrezapi::getClinVarTraitEntry"
try:
for trait in DocumentSummary.iter( 'trait' ):
trait_name = self.getEntry( trait , 'trait_name' )
trait_xrefs = {}
var.trait = { trait_name : trait_xrefs }
for trait_xref in trait.iter( 'trait_xref' ):
db_source = self.getEntry( trait_xref , 'db_source' )
db_id = self.getEntry( trait_xref , 'db_id' )
txr = {}
if trait_name in trait_xrefs:
txr = trait_xrefs[trait_name]
txr.update( { db_source : db_id } )
trait_xrefs.update( { trait_name : txr } )
var.trait.update( { trait_name : trait_xrefs } )
except:
print "entrez Warning: no ClinVar trait entry"
pass
def getClinVarClinicalEntry( self , var , DocumentSummary ):
# print "\twebapi::entrez::entrezapi::getClinVarClinicalEntry"
try:
for clinical_significance in DocumentSummary.iter( 'clinical_significance' ):
var.clinical["description"] = self.getEntry( clinical_significance , 'description' ).strip()
var.clinical["review_status"] = self.getEntry( clinical_significance , 'review_status' ).strip()
except:
print "entrez Warning: no ClinVar clinical entry"
pass
def getClinVarPubMedIDs( self , var , DocumentSummary ):
try:
return var.linkPubMed()
except:
print "entrez Error: no ClinVar uid"
pass
def searchPubMed( self , query ):
self.subset = entrezapi.esearch
self.database = entrezapi.pubmed
		self.addQuery( query )
		self.action = self.buildSearchAction( )
#print self.subset
#print self.database
#print self.action
return self.submit( )
def searchClinVar( self , query , **kwargs ):
self.subset = entrezapi.esearch
self.database = entrezapi.clinvar
		self.addQuery( query )
		self.action = self.buildSearchAction( )
#print self.subset
#print self.database
#print self.action
return self.submit( )
def getSNPGlobalMAF( self , rs ):
self.addQuery( rs )
self.subset = entrezapi.esummary
self.submit()
root = self.getXMLRoot()
entries = {}
for DocSum in root.iter( 'DocSum' ):
print ""
def setRequestLimits( self ):
self.setRequestLimit( entrezapi.requestsPerWindow )
self.setSearchBatchSize( entrezapi.searchBatchSize )
		self.setSummaryBatchSize( entrezapi.summaryBatchSize )
		self.setTimeWindow( entrezapi.timeWindow )
/Flask-Statics-Helper-1.0.0.tar.gz/Flask-Statics-Helper-1.0.0/flask_statics/static/angular/i18n/angular-locale_en-nr.js | 'use strict';
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
function getDecimals(n) {
n = n + '';
var i = n.indexOf('.');
return (i == -1) ? 0 : n.length - i - 1;
}
function getVF(n, opt_precision) {
var v = opt_precision;
if (undefined === v) {
v = Math.min(getDecimals(n), 3);
}
var base = Math.pow(10, v);
var f = ((n * base) | 0) % base;
return {v: v, f: f};
}
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": [
"AM",
"PM"
],
"DAY": [
"Sunday",
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday"
],
"MONTH": [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December"
],
"SHORTDAY": [
"Sun",
"Mon",
"Tue",
"Wed",
"Thu",
"Fri",
"Sat"
],
"SHORTMONTH": [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec"
],
"fullDate": "EEEE, MMMM d, y",
"longDate": "MMMM d, y",
"medium": "MMM d, y h:mm:ss a",
"mediumDate": "MMM d, y",
"mediumTime": "h:mm:ss a",
"short": "M/d/yy h:mm a",
"shortDate": "M/d/yy",
"shortTime": "h:mm a"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "$",
"DECIMAL_SEP": ".",
"GROUP_SEP": ",",
"PATTERNS": [
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "\u00a4-",
"negSuf": "",
"posPre": "\u00a4",
"posSuf": ""
}
]
},
"id": "en-nr",
"pluralCat": function(n, opt_precision) { var i = n | 0; var vf = getVF(n, opt_precision); if (i == 1 && vf.v == 0) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
});
}]); | PypiClean |
/FitsGeo-1.0.0.tar.gz/FitsGeo-1.0.0/fitsgeo/export.py | from .surface import created_surfaces
from .material import created_materials
from .cell import created_cells
def phits_export(
to_file=False, inp_name="example",
export_surfaces=True, export_materials=True, export_cells=True):
# TODO: improve export to file
"""
	Function for printing defined sections in PHITS format; uses the
	created_surfaces, created_materials and created_cells lists, which
	contain all defined objects
:param to_file: flag to export sections in input file
:param inp_name: name for input file export
:param export_surfaces: flag for [ Surface ] section export
:param export_materials: flag for [ Material ] section export
:param export_cells: flag for [ Cell ] section export
"""
text_materials = ""
if not created_materials:
print("No material is defined!\ncreated_materials list is empty!")
export_materials = False
else:
text_materials = "\n[ Material ]\n"
for mat in created_materials:
if mat.phits_print() != "":
text_materials += mat.phits_print() + "\n"
# For colors
text_materials += "\n[ Mat Name Color ]\n\tmat\tname\tsize\tcolor\n"
for mat in created_materials:
if mat.matn > 0: # To avoid outer and void
mat_name = "{"+mat.name.replace('_', '\\_')+"}"
text_materials += \
f"\t{mat.matn}\t{mat_name}\t1.00\t{mat.color}\n"
# ------------------------------------------------------------------------------
text_surfaces = ""
if not created_surfaces:
print("No surface is defined!\ncreated_surfaces list is empty!")
export_surfaces = False
else:
text_surfaces = "\n[ Surface ]\n"
for s in created_surfaces:
text_surfaces += s.phits_print() + "\n"
# ------------------------------------------------------------------------------
text_cells = ""
if not created_cells:
print("No cell is defined!\ncreated_cells list is empty!")
export_cells = False
else:
text_cells = "\n[ Cell ]\n"
for c in created_cells:
text_cells += c.phits_print() + "\n"
# ------------------------------------------------------------------------------
print(text_materials+text_surfaces+text_cells)
if to_file:
with open(f"{inp_name}_FitsGeo.inp", "w", encoding="utf-8") as f:
if export_materials:
f.write(text_materials)
if export_surfaces:
f.write(text_surfaces)
if export_cells:
f.write(text_cells)
if __name__ == "__main__":
print(
"--- Welcome to FitsGeo! ---\n" +
"This is a module for FitsGeo!\nImport FitsGeo to use.") | PypiClean |
/dipex-4.54.5.tar.gz/dipex-4.54.5/integrations/SD_Lon/sdlon/sd_fixup.py | import datetime
from datetime import date
from functools import partial
from operator import itemgetter
from typing import List
from typing import Optional
from typing import Tuple
from uuid import UUID
import click
import httpx
from gql import gql
from more_itertools import consume
from more_itertools import flatten
from more_itertools import one
from more_itertools import only
from more_itertools import side_effect
from more_itertools import unzip
from os2mo_helpers.mora_helpers import MoraHelper
from ra_utils.apply import apply
from ra_utils.load_settings import load_setting
from raclients.graph.client import GraphQLClient
from raclients.graph.client import SyncClientSession
from tqdm import tqdm
from . import sd_payloads
from .config import get_importer_settings
from .sd_changed_at import ChangeAtSD
from .sd_common import EmploymentStatus
from .sd_common import mora_assert
from .sd_common import primary_types
from .sd_common import sd_lookup
def fetch_user_employments(cpr: str) -> List:
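    """Fetch the current SD employments for a person identified by CPR.

    Always returns a list; empty if SD has no matching person.
    """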
# Notice, this will not get future engagements
params = {
"PersonCivilRegistrationIdentifier": cpr,
"StatusActiveIndicator": "true",
"StatusPassiveIndicator": "true",
"DepartmentIndicator": "true",
"EmploymentStatusIndicator": "true",
"ProfessionIndicator": "true",
"WorkingTimeIndicator": "true",
"UUIDIndicator": "true",
"SalaryAgreementIndicator": "false",
"SalaryCodeGroupIndicator": "false",
"EffectiveDate": date.today().strftime("%d.%m.%Y"),
}
sd_employments_response = sd_lookup("GetEmployment20111201", None, params)
if "Person" not in sd_employments_response:
return []
employments = sd_employments_response["Person"]["Employment"]
if not isinstance(employments, list):
employments = [employments]
return employments
def get_orgfunc_from_vilkaarligrel(
class_uuid: str, mox_base: str = "http://localhost:5000/lora"
) -> dict:
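    """Return the organisationfunktion objects in LoRa whose 'vilkaarligrel'
    relation matches the given class UUID, or {} when nothing is found."""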
url = f"{mox_base}/organisation/organisationfunktion"
params = {"vilkaarligrel": class_uuid, "list": "true", "virkningfra": "-infinity"}
r = httpx.get(url, params=params)
r.raise_for_status()
return only(r.json()["results"], default={})
def get_user_from_org_func(org_func: dict) -> Optional[str]:
registrations = one(org_func["registreringer"])
user = one(registrations["relationer"]["tilknyttedebrugere"])
return user["uuid"]
def filter_missing_data(leave: dict) -> bool:
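    """True when the leave registration has no 'tilknyttedefunktioner'
    relation, i.e. no link to an engagement."""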
return not one(leave["registreringer"])["relationer"].get("tilknyttedefunktioner")
def delete_orgfunc(uuid: str, mox_base: str = "http://localhost:5000/lora") -> None:
r = httpx.delete(f"{mox_base}/organisation/organisationfunktion/{uuid}")
r.raise_for_status()
def fixup(ctx, mo_employees):
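    """For each MO employee, rewrite 'status0' engagements whose SD
    employment is no longer status 0 to use the non-primary class."""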
def fetch_mo_engagements(mo_employee) -> dict:
mo_uuid = mo_employee["uuid"]
mo_engagements = mora_helper.read_user_engagement(user=mo_uuid, read_all=True)
no_salary_mo_engagements = list(
filter(
lambda mo_engagement: mo_engagement["primary"]["user_key"] == "status0",
mo_engagements,
)
)
mo_salary_userkeys = map(itemgetter("user_key"), no_salary_mo_engagements)
mo_dict = dict(zip(mo_salary_userkeys, no_salary_mo_engagements))
return mo_dict
def fetch_sd_employments(mo_employee):
mo_cpr = mo_employee["cpr_no"]
sd_employments = fetch_user_employments(mo_cpr)
sd_ids = map(itemgetter("EmploymentIdentifier"), sd_employments)
sd_dict = dict(zip(sd_ids, sd_employments))
return sd_dict
def fetch_pairs(mo_employee):
try:
mo_dict = fetch_mo_engagements(mo_employee)
if not mo_dict:
return None
sd_dict = fetch_sd_employments(mo_employee)
return mo_dict, sd_dict
except Exception as exp:
print(mo_employee)
print(exp)
return None
def process_tuples(mo_dict, sd_dict):
# Find intersection
common_keys = mo_dict.keys() & sd_dict.keys()
for key in common_keys:
yield (key, mo_dict[key], sd_dict[key])
def sd_not_status_0(work_tuple):
key, mo_engagement, sd_employment = work_tuple
sd_status = sd_employment["EmploymentStatus"]["EmploymentStatusCode"]
return sd_status != "0"
def generate_payload(work_tuple):
key, mo_engagement, sd_employment = work_tuple
print("Fixing", key)
data = {
"validity": mo_engagement["validity"],
"primary": {"uuid": primary["non_primary"]},
}
payload = sd_payloads.engagement(data, mo_engagement)
return payload
mora_helper = ctx["mora_helper"]
primary = primary_types(mora_helper)
if ctx["progress"]:
mo_employees = tqdm(mo_employees, unit="Employee")
# Dict pair is an iterator of (dict, dict) tuples or None
# First dict is a mapping from employment_id to mo_engagement
# Second dict is a mapping from employment_id to sd_engagement
dict_pairs = map(fetch_pairs, mo_employees)
# Remove all the None's from dict_pairs
dict_pairs = filter(None.__ne__, dict_pairs)
# Convert dict_pairs into an iterator of three tuples:
# (key, mo_engagement, sd_employment)
# 'key' is the shared employment_id
work_tuples = flatten(map(process_tuples, *unzip(dict_pairs)))
# Filter all tuples, where the sd_employment has status 0
work_tuples = filter(sd_not_status_0, work_tuples)
# At this point, we have a tuple of items which need to be updated / fixed
# Convert all the remaining tuples to MO payloads
payloads = map(generate_payload, work_tuples)
if ctx["dry_run"]:
return
for payload in payloads:
response = mora_helper._mo_post("details/edit", payload)
mora_assert(response)
def read_associations(session: SyncClientSession, org_unit_uuids: list[UUID]):
"""Query every association to the provided org_units"""
query = gql(
"""
query AssociationsQuery ($org_units: [UUID!]) {
associations(org_units: $org_units) {
objects {
uuid
association_type_uuid
validity {
from
}
}
}
}
"""
)
r = session.execute(
query, variable_values={"org_units": [str(u) for u in org_unit_uuids]}
)
return [one(a["objects"]) for a in r["associations"]]
def fix_association_types(
session: SyncClientSession,
association_uuid: UUID,
from_date: str,
correct_association_type_uuid: UUID,
):
"""Update given associations with the given association type"""
query = gql(
"""
mutation UpdateAssociation($uuid: UUID!, $from: DateTime!, $association_type: UUID!) {
association_update(
input: {uuid: $uuid, validity: {from: $from}, association_type: $association_type}
) {
uuid
}
}
"""
)
session.execute(
query,
variable_values={
"uuid": str(association_uuid),
"from": from_date,
"association_type": str(correct_association_type_uuid),
},
)
@click.group()
@click.option(
"--mora-base",
default="http://localhost:5000",
help="URL for MO.",
envvar="MORA_BASE",
)
@click.option("--json/--no-json", default=False, help="Output as JSON.")
@click.option("--progress/--no-progress", default=False, help="Print progress.")
@click.option("--fixup-status-0", default=False, help="Attempt to fix status-0 issues.")
@click.option(
"--dry-run/--no-dry-run", default=False, help="Dry-run making no actual changes."
)
@click.pass_context
def cli(ctx, mora_base, **kwargs):
"""Tool to fixup MO entries according to SD data.
This tool should never be needed, as it indicates issues in the main code.
It is however needed due to the quality of the main code.
"""
# ensure that ctx.obj exists and is a dict, no matter how it is called.
ctx.ensure_object(dict)
ctx.obj = dict(kwargs)
ctx.obj["mora_base"] = mora_base
ctx.obj["mora_helper"] = MoraHelper(hostname=mora_base, use_cache=False)
@cli.command()
@click.option(
"--uuid", type=click.UUID, help="UUID of the user to check.", required=True
)
@click.pass_context
# Example UUID: 'fadfcc38-5d42-4857-a950-0adc65babb13'
def fixup_user(ctx, uuid):
"""Fix a single employee."""
mo_employees = [ctx.obj["mora_helper"].read_user(user_uuid=uuid)]
fixup(ctx.obj, mo_employees)
@cli.command()
@click.option(
"--root-uuid", type=click.UUID, help="UUID of root SD org_unit", required=True
)
@click.option(
"--client-id", envvar="CLIENT_ID", help="Keycloak client id", default="dipex"
)
@click.option(
"--client-secret",
envvar="CLIENT_SECRET",
help="Keycloak client secret",
)
@click.option(
"--auth-realm", envvar="AUTH_REALM", help="Keycloak auth realm", default="mo"
)
@click.option(
"--auth-server",
envvar="AUTH_SERVER",
help="Keycloak auth server",
default="http://localhost:5000/auth",
)
@click.pass_context
def fixup_associations(
ctx, root_uuid, client_id, client_secret, auth_realm, auth_server
):
"""Ensure all associations are of the type "SD-medarbejder"."""
mora_helper = ctx.obj["mora_helper"]
org = mora_helper.read_organisation()
org_units = mora_helper.read_ou_root(org, str(root_uuid))
org_unit_uuids = [o["uuid"] for o in org_units]
association_type_uuid = mora_helper.ensure_class_in_facet(
"association_type", "SD-Medarbejder"
)
with GraphQLClient(
url=f"{ctx.obj['mora_base']}/graphql/v3",
client_id=client_id,
client_secret=client_secret,
auth_realm=auth_realm,
auth_server=auth_server,
sync=True,
httpx_client_kwargs={"timeout": None},
) as session:
associations = read_associations(session, org_unit_uuids)
filtered_associations = {
a["uuid"]: a["validity"]["from"]
for a in associations
if a["association_type_uuid"] != str(association_type_uuid)
}
if ctx.obj["dry_run"]:
click.echo(
f"Found {len(list(filtered_associations))} associations that needs to be changed."
)
click.echo(filtered_associations.keys())
return
for uuid, from_date in filtered_associations.items():
try:
fix_association_types(
session=session,
association_uuid=uuid,
from_date=from_date,
correct_association_type_uuid=association_type_uuid,
)
            except Exception:
                click.echo(f"Error processing association with {uuid=}")
@cli.command()
@click.option(
"--mox-base",
default=load_setting("mox.base", "http://localhost:5000/lora"),
help="URL for Lora",
)
@click.pass_context
def fixup_leaves(ctx, mox_base):
"""Fix all leaves that are missing a link to an engagement."""
settings = get_importer_settings()
mora_helper = ctx.obj["mora_helper"]
# Find all classes of leave_types
leave_types, _ = mora_helper.read_classes_in_facet("leave_type")
leave_type_uuids = map(itemgetter("uuid"), leave_types)
# Get all leave objects
orgfunc_getter = partial(get_orgfunc_from_vilkaarligrel, mox_base=mox_base)
leave_objects = map(orgfunc_getter, leave_type_uuids)
leave_objects = list(flatten(leave_objects))
# Filter to get only those missing the 'engagement'.
leave_objects = list(filter(filter_missing_data, leave_objects))
leave_uuids = set(map(itemgetter("id"), leave_objects))
# Delete old leave objects
if ctx.obj["dry_run"]:
click.echo(f"Dry-run. Would delete {len(leave_uuids)} leave objects")
else:
orgfunc_deleter = partial(delete_orgfunc, mox_base=mox_base)
leave_uuids = tqdm(
leave_uuids,
unit="leave",
desc="Deleting old leaves",
disable=not ctx.obj["progress"],
)
consume(side_effect(orgfunc_deleter, leave_uuids))
# Find all user uuids and cprs
user_uuids = set(map(get_user_from_org_func, leave_objects))
user_uuids = tqdm(
user_uuids,
unit="user",
desc="Looking up users in MO",
disable=not ctx.obj["progress"],
)
users = map(mora_helper.read_user, user_uuids)
cpr_uuid_map = dict(map(itemgetter("cpr_no", "uuid"), users))
# NOTE: This will only reimport current leaves, not historic ones
# This behavior is inline with sd_importer.py
changed_at = ChangeAtSD(settings, datetime.datetime.now())
def try_fetch_leave(cpr: str) -> Tuple[str, List[dict]]:
"""Attempt to lookup engagements from a CPR.
Prints any errors but continues
"""
employments = []
try:
employments = fetch_user_employments(cpr=cpr)
except Exception as e:
click.echo(e)
# filter leaves
leaves = list(
filter(
lambda employment: EmploymentStatus(
employment["EmploymentStatus"]["EmploymentStatusCode"]
)
== EmploymentStatus.Orlov,
employments,
)
)
return cpr, leaves
cprs = tqdm(
cpr_uuid_map.keys(),
desc="Lookup users in SD",
unit="User",
disable=not ctx.obj["progress"],
)
leaves = dict(map(try_fetch_leave, cprs))
# Filter users with leave
leaves = dict(filter(apply(lambda cpr, engagement: engagement), leaves.items()))
if ctx.obj["dry_run"]:
click.echo(f"Dry-run. Would reimport leaves for {len(leaves)} users.")
return
    for cpr, user_leaves in tqdm(
leaves.items(),
unit="user",
desc="Reimporting leaves for users",
disable=not ctx.obj["progress"],
):
        for leave in user_leaves:
changed_at.create_leave(
leave["EmploymentStatus"],
leave["EmploymentIdentifier"],
cpr_uuid_map[cpr],
)
@cli.command()
@click.option(
"--uuid", type=click.UUID, help="UUID of the organisation to check.", required=True
)
@click.pass_context
# Example UUID: 'ea5a237a-8f8b-4300-9a00-000006180002'
def fixup_department(ctx, uuid):
"""Fix all employees in an organisation."""
mo_employees = (
ctx.obj["mora_helper"].read_organisation_people(uuid, read_all=True).keys()
)
fixup(ctx.obj, mo_employees)
@cli.command()
@click.option(
"--limit",
type=click.INT,
help="Number of employees to check. 0 --> All.",
default=5,
)
@click.pass_context
def fixup_all(ctx, limit):
"""Fix limit/all employees in an MO instance."""
mo_employees = ctx.obj["mora_helper"].read_all_users(limit=limit)
fixup(ctx.obj, mo_employees)
if __name__ == "__main__":
cli() | PypiClean |
/IsoCon-0.3.3.tar.gz/IsoCon-0.3.3/modules/nearest_neighbor_graph.py | from __future__ import print_function
import os,sys
import argparse
import re
import math
import edlib
import signal
from multiprocessing import Pool
import multiprocessing as mp
import sys
from modules.input_output import write_output
def get_nearest_neighbors_helper(arguments):
args, kwargs = arguments
return get_nearest_neighbors(*args, **kwargs)
def get_exact_nearest_neighbor_graph(seq_to_acc_list_sorted, has_converged, params):
if params.nr_cores == 1:
best_edit_distances = get_nearest_neighbors(seq_to_acc_list_sorted, 0, 0, seq_to_acc_list_sorted, has_converged, params.neighbor_search_depth)
# implement check here to se that all seqs got a nearest_neighbor, if not, print which noes that did not get a nearest_neighbor computed.!
else:
####### parallelize alignment #########
# pool = Pool(processes=mp.cpu_count())
original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGINT, original_sigint_handler)
pool = Pool(processes=params.nr_cores)
# here we split the input into chunks
chunk_size = max(int(len(seq_to_acc_list_sorted) / (10*params.nr_cores)), 20 )
ref_seq_chunks = [ ( max(0, i - params.neighbor_search_depth -1), seq_to_acc_list_sorted[max(0, i - params.neighbor_search_depth -1) : i + chunk_size + params.neighbor_search_depth +1 ]) for i in range(0, len(seq_to_acc_list_sorted), chunk_size) ]
chunks = [(i, seq_to_acc_list_sorted[i:i + chunk_size]) for i in range(0, len(seq_to_acc_list_sorted), chunk_size)]
if params.verbose:
write_output.logger(str([j for j, ch in ref_seq_chunks]), params.develop_logfile, timestamp=False)
write_output.logger("reference chunks:" + str([len(ch) for j,ch in ref_seq_chunks]), params.develop_logfile, timestamp=False)
# print([j for j, ch in ref_seq_chunks])
# print("reference chunks:", [len(ch) for j,ch in ref_seq_chunks])
write_output.logger(str([i for i,ch in chunks]), params.develop_logfile, timestamp=False)
write_output.logger("query chunks:" + str([len(ch) for i,ch in chunks]), params.develop_logfile, timestamp=False)
print([i for i,ch in chunks])
print("query chunks:", [len(ch) for i,ch in chunks])
already_converged_chunks = []
for i, chunk in chunks:
already_converged_chunk = set()
for seq, acc in chunk:
if seq in has_converged:
already_converged_chunk.add(seq)
already_converged_chunks.append(already_converged_chunk)
if params.verbose:
write_output.logger("already converged chunks: " + str([len(ch) for ch in already_converged_chunks]), params.develop_logfile, timestamp=False)
# get nearest_neighbors takes thre sub containers:
# chunk - a container with (sequences, accesions)-tuples to be aligned (queries)
# ref_seq_chunks - a container with (sequences, accesions)-tuples to be aligned to (references)
# already_converged_chunks - a set of query sequences that has already converged
try:
res = pool.map_async(get_nearest_neighbors_helper, [ ((chunks[i][1], chunks[i][0], chunks[i][0] - ref_seq_chunks[i][0], ref_seq_chunks[i][1], already_converged_chunks[i], params.neighbor_search_depth), {}) for i in range(len(chunks))] )
best_edit_distances_results =res.get(999999999) # Without the timeout this blocking call ignores all signals.
except KeyboardInterrupt:
print("Caught KeyboardInterrupt, terminating workers")
pool.terminate()
sys.exit()
else:
# print("Normal termination")
pool.close()
pool.join()
best_edit_distances = {}
for sub_graph in best_edit_distances_results:
for seq in sub_graph:
assert seq not in best_edit_distances
best_edit_distances.update(sub_graph)
return best_edit_distances
def read_fasta(fasta_file):
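    """Yield (accession, sequence) tuples from an open FASTA file handle."""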
fasta_seqs = {}
k = 0
temp = ''
accession = ''
for line in fasta_file:
if line[0] == '>' and k == 0:
accession = line[1:].strip()
fasta_seqs[accession] = ''
k += 1
elif line[0] == '>':
yield accession, temp
temp = ''
accession = line[1:].strip()
else:
temp += line.strip()
if accession:
yield accession, temp
def edlib_ed(x, y, mode="NW", task="distance", k=1):
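    """Edit distance between x and y via edlib; edlib returns -1 when the
    distance exceeds the threshold k."""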
result = edlib.align(x, y, mode=mode, task=task, k=k)
ed = result["editDistance"]
return ed
def get_nearest_neighbors(batch_of_queries, global_index_in_matrix, start_index, seq_to_acc_list_sorted, has_converged, neighbor_search_depth):
best_edit_distances = {}
lower_target_edit_distances = {}
# print("Processing global index:" , global_index_in_matrix)
# error_types = {"D":0, "S": 0, "I": 0}
for i in range(start_index, start_index + len(batch_of_queries)):
if i % 500 == 0:
print("processing ", i)
seq1 = seq_to_acc_list_sorted[i][0]
acc1 = seq_to_acc_list_sorted[i][1]
best_edit_distances[acc1] = {}
if seq1 in has_converged:
# print("ctd here")
continue
if acc1 in lower_target_edit_distances:
best_ed = lower_target_edit_distances[acc1]
# print("already_comp", best_ed )
else:
best_ed = len(seq1)
stop_up = False
stop_down = False
j = 1
while True:
# for j in range(1,len(seq_to_acc_list_sorted)):
if i - j < 0:
stop_down = True
if i + j >= len(seq_to_acc_list_sorted):
stop_up = True
if not stop_down:
seq2 = seq_to_acc_list_sorted[i - j][0]
acc2 = seq_to_acc_list_sorted[i - j][1]
if math.fabs(len(seq1) - len(seq2)) > best_ed:
stop_down = True
if not stop_up:
seq3 = seq_to_acc_list_sorted[i + j][0]
acc3 = seq_to_acc_list_sorted[i + j][1]
if math.fabs(len(seq1) - len(seq3)) > best_ed:
stop_up = True
if not stop_down:
edit_distance = edlib_ed(seq1, seq2, mode="NW", task="distance", k=best_ed)
if 0 < edit_distance < best_ed:
best_ed = edit_distance
best_edit_distances[acc1] = {}
best_edit_distances[acc1][acc2] = edit_distance
elif edit_distance == best_ed:
best_edit_distances[acc1][acc2] = edit_distance
if acc2 in lower_target_edit_distances:
if 0 < edit_distance < lower_target_edit_distances[acc2]:
lower_target_edit_distances[acc2] = edit_distance
else:
if 0 < edit_distance:
lower_target_edit_distances[acc2] = edit_distance
if not stop_up:
edit_distance = edlib_ed(seq1, seq3, mode="NW", task="distance", k=best_ed)
if 0 < edit_distance < best_ed:
best_ed = edit_distance
best_edit_distances[acc1] = {}
best_edit_distances[acc1][acc3] = edit_distance
elif edit_distance == best_ed:
best_edit_distances[acc1][acc3] = edit_distance
if acc3 in lower_target_edit_distances:
if 0 < edit_distance < lower_target_edit_distances[acc3]:
lower_target_edit_distances[acc3] = edit_distance
else:
if 0 < edit_distance:
lower_target_edit_distances[acc3] = edit_distance
if stop_down and stop_up:
break
if j >= neighbor_search_depth:
break
j += 1
# if best_edit_distances[acc1]:
# print("best ed:", best_ed)
# if best_ed > 100:
# print(best_ed, "for seq with length", len(seq1), seq1)
return best_edit_distances
def compute_2set_nearest_neighbor_graph(X, C, params):
seq_to_acc_queries = [(seq, acc) for (acc, seq) in X.items()] #{seq: acc for (acc, seq) in read_fasta(open(args.consensus_transcripts, 'r'))}
# seq_to_acc_list_queries = list(seq_to_acc_queries.items())
seq_to_acc_targets = [(seq, acc) for (acc, seq) in C.items()] #{seq: acc for (acc, seq) in read_fasta(open(args.target_transcripts, 'r'))}
# seq_to_acc_list_targets = list(seq_to_acc_targets.items())
seq_to_acc_list_sorted_all = sorted(seq_to_acc_queries + seq_to_acc_targets, key= lambda x: len(x[0]))
nearest_neighbor_graph_x_to_c = get_exact_nearest_neighbor_graph_2set(seq_to_acc_list_sorted_all, set(C.keys()), params)
# TAKE CARE OF UNALIGNED READS HERE?
edges = 0
tot_ed = 0
edit_hist =[]
neighbors = []
for x in nearest_neighbor_graph_x_to_c:
for c in nearest_neighbor_graph_x_to_c[x]:
edges += 1
tot_ed += nearest_neighbor_graph_x_to_c[x][c]
edit_hist.append(nearest_neighbor_graph_x_to_c[x][c])
neighbors.append(len(nearest_neighbor_graph_x_to_c[x]))
if params.verbose:
print("Number of edges:", edges)
print("Total edit distance:", tot_ed)
print("Avg ed (ed/edges):", tot_ed/ float(edges))
# histogram(edit_hist, args, name='edit_distances.png', x='x-axis', y='y-axis', x_cutoff=100, title="Edit distances in nearest_neighbor graph")
# histogram(edit_hist, args, name='edit_distances_zoomed.png', x='x-axis', y='y-axis', x_cutoff=5, title="Edit distances in nearest_neighbor graph")
# histogram(neighbors, args, name='neighbours.png', x='x-axis', y='y-axis', title="Number of neighbours in nearest_neighbor graph")
# histogram(neighbors, args, name='neighbours_zoomed.png', x='x-axis', y='y-axis', x_cutoff=20, title="Number of neighbours in nearest_neighbor graph")
return nearest_neighbor_graph_x_to_c
def compute_nearest_neighbor_graph(S, has_converged, params):
"""
strings S are all unique here.
"""
# consensus_transcripts = {acc: seq for (acc, seq) in read_fasta(open(params.consensus_transcripts, 'r'))}
# print("Number of consensus:", len(consensus_transcripts))
seq_to_acc = { seq : acc for (acc, seq) in S.items() }
seq_to_acc_list = list(seq_to_acc.items())
seq_to_acc_list_sorted = sorted(seq_to_acc_list, key= lambda x: len(x[0]))
# for s, acc in seq_to_acc_list_sorted:
# print(len(s), s[:20], s[:20].count("A"), s[:20].count("C"), s[:20].count("G"), s[:20].count("T"), acc)
########################
# seq_to_acc_RC = [ (reverse_complement(seq), acc+"_RC") for (acc, seq) in S.items() ]
# all_seqs = seq_to_acc_RC + seq_to_acc_list
# seq_to_acc_list_sorted = sorted(seq_to_acc_list, key= lambda x: (len(x[0]), x[0][:20]) ) # sort first on length, then on 20mer
# seq_to_acc_list_sorted = sorted(seq_to_acc_list, key= lambda x: (x[0][:20], len(x[0])) ) # sort first on 20mer, then on length
# for s, acc in seq_to_acc_list_sorted:
# print(len(s), s[:20], s[:20].count("A"), s[:20].count("C"), s[:20].count("G"), s[:20].count("T")) #, acc)
# sys.exit()
########################
collapsed_consensus_transcripts = { acc : seq for (seq, acc) in seq_to_acc.items() }
# print("Number of collapsed consensus:", len(collapsed_consensus_transcripts))
nearest_neighbor_graph = get_exact_nearest_neighbor_graph(seq_to_acc_list_sorted, has_converged, params)
s1 = set()
for acc1 in nearest_neighbor_graph:
s1.add(S[acc1])
s2 = set([seq for seq in seq_to_acc] )
isolated = s2.difference(s1)
print("isolated:", len(isolated))
# print("isolated:", isolated)
edges = 0
tot_ed = 0
edit_hist =[]
neighbors = []
for r1 in nearest_neighbor_graph:
for r2 in nearest_neighbor_graph[r1]:
edges += 1
tot_ed += nearest_neighbor_graph[r1][r2]
edit_hist.append(nearest_neighbor_graph[r1][r2])
neighbors.append(len(nearest_neighbor_graph[r1]))
if params.verbose:
print("Number of edges:", edges)
print("Total edit distance:", tot_ed)
print("Avg ed (ed/edges):", tot_ed/ float(edges))
# histogram(edit_hist, params, name='edit_distances.png', x='x-axis', y='y-axis', x_cutoff=100, title="Edit distances in nearest_neighbor graph")
# histogram(neighbors, params, name='neighbours.png', x='x-axis', y='y-axis', title="Number of neighbours in nearest_neighbor graph")
# histogram(neighbors, params, name='neighbours_zoomed.png', x='x-axis', y='y-axis', x_cutoff=20, title="Number of neighbours in nearest_neighbor graph")
return nearest_neighbor_graph, isolated
def get_exact_nearest_neighbor_graph_2set(seq_to_acc_list_sorted_all, target_accessions, params):
if params.nr_cores == 1:
best_edit_distances = get_nearest_neighbors_2set(seq_to_acc_list_sorted_all, 0, seq_to_acc_list_sorted_all, target_accessions, params.neighbor_search_depth)
        # TODO: check here that every sequence got a nearest_neighbor; if not, print which ones did not get one computed.
else:
####### parallelize alignment #########
# pool = Pool(processes=mp.cpu_count())
        # Ignore SIGINT while the pool is created so that worker processes
        # inherit the ignore-handler; then restore it in the parent.
        original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
        pool = Pool(processes=params.nr_cores)
        signal.signal(signal.SIGINT, original_sigint_handler)
chunk_size = max(int(len(seq_to_acc_list_sorted_all) / (10*params.nr_cores)), 20 )
chunks = [(i, seq_to_acc_list_sorted_all[i:i + chunk_size]) for i in range(0, len(seq_to_acc_list_sorted_all), chunk_size)]
print([i for i in range(0, len(seq_to_acc_list_sorted_all), chunk_size)])
print([len(ch) for i,ch in chunks])
try:
res = pool.map_async(get_nearest_neighbors_2set_helper, [ ((chunk, i , seq_to_acc_list_sorted_all, target_accessions, params.neighbor_search_depth), {}) for i,chunk in chunks] )
            best_edit_distances_results = res.get(999999999)  # Without the timeout this blocking call ignores all signals.
except KeyboardInterrupt:
print("Caught KeyboardInterrupt, terminating workers")
pool.terminate()
sys.exit()
else:
# print("Normal termination")
pool.close()
pool.join()
best_edit_distances = {}
for sub_graph in best_edit_distances_results:
for seq in sub_graph:
assert seq not in best_edit_distances
best_edit_distances.update(sub_graph)
return best_edit_distances
def get_nearest_neighbors_2set_helper(arguments):
args, kwargs = arguments
return get_nearest_neighbors_2set(*args, **kwargs)
def get_nearest_neighbors_2set(batch, start_index, seq_to_acc_list_sorted, target_accessions, neighbor_search_depth):
best_edit_distances = {}
error_types = {"D":0, "S": 0, "I": 0}
for i in range(start_index, start_index + len(batch)):
if i % 50 == 0:
print("processing ", i)
seq1 = seq_to_acc_list_sorted[i][0]
acc1 = seq_to_acc_list_sorted[i][1]
if acc1 in target_accessions:
continue
# reach here and we have a query read to find best alignment for
best_edit_distances[acc1] = {}
best_ed = len(seq1)
stop_up = False
stop_down = False
processed_reads_to_candidates_alignments = 0
j = 1
while True:
# for j in range(1,len(seq_to_acc_list_sorted)):
if i - j < 0:
stop_down = True
if i + j >= len(seq_to_acc_list_sorted):
stop_up = True
if not stop_down:
seq2 = seq_to_acc_list_sorted[i - j][0]
acc2 = seq_to_acc_list_sorted[i - j][1]
if math.fabs(len(seq1) - len(seq2)) > best_ed:
stop_down = True
if not stop_up:
seq3 = seq_to_acc_list_sorted[i + j][0]
acc3 = seq_to_acc_list_sorted[i + j][1]
if math.fabs(len(seq1) - len(seq3)) > best_ed:
stop_up = True
if not stop_down and acc2 in target_accessions:
processed_reads_to_candidates_alignments += 1
# if seq1 == seq2:
# print("ID:", acc1, acc2)
edit_distance = edlib_ed(seq1, seq2, mode="NW", task="distance", k=best_ed)
if 0 <= edit_distance < best_ed:
best_ed = edit_distance
best_edit_distances[acc1] = {}
best_edit_distances[acc1][acc2] = best_ed
# lower_target_edit_distances[acc2] = best_ed
elif edit_distance == best_ed:
best_edit_distances[acc1][acc2] = best_ed
if not stop_up and acc3 in target_accessions:
processed_reads_to_candidates_alignments += 1
# if seq1 == seq3:
# print("ID:", acc1, acc3)
edit_distance = edlib_ed(seq1, seq3, mode="NW", task="distance", k=best_ed)
if 0 <= edit_distance < best_ed:
best_ed = edit_distance
best_edit_distances[acc1] = {}
best_edit_distances[acc1][acc3] = best_ed
# lower_target_edit_distances[acc3] = best_ed
elif edit_distance == best_ed:
best_edit_distances[acc1][acc3] = best_ed
if stop_down and stop_up:
break
if processed_reads_to_candidates_alignments >= neighbor_search_depth:
break
j += 1
# print("best ed:", best_ed)
# if best_ed > 100:
# print(best_ed, "for seq with length", len(seq1), seq1)
return best_edit_distances
def main(args):
consensus_transcripts = {acc: seq for (acc, seq) in read_fasta(open(args.consensus_transcripts, 'r'))}
print("Number of consensus:", len(consensus_transcripts))
seq_to_acc = {seq: acc for (acc, seq) in read_fasta(open(args.consensus_transcripts, 'r'))}
seq_to_acc_list = list(seq_to_acc.items())
seq_to_acc_list_sorted = sorted(seq_to_acc_list, key= lambda x: len(x[0]))
collapsed_consensus_transcripts = { acc : seq for (seq, acc) in seq_to_acc.items() }
# print("Number of collapsed consensus:", len(collapsed_consensus_transcripts))
    nearest_neighbor_graph = get_exact_nearest_neighbor_graph(seq_to_acc_list_sorted, {}, args)  # NB: the original passed an undefined `params`; an empty has_converged mapping and `args` are assumed here
s1 = set( [ collapsed_consensus_transcripts[acc2] for acc1 in nearest_neighbor_graph for acc2 in nearest_neighbor_graph[acc1] ])
s2 = set([seq for seq in seq_to_acc] )
isolated = s2.difference(s1)
print("isolated:", len(isolated))
edges = 0
tot_ed = 0
edit_hist =[]
neighbors = []
for r1 in nearest_neighbor_graph:
for r2 in nearest_neighbor_graph[r1]:
edges += 1
tot_ed += nearest_neighbor_graph[r1][r2]
edit_hist.append(nearest_neighbor_graph[r1][r2])
neighbors.append(len(nearest_neighbor_graph[r1]))
print("Number of edges:", edges)
print("Total edit distance:", tot_ed)
print("Avg ed (ed/edges):", tot_ed/ float(edges))
# histogram(edit_hist, args, name='edit_distances.png', x='x-axis', y='y-axis', x_cutoff=100, title="Edit distances in nearest_neighbor graph")
# histogram(neighbors, args, name='neighbours.png', x='x-axis', y='y-axis', title="Number of neighbours in nearest_neighbor graph")
# histogram(neighbors, args, name='neighbours_zoomed.png', x='x-axis', y='y-axis', x_cutoff=20, title="Number of neighbours in nearest_neighbor graph")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Evaluate pacbio IsoSeq transcripts.")
parser.add_argument('consensus_transcripts', type=str, help='Path to the consensus fasta file')
parser.add_argument('outfolder', type=str, help='Output path of results')
parser.add_argument('--single_core', dest='single_core', action='store_true', help='Force working on single core. ')
args = parser.parse_args()
outfolder = args.outfolder
if not os.path.exists(outfolder):
os.makedirs(outfolder)
main(args) | PypiClean |
/OSLogManagement-0.0.1.tar.gz/OSLogManagement-0.0.1/README.md | # README.md
Warning (bug not fixed): The LogManagement package exports the terminal log to an unencrypted last_log.txt file, so it is exposed as plaintext in cases of forensic data recovery.
Designed to create variables from the results you obtain by filtering the log that your terminal produces for OS commands.
Installation:
pip install OSLogManagement
>>> from LogManagement import os_commands_regex
>>> ip_address = os_commands_regex(os_command='ipconfig', regex_function='search', regex_parameters='IP Address.+: (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})')
>>> print(ip_address)
['192.168.0.101']
# convert list to string:
>>> ip = ''.join(ip_address)
>>> print(ip)
192.168.0.101
show_last_log()
"""_summary_
It is useful for copying special characters (which do not load in your text editor) and using them in regex parameters.
example:
>>> from LogManagement import show_last_log
>>> show_last_log()
INFO:root:
Configuraci¢n IP de Windows
Adaptador de LAN inal mbrica Wi-Fi:
Sufijo DNS espec¡fico para la conexi¢n. . : home
Direcci¢n IPv6 . . . . . . . . . . : 444:444:444:4444:4444:4444:4444:4444
Direcci¢n IPv6 . . . . . . . . . . : 333:333:333::3
Direcci¢n IPv6 . . . . . . . . . . : 333:333:333::3
V¡nculo: direcci¢n IPv6 local. . . : 222::222:222:222:222%2
Direcci¢n IPv4. . . . . . . . . . . . . . : 192.168.1.2
M scara de subred . . . . . . . . . . . . : 111.111.111.111
Puerta de enlace predeterminada . . . . . : 1111:1111:d1::a11f:c11a
    >>> ip_address = os_commands_regex(os_command='ipconfig', regex_function='search', regex_parameters='Direcci¢n IPv4.+: (\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})')
    >>> print(ip_address)
['192.168.1.2']
"""
Useful content:
- regex101.com -> build, test and debug regex
- Practical REGEX tutorial (in Spanish): https://www.youtube.com/watch?v=Mc2j8Q-MHB4&ab_channel=ThePyCoach -> credits: The PyCoach
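
For reference, here is a minimal sketch of how such a helper could be built on top of the standard library (this is an assumption about the package's internals, not its actual source; the function name run_and_filter is made up):

    import re
    import subprocess

    def run_and_filter(os_command, pattern):
        # run the shell command and capture its text output
        output = subprocess.run(os_command, shell=True, capture_output=True, text=True).stdout
        # return all regex captures as a list, mirroring os_commands_regex
        return re.findall(pattern, output)

    ip_list = run_and_filter('ipconfig', r'IP Address.+: (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})')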
| PypiClean |
/Djaloha-0.4.2.tar.gz/Djaloha-0.4.2/djaloha/static/aloha.0.20.20/plugins/extra/browser/vendor/grid.locale.de.js | define(['aloha/jquery'], function(jQuery) {
var $ = jQuery;
;(function($){
/**
* jqGrid German Translation
* Version 1.0.0 (developed for jQuery Grid 3.3.1)
* Olaf Klöppel [email protected]
* http://blue-hit.de/
*
* Updated for jqGrid 3.8
* Andreas Flack
* http://www.contentcontrol-berlin.de
*
* Dual licensed under the MIT and GPL licenses:
* http://www.opensource.org/licenses/mit-license.php
* http://www.gnu.org/licenses/gpl.html
**/
// $.jgrid = {
$.jgrid_de = {
defaults: {
recordtext: "Zeige {0} - {1} von {2}",
emptyrecords: "Keine Datensätze vorhanden",
loadtext: "Lädt...",
pgtext: "Seite {0} von {1}"
},
search: {
caption: "Suche...",
Find: "Suchen",
Reset: "Zurücksetzen",
odata: [ 'gleich', 'ungleich', 'kleiner', 'kleiner gleich','größer','größer gleich', 'beginnt mit','beginnt nicht mit','ist in','ist nicht in','endet mit','endet nicht mit','enthält','enthält nicht' ],
groupOps: [ { op: "AND", text: "alle" }, { op: "OR", text: "mindestens eine" } ],
matchText: " erfülle",
rulesText: " Bedingung(en)"
},
edit: {
addCaption: "Datensatz hinzufügen",
editCaption: "Datensatz bearbeiten",
bSubmit: "Speichern",
bCancel: "Abbrechen",
bClose: "Schließen",
saveData: "Daten wurden geändert! Änderungen speichern?",
bYes: "ja",
bNo: "nein",
bExit: "abbrechen",
msg: {
required: "Feld ist erforderlich",
number: "Bitte geben Sie eine Zahl ein",
minValue: "Wert muss größer oder gleich sein, als ",
maxValue: "Wert muss kleiner oder gleich sein, als ",
email: "ist keine gültige E-Mail-Adresse",
integer: "Bitte geben Sie eine Ganzzahl ein",
date: "Bitte geben Sie ein gültiges Datum ein",
url: "ist keine gültige URL. Präfix muss eingegeben werden ('http://' oder 'https://')",
nodefined: " ist nicht definiert!",
novalue: " Rückgabewert ist erforderlich!",
customarray: "Benutzerdefinierte Funktion sollte ein Array zurückgeben!",
customfcheck: "Benutzerdefinierte Funktion sollte im Falle der benutzerdefinierten Überprüfung vorhanden sein!"
}
},
view: {
caption: "Datensatz anzeigen",
bClose: "Schließen"
},
del: {
caption: "Löschen",
msg: "Ausgewählte Datensätze löschen?",
bSubmit: "Löschen",
bCancel: "Abbrechen"
},
nav: {
edittext: " ",
edittitle: "Ausgewählte Zeile editieren",
addtext: " ",
addtitle: "Neue Zeile einfügen",
deltext: " ",
deltitle: "Ausgewählte Zeile löschen",
searchtext: " ",
searchtitle: "Datensatz suchen",
refreshtext: "",
refreshtitle: "Tabelle neu laden",
alertcap: "Warnung",
alerttext: "Bitte Zeile auswählen",
viewtext: "",
viewtitle: "Ausgewählte Zeile anzeigen"
},
col: {
caption: "Spalten auswählen",
bSubmit: "Speichern",
bCancel: "Abbrechen"
},
errors: {
errcap: "Fehler",
nourl: "Keine URL angegeben",
norecords: "Keine Datensätze zu bearbeiten",
model: "colNames und colModel sind unterschiedlich lang!"
},
formatter: {
integer: {thousandsSeparator: ".", defaultValue: '0'},
number: {decimalSeparator: ",", thousandsSeparator: ".", decimalPlaces: 2, defaultValue: '0,00'},
currency: {decimalSeparator: ",", thousandsSeparator: ".", decimalPlaces: 2, prefix: "", suffix: " €", defaultValue: '0,00'},
date: {
dayNames: [
"So", "Mo", "Di", "Mi", "Do", "Fr", "Sa",
"Sonntag", "Montag", "Dienstag", "Mittwoch", "Donnerstag", "Freitag", "Samstag"
],
monthNames: [
"Jan", "Feb", "Mar", "Apr", "Mai", "Jun", "Jul", "Aug", "Sep", "Okt", "Nov", "Dez",
"Januar", "Februar", "März", "April", "Mai", "Juni", "Juli", "August", "September", "Oktober", "November", "Dezember"
],
AmPm: ["am","pm","AM","PM"],
S: function (j) {return 'ter'},
srcformat: 'Y-m-d',
newformat: 'd.m.Y',
masks: {
ISO8601Long: "Y-m-d H:i:s",
ISO8601Short: "Y-m-d",
ShortDate: "j.n.Y",
LongDate: "l, j. F Y",
FullDateTime: "l, d. F Y G:i:s",
MonthDay: "d. F",
ShortTime: "G:i",
LongTime: "G:i:s",
SortableDateTime: "Y-m-d\\TH:i:s",
UniversalSortableDateTime: "Y-m-d H:i:sO",
YearMonth: "F Y"
},
reformatAfterEdit: false
},
baseLinkUrl: '',
showAction: '',
target: '',
checkbox: {disabled:true},
idName: 'id'
}
};
})(jQuery);
}); | PypiClean |
/HY_sdk-0.2.56-py3-none-any.whl/hivisionai/hycv/utils.py | from PIL import Image
import cv2
import numpy as np
import math
import warnings
import csv
import glob
def cover_mask(image_path, mask_path, alpha=0.85, rate=0.1, if_save=True):
"""
    Stamp a watermark onto the bottom-right corner of an image.
    :param image_path: path of the source image
    :param mask_path: path of the watermark, read as a PNG
    :param alpha: opacity, defaults to 0.85
    :param rate: watermark scale; the smaller the value, the smaller the watermark. Defaults to 0.1
    :param if_save: whether to save the resulting image. If True, save it and return the new image path; otherwise return the image object
    :return: the new image path (or the image object when if_save is False)
    """
    # Build the new image path; we assume the file name has a suffix containing "."
path_len = len(image_path)
index = 0
for index in range(path_len - 1, -1, -1):
if image_path[index] == ".":
break
    if not (3 <= path_len - index <= 6):  # the original condition `3 >= path_len - index >= 6` could never be true
        raise TypeError("The input image format is invalid!")
new_path = image_path[0:index] + "_with_mask" + image_path[index:path_len]
    # Read the watermark as a PNG (note: PIL's .size is (width, height))
mask = Image.open(mask_path).convert('RGBA')
mask_h, mask_w = mask.size
    # Read the source image as a PNG
im = Image.open(image_path).convert('RGBA')
    # Strategy: copy the original image as a base layer, paste the (fully opaque)
    # watermark onto the copy, then control opacity via Image.blend's alpha
base = im.copy()
# layer = Image.new('RGBA', im.size, (0, 0, 0, ))
# tmp = Image.new('RGBA', im.size, (0, 0, 0, 0))
h, w = im.size
    # Scale the watermark according to the source image size
mask = mask.resize((int(rate*math.sqrt(w*h*mask_h/mask_w)), int(rate*math.sqrt(w*h*mask_w/mask_h))), Image.ANTIALIAS)
mh, mw = mask.size
r, g, b, a = mask.split()
im.paste(mask, (h-mh, w-mw), mask=a)
# im.show()
out = Image.blend(base, im, alpha=alpha).convert('RGB')
# out = Image.alpha_composite(im, layer).convert('RGB')
if if_save:
out.save(new_path)
return new_path
else:
return out
def check_image(image) ->np.ndarray:
"""
    If the given object is not already an image matrix, read it from the given path; return the image matrix.
"""
if not isinstance(image, np.ndarray):
image = cv2.imread(image, cv2.IMREAD_UNCHANGED)
return image
def get_box(image) -> list:
"""
    A simple bounding-box locator for a matted (background-removed) image; noise is not considered.
    The image is scanned from its borders, and a coordinate is recorded as soon as a non-transparent pixel is found.
    :param image: either an image path or an already loaded image.
        If a path is passed, the image is read first and its alpha channel binarized before processing;
        if an image is passed, it is processed directly.
    :return: a list with the top/bottom (y) and left/right (x) bounds of the image content
"""
image = check_image(image)
height, width, _ = image.shape
try:
b, g, r, a = cv2.split(image)
        # Binarize the alpha channel
a = (a > 127).astype(np.int_)
except ValueError:
        # The image has no alpha channel; return the full image extent
        warnings.warn("A non-RGBA (four-channel) image was passed!")
return [0, height, 0, width]
flag1, flag2 = 0, 0
    box = [0, 0, 0, 0]  # top, bottom, left, right
    # Scan from both ends at once; flag1 and flag2 mark when each side has been found
    # First find the top and bottom bounds
for i in range(height):
for j in range(width):
if flag1 == 0 and a[i][j] != 0:
flag1 = 1
box[0] = i
if flag2 == 0 and a[height - i -1][j] != 0:
flag2 = 1
box[1] = height - i - 1
if flag2 * flag1 == 1:
break
    # Then find the left and right bounds
flag1, flag2 = 0, 0
for j in range(width):
for i in range(height):
if flag1 == 0 and a[i][j] != 0:
flag1 = 1
box[2] = j
if flag2 == 0 and a[i][width - j - 1] != 0:
flag2 = 1
box[3] = width - j - 1
if flag2 * flag1 == 1:
break
return box
def filtering(img, f, x, y, x_max, y_max, x_min, y_min, area=0, noise_size=50) ->tuple:
"""
    filtering recursively determines the extent (coordinates) of a connected region in a (single-channel) matrix.
    :param img: input matrix
    :param f: all-zero matrix of the same size as img, used to mark points already visited by the recursion
    :param x: current x coordinate of the recursion
    :param y: current y coordinate of the recursion
    :param x_max: maximum x coordinate reached so far
    :param y_max: maximum y coordinate reached so far
    :param x_min: minimum x coordinate reached so far
    :param y_min: minimum y coordinate reached so far
    :param area: pixel area of the region traversed so far
    :param noise_size: maximum region area; once area exceeds noise_size the function returns (0, 1)
    :return: two cases: if area exceeds noise_size the function returns (0, 1); otherwise it returns (box, 0),
    where box holds the region's coordinates and pixel area (top, bottom, left, right, area).
    In theory this function could trace the shape and coordinates of any region, but memory use and speed
    make that a poor choice, so it is normally used only to detect and filter out noise specks.
"""
dire_dir = [(1, 0), (-1, 0), (0, 1), (0, -1), (1, 1), (1, -1), (-1, -1), (-1, 1)]
height, width = img.shape
f[x][y] = 1
for dire in dire_dir:
delta_x, delta_y = dire
tmp_x, tmp_y = (x + delta_x, y + delta_y)
if height > tmp_x >= 0 and width > tmp_y >= 0:
if img[tmp_x][tmp_y] != 0 and f[tmp_x][tmp_y] == 0:
f[tmp_x][tmp_y] = 1
# cv2.imshow("test", f)
# cv2.waitKey(3)
area += 1
if area > noise_size:
return 0, 1
else:
x_max = tmp_x if tmp_x > x_max else x_max
x_min = tmp_x if tmp_x < x_min else x_min
y_max = tmp_y if tmp_y > y_max else y_max
y_min = tmp_y if tmp_y < y_min else y_min
box, flag = filtering(img, f, tmp_x, tmp_y, x_max, y_max, x_min, y_min, area=area, noise_size=noise_size)
if flag == 1:
return 0, 1
else:
(x_max, x_min, y_max, y_min, area) = box
return [x_min, x_max, y_min, y_max, area], 0
def get_box_pro(image: np.ndarray, model: int = 1, correction_factor=None, thresh: int = 127):
"""
    Given a four-channel (RGBA) image, return the rectangle coordinates of the largest connected
    non-transparent region. OpenCV built-ins are used to analyse the image mask, and a few
    parameters control how the position information is reported.
    Args:
        image: four-channel image matrix
        model: output mode
        correction_factor: edge-expansion offsets, given as a list or an int: [up, down, left, right].
            For example, to let the left edge of the cropped rectangle reach one pixel further left,
            pass [0, 0, 1, 0]; for one pixel further right, pass [0, 0, 0, 1].
            If an int is passed, only the left and right edges are expanded; e.g. 2 is equivalent
            to [0, 0, 2, 2].
        thresh: binarization threshold; keep it small to preserve feathered (soft) edges
    Returns:
        With model == 1, the four coordinates of the cropped rectangle.
        With model == 2, the distances from the rectangle's four edges to the image's four edges.
"""
    # ------------ input validation -------------- #
    # the input must be four-channel
if correction_factor is None:
correction_factor = [0, 0, 0, 0]
if not isinstance(image, np.ndarray) or len(cv2.split(image)) != 4:
raise TypeError("输入的图像必须为四通道np.ndarray类型矩阵!")
    # normalize correction_factor
if isinstance(correction_factor, int):
correction_factor = [0, 0, correction_factor, correction_factor]
elif not isinstance(correction_factor, list):
raise TypeError("correction_factor 必须为int或者list类型!")
# ------------ 数据格式规范完毕 -------------- #
# 分离mask
_, _, _, mask = cv2.split(image)
    # binarize the mask
_, mask = cv2.threshold(mask, thresh=thresh, maxval=255, type=0)
contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
temp = np.ones(image.shape, np.uint8)*255
cv2.drawContours(temp, contours, -1, (0, 0, 255), -1)
contours_area = []
for cnt in contours:
contours_area.append(cv2.contourArea(cnt))
idx = contours_area.index(max(contours_area))
    x, y, w, h = cv2.boundingRect(contours[idx])  # bounding rectangle of the largest contour
    # ------------ produce the output -------------- #
height, width, _ = image.shape
y_up = y - correction_factor[0] if y - correction_factor[0] >= 0 else 0
y_down = y + h + correction_factor[1] if y + h + correction_factor[1] < height else height - 1
x_left = x - correction_factor[2] if x - correction_factor[2] >= 0 else 0
x_right = x + w + correction_factor[3] if x + w + correction_factor[3] < width else width - 1
if model == 1:
        # model == 1: return the rectangle's four coordinates
return [y_up, y_down, x_left, x_right]
elif model == 2:
        # model == 2: return the distances from the rectangle to the image borders
return [y_up, height - y_down, x_left, width - x_right]
else:
        raise EOFError("Please choose a valid mode!")
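# Illustrative usage of get_box_pro (hypothetical file name, not part of the original module):
#   rgba = cv2.imread("matted.png", cv2.IMREAD_UNCHANGED)
#   y_up, y_down, x_left, x_right = get_box_pro(rgba, model=1, correction_factor=2)
#   content = rgba[y_up:y_down, x_left:x_right]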
def cut(image_path:str, box:list, if_save=True):
"""
    Crop the image region given by box and optionally save it.
    :param image_path: path of the source image
    :param box: coordinate list: top, bottom, left, right
    :param if_save: whether to save the cropped image. If True, save it and return the new image path; otherwise return the cropped image object
    :return: the new image path, or the cropped image object
"""
index = 0
path_len = len(image_path)
up, down, left, right = box
image = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
new_image = image[up: down, left: right]
if if_save:
for index in range(path_len - 1, -1, -1):
if image_path[index] == ".":
break
        if not (3 <= path_len - index <= 6):  # the original condition `3 >= path_len - index >= 6` could never be true
            raise TypeError("The input image format is invalid!")
new_path = image_path[0:index] + "_cut" + image_path[index:path_len]
cv2.imwrite(new_path, new_image, [cv2.IMWRITE_PNG_COMPRESSION, 9])
return new_path
else:
return new_image
def zoom_image_without_change_size(image:np.ndarray, zoom_rate, interpolation=cv2.INTER_NEAREST) ->np.ndarray:
"""
    Zoom into an image without changing its canvas size; currently only zooming from the image centre is supported.
    :param image: input image object
    :param zoom_rate: zoom factor, in multiples of the original size (1 means unchanged)
    :param interpolation: interpolation mode, matching cv2.resize's flags; defaults to nearest-neighbour
    :return: the cropped (zoomed) image
"""
height, width, _ = image.shape
if zoom_rate < 1:
        # zoom_rate must not be less than 1
        raise ValueError("zoom_rate must not be less than 1!")
height_tmp = int(height * zoom_rate)
width_tmp = int(width * zoom_rate)
    # NOTE: cv2.resize expects dsize as (width, height); the original code swapped them
    image_tmp = cv2.resize(image, (width_tmp, height_tmp), interpolation=interpolation)
    # Locate the crop window: these are the coordinates of its top-left corner
    delta_x = (width_tmp - width) // 2  # horizontal
    delta_y = (height_tmp - height) // 2  # vertical
return image_tmp[delta_y : delta_y + height, delta_x : delta_x + width]
def filedir2csv(scan_filedir, csv_filedir):
file_list = glob.glob(scan_filedir+"/*")
with open(csv_filedir, "w") as csv_file:
        writer = csv.writer(csv_file)
        for file_dir in file_list:
            writer.writerow([file_dir])
print("filedir2csv success!")
def full_ties(image_pre:np.ndarray):
height, width = image_pre.shape
    # dilate first
kernel = np.ones((5, 5), dtype=np.uint8)
dilate = cv2.dilate(image_pre, kernel, 1)
# cv2.imshow("dilate", dilate)
def FillHole(image):
        # copy the input image
        im_floodFill = image.copy()
        # mask for floodFill; OpenCV requires (height + 2, width + 2)
        mask = np.zeros((height + 2, width + 2), np.uint8)
        seedPoint = (0, 0)
        # the seed point for floodFill must lie on the background
is_break = False
for i in range(im_floodFill.shape[0]):
for j in range(im_floodFill.shape[1]):
if (im_floodFill[i][j] == 0):
seedPoint = (i, j)
is_break = True
break
if (is_break):
break
        # flood-fill from the seed: background connected to it is filled with 255
cv2.floodFill(im_floodFill, mask, seedPoint, 255)
# cv2.imshow("tmp1", im_floodFill)
        # invert the flood-filled image to get im_floodFill_inv
im_floodFill_inv = cv2.bitwise_not(im_floodFill)
# cv2.imshow("tmp2", im_floodFill_inv)
        # OR image and im_floodFill_inv together to obtain the foreground with holes filled
im_out = image | im_floodFill_inv
return im_out
    # fill holes via the flood-fill algorithm
    image_floodFill = FillHole(dilate)
    # merge the filled image with the original
    image_final = image_floodFill | image_pre
    # then erode
    kernel = np.ones((5, 5), np.uint8)
    erosion = cv2.erode(image_final, kernel, iterations=6)
# cv2.imshow("erosion", erosion)
    # apply a Gaussian blur
blur = cv2.GaussianBlur(erosion, (5, 5), 2.5)
# cv2.imshow("blur", blur)
# image_final = merge_image(image_pre, erosion)
    # merge with the original again
image_final = image_pre | blur
# cv2.imshow("final", image_final)
return image_final
def cut_BiggestAreas(image):
    # keep only the largest-contour region of the whole image
def find_BiggestAreas(image_pre):
        # define a 3x3 kernel
        kernel = np.ones((3, 3), dtype=np.uint8)
        # dilate the input image (disabled)
        # dilate = cv2.dilate(image_pre, kernel, 3)
        # cv2.imshow("dilate", dilate)
        # binarize the input image
_, thresh = cv2.threshold(image_pre, 127, 255, cv2.THRESH_BINARY)
# cv2.imshow("thresh", thresh)
        # dilate the binarized image
dilate_afterThresh = cv2.dilate(thresh, kernel, 5)
# cv2.imshow("thresh_afterThresh", dilate_afterThresh)
        # find contours
contours_, hierarchy = cv2.findContours(dilate_afterThresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        # identify the largest contour
        # NOTE: some OpenCV versions return a tuple from findContours, which has no pop(), so convert it to a list
contours = [x for x in contours_]
area = map(cv2.contourArea, contours)
area_list = list(area)
area_max = max(area_list)
post = area_list.index(area_max)
        # keep the largest region and fill all the others with black
contours.pop(post)
for i in range(len(contours)):
cv2.drawContours(image_pre, contours, i, 0, cv2.FILLED)
# cv2.imshow("cut", image_pre)
return image_pre
b, g, r, a = cv2.split(image)
a_new = find_BiggestAreas(a)
new_image = cv2.merge((b, g, r, a_new))
return new_image
def locate_neck(image:np.ndarray, proportion):
"""
    Given a four-channel image and a top-down proportion, locate the corresponding row y,
    then move inwards from both sides until a non-transparent pixel is met on each side.
    """
    if image.shape[-1] != 4:
        raise TypeError("Please pass a four-channel PNG image!")
    if proportion > 1 or proportion <= 0:
        raise ValueError("proportion must be within (0, 1]!")
_, _, _, a = cv2.split(image)
height, width = a.shape
_, a = cv2.threshold(a, 127, 255, cv2.THRESH_BINARY)
y = int(height * proportion)
x = 0
for x in range(width):
if a[y][x] == 255:
break
left = (y, x)
for x in range(width - 1, -1 , -1):
if a[y][x] == 255:
break
right = (y, x)
return left, right, right[1] - left[1]
def get_cutbox_image(input_image):
height, width = input_image.shape[0], input_image.shape[1]
y_top, y_bottom, x_left, x_right = get_box_pro(input_image, model=2)
result_image = input_image[y_top:height - y_bottom, x_left:width - x_right]
return result_image
def brightnessAdjustment(image: np.ndarray, bright_factor: int=0):
"""
    Adjust image brightness.
    :param image: input image matrix
    :param bright_factor: brightness offset; positive or negative, with no range limit.
        As bright_factor approaches +infinity the image turns fully white;
        as it approaches -infinity the image turns fully black.
    :return: the processed image
"""
res = np.uint8(np.clip(np.int16(image) + bright_factor, 0, 255))
return res
def contrastAdjustment(image: np.ndarray, contrast_factor: int = 0):
"""
    Adjust image contrast; in practice this also affects brightness to some degree.
    :param image: input image matrix
    :param contrast_factor: contrast factor, positive or negative, in the range [-100, +100];
        with contrast_factor = -100 the image becomes uniformly grey.
    :return: the processed image
"""
contrast_factor = 1 + min(contrast_factor, 100) / 100 if contrast_factor > 0 else 1 + max(contrast_factor,
-100) / 100
image_b = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
bright_ = image_b.mean()
res = np.uint8(np.clip(contrast_factor * (np.int16(image) - bright_) + bright_, 0, 255))
return res
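# Illustrative usage of the two adjustment helpers (hypothetical file name, not part of the original module):
#   img = cv2.imread("portrait.jpg")
#   brighter = brightnessAdjustment(img, bright_factor=30)
#   punchier = contrastAdjustment(brighter, contrast_factor=20)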
class CV2Bytes(object):
@staticmethod
def byte_cv2(image_byte, flags=cv2.IMREAD_COLOR) ->np.ndarray:
"""
        Decode the given byte stream into an image; flags = -1 decodes losslessly (keeping the alpha channel).
"""
np_arr = np.frombuffer(image_byte,np.uint8)
image = cv2.imdecode(np_arr, flags)
return image
@staticmethod
def cv2_byte(image:np.ndarray, imageType:str=".jpg"):
"""
        Encode the given image into a byte stream.
"""
_, image_encode = cv2.imencode(imageType, image)
image_byte = image_encode.tobytes()
return image_byte
def comb2images(src_white:np.ndarray, src_black:np.ndarray, mask:np.ndarray) -> np.ndarray:
"""输入两张图片,将这两张图片根据输入的mask进行叠加处理
这里并非简单的cv2.add(),因为也考虑了羽化部分,所以需要进行一些其他的处理操作
核心的算法为: dst = (mask * src_white + (1 - mask) * src_black).astype(np.uint8)
Args:
src_white (np.ndarray): 第一张图像,代表的是mask中的白色区域,三通道
src_black (np.ndarray): 第二张图像,代表的是mask中的黑色区域,三通道
mask (np.ndarray): mask.输入为单通道,后续会归一化并转为三通道
需要注意的是这三者的尺寸应该是一样的
Returns:
np.ndarray: 返回的三通道图像
"""
# 函数内部不检查相关参数是否一样,使用的时候需要注意一下
mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR).astype(np.float32) / 255
return (mask * src_white + (1 - mask) * src_black).astype(np.uint8) | PypiClean |
/LDB_Inventory_Barcode-0.14.1.tar.gz/LDB_Inventory_Barcode-0.14.1/docs/supported-formats.rst | Supported Formats
=========================
The following are the supported barcode formats.
PRs for other code formats are welcome!
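
A short usage sketch (this assumes the package keeps the upstream
``python-barcode`` API, where a format class is looked up by its lowercase
name; the output file name below is made up):

.. code-block:: python

    import barcode

    # Look up one of the formats documented below and render a code.
    ean_cls = barcode.get_barcode_class("ean13")
    ean = ean_cls("5901234123457")  # the check digit is validated on creation
    ean.save("ean13_example")       # writes ean13_example.svg by default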
Code 39
-------
.. inheritance-diagram:: barcode.codex.Code39
:parts: 1
.. autoclass:: barcode.codex.Code39
:members:
Code 128
--------
.. versionadded:: 0.8beta1
.. inheritance-diagram:: barcode.codex.Code128
:parts: 1
.. autoclass:: barcode.codex.Code128
:members:
PZN7 (aka: PZN)
---------------
.. inheritance-diagram:: barcode.codex.PZN7
:parts: 1
.. autoclass:: barcode.codex.PZN7
:members:
EAN-13
------
.. inheritance-diagram:: barcode.ean.EuropeanArticleNumber13
:parts: 1
.. autoclass:: barcode.ean.EuropeanArticleNumber13
:members:
EAN-8
-----
.. inheritance-diagram:: barcode.ean.EuropeanArticleNumber8
:parts: 1
.. autoclass:: barcode.ean.EuropeanArticleNumber8
:members:
JAN
---
.. inheritance-diagram:: barcode.ean.JapanArticleNumber
:parts: 1
.. autoclass:: barcode.ean.JapanArticleNumber
:members:
ISBN-13
-------
.. inheritance-diagram:: barcode.isxn.InternationalStandardBookNumber13
:parts: 1
.. autoclass:: barcode.isxn.InternationalStandardBookNumber13
:members:
ISBN-10
-------
.. inheritance-diagram:: barcode.isxn.InternationalStandardBookNumber10
:parts: 1
.. autoclass:: barcode.isxn.InternationalStandardBookNumber10
:members:
ISSN
----
.. inheritance-diagram:: barcode.isxn.InternationalStandardSerialNumber
:parts: 1
.. autoclass:: barcode.isxn.InternationalStandardSerialNumber
:members:
UPC-A
-----
.. inheritance-diagram:: barcode.upc.UniversalProductCodeA
:parts: 1
.. autoclass:: barcode.upc.UniversalProductCodeA
:members:
EAN14
-----
.. inheritance-diagram:: barcode.ean.EuropeanArticleNumber14
:parts: 1
.. autoclass:: barcode.ean.EuropeanArticleNumber14
:members:
GS1-128
-------
.. versionadded:: v0.10.0
.. inheritance-diagram:: barcode.codex.Gs1_128
:parts: 1
.. autoclass:: barcode.codex.Gs1_128
:members:
| PypiClean |
/Flask-AppBuilder-jack-3.3.4.tar.gz/Flask-AppBuilder-jack-3.3.4/flask_appbuilder/upload.py | from flask_babel import gettext
from werkzeug.datastructures import FileStorage
from wtforms import fields, ValidationError
from wtforms.widgets import html_params, HTMLString
from .filemanager import FileManager, ImageManager
try:
from wtforms.fields.core import _unset_value as unset_value
except ImportError:
from wtforms.utils import unset_value
"""
Based and thanks to
https://github.com/mrjoes/flask-admin/blob/master/flask_admin/form/upload.py
"""
class BS3FileUploadFieldWidget(object):
empty_template = (
'<div class="input-group">'
'<span class="input-group-addon"><i class="fa fa-upload"></i>'
"</span>"
'<input class="form-control" %(file)s/>'
"</div>"
)
data_template = (
"<div>"
" <input %(text)s>"
' <input type="checkbox" name="%(marker)s">Delete</input>'
"</div>"
'<div class="input-group">'
'<span class="input-group-addon"><i class="fa fa-upload"></i>'
"</span>"
'<input class="form-control" %(file)s/>'
"</div>"
)
def __call__(self, field, **kwargs):
kwargs.setdefault("id", field.id)
kwargs.setdefault("name", field.name)
template = self.data_template if field.data else self.empty_template
return HTMLString(
template
% {
"text": html_params(type="text", value=field.data),
"file": html_params(type="file", **kwargs),
"marker": "_%s-delete" % field.name,
}
)
class BS3ImageUploadFieldWidget(object):
empty_template = (
'<div class="input-group">'
'<span class="input-group-addon"><span class="glyphicon glyphicon-upload"></span>'
"</span>"
'<input class="form-control" %(file)s/>'
"</div>"
)
data_template = (
'<div class="thumbnail">'
" <img %(image)s>"
' <input type="checkbox" name="%(marker)s">Delete</input>'
"</div>"
'<div class="input-group">'
'<span class="input-group-addon"><span class="glyphicon glyphicon-upload"></span>'
"</span>"
'<input class="form-control" %(file)s/>'
"</div>"
)
def __call__(self, field, **kwargs):
kwargs.setdefault("id", field.id)
kwargs.setdefault("name", field.name)
args = {
"file": html_params(type="file", **kwargs),
"marker": "_%s-delete" % field.name,
}
if field.data:
url = self.get_url(field)
args["image"] = html_params(src=url)
template = self.data_template
else:
template = self.empty_template
return HTMLString(template % args)
def get_url(self, field):
im = ImageManager()
return im.get_url(field.data)
# Fields
class FileUploadField(fields.TextField):
"""
Customizable file-upload field.
Saves file to configured path, handles updates and deletions.
Inherits from `TextField`, resulting filename will be stored as string.
"""
widget = BS3FileUploadFieldWidget()
def __init__(self, label=None, validators=None, filemanager=None, **kwargs):
"""
Constructor.
:param label:
Display label
:param validators:
Validators
"""
self.filemanager = filemanager or FileManager()
self._should_delete = False
super(FileUploadField, self).__init__(label, validators, **kwargs)
def process_on_delete(self, obj):
"""Override this method to make customised updates to the object
when the stored file is going to be deleted."""
pass
def process_on_store(self, obj, byte_stream):
"""Override this method to make customised updates to the object
when a file is going to be stored.
This may be used to parse file content and extract values for
additional fields.
        Note: as populate_obj() on form fields may be called in an arbitrary
order, do not assume that other fields in obj have been correctly set.
        If extra information (from other fields) is necessary for parsing
the supplied file content, a form-field validator may be used to copy
it directly from the form to this field.
:param obj: model object
:param byte_stream: file contents
"""
pass
def pre_validate(self, form):
if (
self.data
and isinstance(self.data, FileStorage)
and not self.filemanager.is_file_allowed(self.data.filename)
):
raise ValidationError(gettext("Invalid file extension"))
def process(self, formdata, data=unset_value):
if formdata:
marker = "_%s-delete" % self.name
if marker in formdata:
self._should_delete = True
return super(FileUploadField, self).process(formdata, data)
def populate_obj(self, obj, name):
field = getattr(obj, name, None)
if field:
# If field should be deleted, clean it up
if self._should_delete:
self.process_on_delete(obj)
self.filemanager.delete_file(field)
setattr(obj, name, None)
return
if self.data and isinstance(self.data, FileStorage):
if field:
self.process_on_delete(obj)
self.filemanager.delete_file(field)
position = self.data.stream.tell()
self.process_on_store(obj, self.data.stream)
self.data.stream.seek(position)
filename = self.filemanager.generate_name(obj, self.data)
filename = self.filemanager.save_file(self.data, filename)
setattr(obj, name, filename)
class ImageUploadField(fields.StringField):
"""
Image upload field.
"""
widget = BS3ImageUploadFieldWidget()
def __init__(self, label=None, validators=None, imagemanager=None, **kwargs):
self.imagemanager = imagemanager or ImageManager()
self._should_delete = False
super(ImageUploadField, self).__init__(label, validators, **kwargs)
def pre_validate(self, form):
if (
self.data
and isinstance(self.data, FileStorage)
and not self.imagemanager.is_file_allowed(self.data.filename)
):
raise ValidationError(gettext("Invalid file extension"))
def process(self, formdata, data=unset_value):
if formdata:
marker = "_%s-delete" % self.name
if marker in formdata:
self._should_delete = True
return super(ImageUploadField, self).process(formdata, data)
def populate_obj(self, obj, name):
field = getattr(obj, name, None)
size = obj.__mapper__.columns[name].type.size
thumbnail_size = obj.__mapper__.columns[name].type.thumbnail_size
if field:
# If field should be deleted, clean it up
if self._should_delete:
self.imagemanager.delete_file(field)
setattr(obj, name, None)
return
if self.data and isinstance(self.data, FileStorage):
if field:
self.imagemanager.delete_file(field)
filename = self.imagemanager.generate_name(obj, self.data)
filename = self.imagemanager.save_file(
self.data, filename, size, thumbnail_size
)
setattr(obj, name, filename) | PypiClean |
/Office365_REST_with_timeout-0.1.1-py3-none-any.whl/office365/sharepoint/permissions/roleAssignmentCollection.py | from office365.runtime.queries.service_operation_query import ServiceOperationQuery
from office365.runtime.resource_path import ResourcePath
from office365.runtime.resource_path_service_operation import ResourcePathServiceOperation
from office365.sharepoint.base_entity_collection import BaseEntityCollection
from office365.sharepoint.permissions.role_assignment import RoleAssignment
class RoleAssignmentCollection(BaseEntityCollection):
"""Represents a collection of RoleAssignment resources."""
def __init__(self, context, resource_path=None):
super(RoleAssignmentCollection, self).__init__(context, RoleAssignment, resource_path)
def __getitem__(self, index_or_principal_id):
"""
:param int or str index_or_principal_id: key is used to address a RoleAssignment resource by either an index
in collection or by resource id"""
if type(index_or_principal_id) == int:
return super(RoleAssignmentCollection, self).__getitem__(index_or_principal_id)
return self._item_type(self.context,
ResourcePath(index_or_principal_id, self.resource_path))
def get_by_principal_id(self, principal_id):
"""Retrieves the role assignment object (1) based on the specified user or group.
:param int principal_id: Specifies the user or group of the role assignment.
"""
role_assignment = RoleAssignment(self.context,
ResourcePathServiceOperation("GetByPrincipalId",
[principal_id],
self.resource_path))
self.context.load(role_assignment)
return role_assignment
def add_role_assignment(self, principal_id, role_def_id):
"""Adds a role assignment to the role assignment collection.<81>
:param int role_def_id: Specifies the role definition of the role assignment.
:param int principal_id: Specifies the user or group of the role assignment.
"""
payload = {
"principalId": principal_id,
"roleDefId": role_def_id
}
qry = ServiceOperationQuery(self, "AddRoleAssignment", payload, None, None, None)
self.context.add_query(qry)
def remove_role_assignment(self, principal_id, role_def_id):
"""Removes the role assignment with the specified principal and role definition from the collection.
:param int role_def_id: The ID of the role definition in the role assignment.
:param int principal_id: The ID of the user or group in the role assignment.
"""
payload = {
"principalId": principal_id,
"roleDefId": role_def_id
}
qry = ServiceOperationQuery(self, "RemoveRoleAssignment", payload, None, None, None)
self.context.add_query(qry) | PypiClean |
/NeodroidVision-0.3.0-py36-none-any.whl/neodroidvision/classification/loss_functions/center_loss.py |
__author__ = "Christian Heider Nielsen"
__doc__ = r"""
"""
import torch
from torch import nn
from torch.autograd.function import Function
__all__ = ["CenterLoss", "CenterLossFunc"]
class CenterLoss(nn.Module):
"""description"""
def __init__(self, num_classes, feat_dim, size_average=True):
super(CenterLoss, self).__init__()
self.centers = nn.Parameter(
torch.randn(num_classes, feat_dim), requires_grad=True
)
self.center_loss_func = CenterLossFunc.apply
self.feat_dim = feat_dim
self.size_average = size_average
def forward(self, label: torch.Tensor, feat: torch.Tensor) -> torch.Tensor:
"""
        :param label: ground-truth class indices, shape (batch,)
        :type label: torch.Tensor
        :param feat: input features, flattened internally to (batch, feat_dim)
        :type feat: torch.Tensor
        :return: the center loss value
        :rtype: torch.Tensor"""
batch_size = feat.size(0)
feat = feat.reshape(batch_size, -1)
# To check the dim of centers and features
if feat.size(1) != self.feat_dim:
raise ValueError(
f"Center's dim: {self.feat_dim} should be equal to input feature's \
dim: {feat.size(1)}"
)
batch_size_tensor = feat.new_empty(1).fill_(
batch_size if self.size_average else 1
)
loss = self.center_loss_func(feat, label, self.centers, batch_size_tensor)
return loss
class CenterLossFunc(Function):
@staticmethod
def forward(ctx, feature, label, centers, batch_size) -> torch.Tensor:
"""
:param ctx:
:type ctx:
:param feature:
:type feature:
:param label:
:type label:
:param centers:
:type centers:
:param batch_size:
:type batch_size:
:return:
:rtype:"""
ctx.save_for_backward(feature, label, centers, batch_size)
centers_batch = centers.index_select(0, label.long())
return (feature - centers_batch).pow(2).sum() / 2.0 / batch_size
@staticmethod
def backward(ctx, grad_output):
"""
        :param ctx: autograd context holding the tensors saved in forward
        :param grad_output: upstream gradient of the loss
        :return: gradients w.r.t. (feature, label, centers, batch_size)
        :rtype: tuple"""
feature, label, centers, batch_size = ctx.saved_tensors
centers_batch = centers.index_select(0, label.long())
diff = centers_batch - feature
# init every iteration
counts = centers.new_ones(centers.size(0))
ones = centers.new_ones(label.size(0))
grad_centers = centers.new_zeros(centers.size())
counts = counts.scatter_add_(0, label.long(), ones)
grad_centers.scatter_add_(
0, label.unsqueeze(1).expand(feature.size()).long(), diff
)
grad_centers = grad_centers / counts.reshape(-1, 1)
return -grad_output * diff / batch_size, None, grad_centers / batch_size, None
if __name__ == "__main__":
def main():
"""description"""
from draugr.torch_utilities import global_torch_device
torch.manual_seed(999)
print("-" * 80)
ct = CenterLoss(10, 2, size_average=True).to(global_torch_device())
y = torch.Tensor([0, 0, 2, 1]).to(global_torch_device())
feat = torch.zeros(4, 2).to(global_torch_device()).requires_grad_()
print(list(ct.parameters()))
print(ct.centers.grad)
out = ct(y, feat)
print(out.item())
out.backward()
print(ct.centers.grad)
print(feat.grad)
main() | PypiClean |
/Fo4doG_mess_client-0.0.2.tar.gz/Fo4doG_mess_client-0.0.2/client/client/add_contact.py | from PyQt5.QtGui import QStandardItemModel, QStandardItem
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QDialog, QLabel, QComboBox, QPushButton
import logging
logger = logging.getLogger('client')
class AddContactDialog(QDialog):
'''
    Dialog for adding a user to the contact list.
    Offers the user a list of possible contacts and
    adds the selected one to the contacts.
'''
def __init__(self, transport, database):
super().__init__()
self.transport = transport
self.database = database
self.setFixedSize(350, 120)
self.setWindowTitle('Выберите контакт для добавления:')
self.setAttribute(Qt.WA_DeleteOnClose)
self.setModal(True)
self.selector_label = QLabel('Выберите контакт для добавления:', self)
self.selector_label.setFixedSize(200, 20)
self.selector_label.move(10, 0)
self.selector = QComboBox(self)
self.selector.setFixedSize(200, 20)
self.selector.move(10, 30)
self.btn_refresh = QPushButton('Обновить список', self)
self.btn_refresh.setFixedSize(100, 30)
self.btn_refresh.move(60, 60)
self.btn_ok = QPushButton('Добавить', self)
self.btn_ok.setFixedSize(100, 30)
self.btn_ok.move(230, 20)
self.btn_cancel = QPushButton('Отмена', self)
self.btn_cancel.setFixedSize(100, 30)
self.btn_cancel.move(230, 60)
self.btn_cancel.clicked.connect(self.close)
        # Populate the list of possible contacts
        self.possible_contacts_update()
        # Wire the refresh action to the refresh button
self.btn_refresh.clicked.connect(self.update_possible_contacts)
def possible_contacts_update(self):
'''
        Populate the list of possible contacts.
        Builds a list of all registered users,
        excluding existing contacts and the user themselves.
'''
self.selector.clear()
        # sets of all registered users and of this client's contacts
contacts_list = set(self.database.get_contacts())
users_list = set(self.database.get_users())
        # Remove ourselves from the user list so that we cannot add ourselves
users_list.remove(self.transport.username)
# Добавляем список возможных контактов
self.selector.addItems(users_list - contacts_list)
def update_possible_contacts(self):
'''
Метод обновления списка возможных контактов. Запрашивает с сервера
список известных пользователей и обносляет содержимое окна.
'''
try:
self.transport.user_list_update()
except OSError:
pass
else:
            logger.debug('User list successfully refreshed from the server')
self.possible_contacts_update() | PypiClean |
/CsuPTMD-1.0.12.tar.gz/CsuPTMD-1.0.12/PTMD/maskrcnn_benchmark/apex/apex/pyprof/prof/conv.py | from collections import OrderedDict
from .utility import Utility
from .base import OperatorLayerBase
class Conv(OperatorLayerBase):
"""
# N = batch size
# C,H,W = input channels, height, width
# K,P,Q = output channels, height, width
# R,S = filter height, width
# g = groups
"""
#todo: refine winograd and FFT
convAuxList = ["nchwToNhwc", "nhwcToNchw", "OffsetsKernel",]
winoAuxList = ["generateWinogradTilesKernel", "winogradWgradData", "winogradWgradOutput", "winogradWgradDelta"]
fftAuxList = ["compute_gemm_pointers", "flip_filter", "fft2d_r2c_", "fft2d_c2r_", "fft1d_r2c", "fft1d_c2r"]
miscAuxList = ["scaleTensor_kernel",]
convList = ["_s884cudnn_", "_s1688cudnn_", "_scudnn_", "2d_grouped_direct_kernel", "cudnn::detail::implicit_convolve_sgemm", "cudnn::detail::dgrad2d_alg1_1", "cudnn::detail::wgrad_alg0_engine", "cudnn::detail::dgrad_engine", "dgrad_1x1_stride_2x2", "spatialDepthwiseConvolutionUpdateOutput"]
winoList = ["winograd3x3Kernel", "_sgemm_"]
fftList = ["fermiPlusCgemmLDS128_batched", "_gcgemm_",]
miscList = []
def __init__(self, d):
marker = eval(d.argMarker[0])
mod = marker['mod']
op = marker['op']
args = marker['args']
self.marker = marker
self.mod_ = mod
self.op_ = op
self.args = args
self.dir = d.dir
self.name = d.name
self.sub = d.sub
assert (mod == "torch.nn.functional")
assert (op in ["conv1d", "conv2d"])
length = len(args)
assert (length >= 2) and (length <= 7)
i,w = args[0], args[1]
assert (i['type'] == "tensor")
assert (w['type'] == "tensor")
#ignore bias
if (length >= 4) and (args[3]['name'] == ""):
s = args[3]
elif any(x['name'] == 'stride' for x in args):
s = list(filter(lambda x : x['name'] == 'stride', args))[0]
else:
s = {'name': 'stride', 'type': 'int', 'value': 1}
if (length >= 5) and (args[4]['name'] == ""):
p = args[4]
elif any(x['name'] == 'padding' for x in args):
p = list(filter(lambda x : x['name'] == 'padding', args))[0]
else:
p = {'name': 'padding', 'type': 'int', 'value': 0}
if (length >= 6) and (args[5]['name'] == ""):
d = args[5]
elif any(x['name'] == 'dilation' for x in args):
d = list(filter(lambda x : x['name'] == 'dilation', args))[0]
else:
d = {'name': 'dilation', 'type': 'int', 'value': 1}
if (length == 7) and (args[6]['name'] == ""):
g = args[6]
elif any(x['name'] == 'groups' for x in args):
g = list(filter(lambda x : x['name'] == 'groups', args))[0]
else:
g = {'name': 'groups', 'type': 'int', 'value': 1}
if op == "conv1d":
assert (len(i['shape']) == 3)
assert (len(w['shape']) == 3)
assert (i['dtype'] == w['dtype'])
N, C1, W = i['shape']
K, C2, S = w['shape']
assert (C1 == C2)
p = p['value'] if Utility.isscalar(p['type']) else p['value'][0]
s = s['value'] if Utility.isscalar(s['type']) else s['value'][0]
d = d['value'] if Utility.isscalar(d['type']) else d['value'][0]
g = g['value']
assert (g == 1)
H = 1
R = 1
P = 1 + (H - (((R-1))+1))
Q = 1 + (W + 2*p - (((S-1)*d)+1))/s
P = int(P)
Q = int(Q)
if (H == 1):
assert (P == 1)
if (W == 1):
assert (Q == 1)
self.N = N
self.C = C1
self.H = H
self.W = W
self.K = K
self.P = P
self.Q = Q
self.R = R
self.S = S
self.ph = 0
self.pw = p
self.U = 1
self.V = s
self.dh = 1
self.dw = d
self.g = g
self.type = i['dtype']
elif op == "conv2d":
assert (len(i['shape']) == 4)
assert (len(w['shape']) == 4)
assert (i['dtype'] == w['dtype'])
N, C1, H, W = i['shape']
K, C2, R, S = w['shape']
if Utility.isscalar(p['type']):
ph = pw = p['value']
else:
assert (p['type'] == "tuple")
ph, pw = p['value']
if Utility.isscalar(s['type']):
sh = sw = s['value']
else:
assert (s['type'] == "tuple")
sh, sw = s['value']
if Utility.isscalar(d['type']):
dh = dw = d['value']
else:
assert (d['type'] == "tuple")
dh, dw = d['value']
g = g['value']
assert (g >= 1)
assert (C1 == C2*g)
P = 1 + (H + 2*ph - (((R-1)*dh)+1))/sh
Q = 1 + (W + 2*pw - (((S-1)*dw)+1))/sw
P = int(P)
Q = int(Q)
if (H == 1):
assert (P == 1)
if (W == 1):
assert (Q == 1)
self.N = N
self.C = C1
self.H = H
self.W = W
self.K = K
self.P = P
self.Q = Q
self.R = R
self.S = S
self.ph = ph
self.pw = pw
self.U = sh
self.V = sw
self.dh = dh
self.dw = dw
self.g = g
self.type = i['dtype']
else:
assert False
def params(self):
p = OrderedDict([('N',self.N), ('C',self.C), ('H',self.H), ('W',self.W), ('K',self.K), ('P',self.P), ('Q',self.Q), ('R',self.R), ('S',self.S), ('ph',self.ph), ('pw',self.pw), ('U',self.U), ('V',self.V), ('dh',self.dh), ('dw',self.dw), ('g',self.g), ('type',self.type)])
return p
def conv_bytes_flops(self, N, C, H, W, K, P, Q, R, S, g, t):
f = 2*N*K*P*Q*C*R*S/g #for fprop
elems = N*C*H*W + K*C*R*S/g + N*K*P*Q
b = elems * Utility.typeToBytes(t)
return b,f
def bytes_flops(self):
N,C,H,W,K,P,Q,R,S,ph,pw,U,V,dh,dw,g,t = self.params().values()
if any(x in self.name for x in Conv.convAuxList+Conv.winoAuxList+Conv.fftAuxList+Conv.miscAuxList):
bytes, flops = [0, 0]
elif any(x in self.name for x in Conv.convList+Conv.winoList+Conv.fftList+Conv.miscList):
if g == 1:
bytes, flops = self.conv_bytes_flops(N,C,H,W,K,P,Q,R,S,g,t)
else:
if "2d_grouped_direct_kernel" in self.name: #only 1 kernel is called
bytes, flops = self.conv_bytes_flops(N,C,H,W,K,P,Q,R,S,g,t)
elif "spatialDepthwiseConvolutionUpdateOutput" in self.name: #one kernel for separable conv
bytes, flops = self.conv_bytes_flops(N,C,H,W,K,P,Q,R,S,g,t)
else: #a kernel per group is called
bytes, flops = self.conv_bytes_flops(N,C/g,H,W,K/g,P,Q,R,S,1,t)
elif ("calc_bias_diff" in self.name): #bias gradient
elems = N*K*P*Q
flops = elems
bytes = 2 * elems * Utility.typeToBytes(t)
#params = OrderedDict([('N',N), ('K',K), ('P',P), ('Q',Q), ('type', t)])
else:
bytes, flops = [0, 0]
return bytes, flops
def bytes(self):
b,_ = self.bytes_flops()
return b
def flops(self):
_,f = self.bytes_flops()
return f
def tc(self):
for s in ["884cudnn", "1688cudnn"]:
if s in self.name:
return 1
return "-"
def op(self):
return self.op_
def mod(self):
return self.mod_ | PypiClean |
/ERAlchemy-1.2.10.tar.gz/ERAlchemy-1.2.10/eralchemy/sqla.py | from eralchemy.models import Relation, Column, Table
import sys
from sqlalchemy.exc import CompileError
if sys.version_info[0] == 3:
unicode = str
def relation_to_intermediary(fk):
"""Transform an SQLAlchemy ForeignKey object to it's intermediary representation. """
return Relation(
right_col=format_name(fk.parent.table.fullname),
left_col=format_name(fk._column_tokens[1]),
right_cardinality='?',
left_cardinality='*',
)
def format_type(typ):
""" Transforms the type into a nice string representation. """
try:
return unicode(typ)
except CompileError:
return 'Null'
def format_name(name):
""" Transforms the name into a nice string representation. """
return unicode(name)
def column_to_intermediary(col, type_formatter=format_type):
"""Transform an SQLAlchemy Column object to it's intermediary representation. """
return Column(
name=col.name,
type=type_formatter(col.type),
is_key=col.primary_key,
)
def table_to_intermediary(table):
"""Transform an SQLAlchemy Table object to it's intermediary representation. """
return Table(
name=table.fullname,
columns=[column_to_intermediary(col) for col in table.c._data.values()]
)
def metadata_to_intermediary(metadata):
""" Transforms SQLAlchemy metadata to the intermediary representation. """
tables = [table_to_intermediary(table) for table in metadata.tables.values()]
relationships = [relation_to_intermediary(fk) for table in metadata.tables.values() for fk in table.foreign_keys]
return tables, relationships
def declarative_to_intermediary(base):
""" Transform an SQLAlchemy Declarative Base to the intermediary representation. """
return metadata_to_intermediary(base.metadata)
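# Illustrative usage (assumes a typical declarative setup; the model below is made up):
#   Base = declarative_base()
#   class Person(Base):
#       __tablename__ = "person"
#       id = Column(Integer, primary_key=True)
#   tables, relationships = declarative_to_intermediary(Base)
#   # `tables` holds intermediary Table objects, `relationships` holds Relation objects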
def name_for_scalar_relationship(base, local_cls, referred_cls, constraint):
""" Overriding naming schemes. """
name = referred_cls.__name__.lower() + "_ref"
return name
def database_to_intermediary(database_uri, schema=None):
""" Introspect from the database (given the database_uri) to create the intermediary representation. """
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import create_engine
Base = automap_base()
engine = create_engine(database_uri)
if schema is not None:
Base.metadata.schema = schema
# reflect the tables
Base.prepare(engine, reflect=True, name_for_scalar_relationship=name_for_scalar_relationship)
return declarative_to_intermediary(Base) | PypiClean |
/Geode_GEM-0.12.0-py3-none-any.whl/geode_gem/widgets/stack.py |
# Geode
from geode_gem.widgets.common import GeodeGtkCommon
from geode_gem.widgets.misc import GeodeGtkLabel
# GObject
from gi.repository import Gtk
# ------------------------------------------------------------------------------
# Class
# ------------------------------------------------------------------------------
class GeodeGtkStackSidebar(GeodeGtkCommon, Gtk.Box):
def __init__(self, *args, **kwargs):
""" Constructor
"""
GeodeGtkCommon.__init__(self, Gtk.StackSidebar, **kwargs)
self.sidebar = Gtk.StackSidebar.new()
self.stack = kwargs.get("use_stack", Gtk.Stack())
self.stack.set_transition_type(Gtk.StackTransitionType.NONE)
self.sidebar.set_stack(self.stack)
for element in args:
self.append_widget(element)
self.stack.add_titled(element, element.identifier, element.title)
self.pack_start(self.sidebar, False, False, 0)
self.pack_start(self.stack, True, True, 0)
class GeodeGtkStackView(GeodeGtkCommon, Gtk.ScrolledWindow):
def __init__(self, title, *args, **kwargs):
""" Constructor
"""
GeodeGtkCommon.__init__(self, Gtk.ScrolledWindow, **kwargs)
self.inner_viewport = Gtk.Viewport.new(None, None)
self.inner_grid = Gtk.Box.new(Gtk.Orientation.VERTICAL, 0)
self.title = title
# ------------------------------------
# Properties
# ------------------------------------
self.inner_grid.set_border_width(16)
# ------------------------------------
# Packing
# ------------------------------------
for element in args:
self.append_widget(element)
self.inner_grid.pack_start(
element,
getattr(element, "is_fillable", False),
getattr(element, "is_expandable", False),
0)
self.add(self.inner_viewport)
self.inner_viewport.add(self.inner_grid)
class GeodeGtkStackSection(GeodeGtkCommon, Gtk.Box):
__setters__ = {
"set_orientation": Gtk.Orientation.VERTICAL,
"set_homogeneous": False,
"set_margin_bottom": 12,
"set_spacing": 6,
}
def __init__(self, title, *args, **kwargs):
""" Constructor
"""
GeodeGtkCommon.__init__(self, Gtk.Box, **kwargs)
label_title = GeodeGtkLabel(identifier="title",
set_markup=f"<b>{title}</b>",
set_style=Gtk.STYLE_CLASS_DIM_LABEL)
# Packing
self.pack_start(label_title, False, False, 0)
for element in args:
self.append_widget(element)
self.pack_start(element, True, True, 0)
class GeodeGtkStackOption(GeodeGtkCommon, Gtk.Box):
__setters__ = {
"set_orientation": Gtk.Orientation.HORIZONTAL,
"set_homogeneous": False,
"set_spacing": 12,
}
def __init__(self, label, *args, **kwargs):
""" Constructor
"""
GeodeGtkCommon.__init__(self, Gtk.Box, **kwargs)
label_option = GeodeGtkLabel(identifier="label",
set_alignment=(1, 0.5),
set_style=Gtk.STYLE_CLASS_DIM_LABEL,
set_text=f"{label}")
# Packing
self.pack_start(label_option, False, False, 0)
for element in args:
self.append_widget(element)
self.pack_start(element, True, True, 0) | PypiClean |
/Electrum-CHI-3.3.8.tar.gz/Electrum-CHI-3.3.8/packages/aiohttp/http_exceptions.py | from typing import Optional, Union
from .typedefs import _CIMultiDict
__all__ = ('HttpProcessingError',)
class HttpProcessingError(Exception):
"""HTTP error.
Shortcut for raising HTTP errors with custom code, message and headers.
code: HTTP Error code.
message: (optional) Error message.
headers: (optional) Headers to be sent in response, a list of pairs
"""
code = 0
message = ''
headers = None
def __init__(self, *,
code: Optional[int]=None,
message: str='',
headers: Optional[_CIMultiDict]=None) -> None:
if code is not None:
self.code = code
self.headers = headers
self.message = message
super().__init__("%s, message='%s'" % (self.code, message))
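    # Illustrative use of this shortcut (values are made up):
    #   raise HttpProcessingError(code=413, message='Payload too large')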
class BadHttpMessage(HttpProcessingError):
code = 400
message = 'Bad Request'
def __init__(self, message: str, *,
headers: Optional[_CIMultiDict]=None) -> None:
super().__init__(message=message, headers=headers)
class HttpBadRequest(BadHttpMessage):
code = 400
message = 'Bad Request'
class PayloadEncodingError(BadHttpMessage):
"""Base class for payload errors"""
class ContentEncodingError(PayloadEncodingError):
"""Content encoding error."""
class TransferEncodingError(PayloadEncodingError):
"""transfer encoding error."""
class ContentLengthError(PayloadEncodingError):
"""Not enough data for satisfy content length header."""
class LineTooLong(BadHttpMessage):
def __init__(self, line: str,
limit: str='Unknown',
actual_size: str='Unknown') -> None:
super().__init__(
"Got more than %s bytes (%s) when reading %s." % (
limit, actual_size, line))
class InvalidHeader(BadHttpMessage):
def __init__(self, hdr: Union[bytes, str]) -> None:
if isinstance(hdr, bytes):
hdr = hdr.decode('utf-8', 'surrogateescape')
super().__init__('Invalid HTTP Header: {}'.format(hdr))
self.hdr = hdr
class BadStatusLine(BadHttpMessage):
def __init__(self, line: str='') -> None:
if not line:
line = repr(line)
self.args = line,
self.line = line
class InvalidURLError(BadHttpMessage):
pass | PypiClean |
/MedPy-0.4.0.tar.gz/MedPy-0.4.0/bin/medpy_gradient.py | # build-in modules
import argparse
import logging
# third-party modules
import scipy
from scipy.ndimage.filters import generic_gradient_magnitude, prewitt
# path changes
# own modules
from medpy.io import load, save
from medpy.core import Logger
# information
__author__ = "Oskar Maier"
__version__ = "r0.2.0, 2011-12-12"
__email__ = "[email protected]"
__status__ = "Release"
__description__ = """
Creates a height map of the input images using the gradient magnitude
filter.
The pixel type of the resulting image will be float.
Copyright (C) 2013 Oskar Maier
This program comes with ABSOLUTELY NO WARRANTY; This is free software,
and you are welcome to redistribute it under certain conditions; see
the LICENSE file or <http://www.gnu.org/licenses/> for details.
"""
# code
def main():
# parse cmd arguments
    parser = getParser()
    args = getArguments(parser)  # getArguments() already calls parse_args(); the redundant extra parse was dropped
# prepare logger
logger = Logger.getInstance()
if args.debug: logger.setLevel(logging.DEBUG)
elif args.verbose: logger.setLevel(logging.INFO)
# laod input image
data_input, header_input = load(args.input)
# # check if output image exists
# if not args.force:
# if os.path.exists(image_gradient_name):
# logger.warning('The output image {} already exists. Skipping this step.'.format(image_gradient_name))
# continue
# prepare result image
    data_output = numpy.zeros(data_input.shape, dtype=numpy.float32)
# apply the gradient magnitude filter
logger.info('Computing the gradient magnitude with Prewitt operator...')
generic_gradient_magnitude(data_input, prewitt, output=data_output) # alternative to prewitt is sobel
# save resulting mask
save(data_output, args.output, header_input, args.force)
logger.info('Successfully terminated.')
def getArguments(parser):
"Provides additional validation of the arguments collected by argparse."
return parser.parse_args()
def getParser():
"Creates and returns the argparse parser object."
parser = argparse.ArgumentParser(description=__description__)
parser.add_argument('input', help='Source volume.')
parser.add_argument('output', help='Target volume.')
parser.add_argument('-v', dest='verbose', action='store_true', help='Display more information.')
parser.add_argument('-d', dest='debug', action='store_true', help='Display debug information.')
parser.add_argument('-f', dest='force', action='store_true', help='Silently override existing output images.')
return parser
if __name__ == "__main__":
main() | PypiClean |
/Flask-User-pt-0.6.21.tar.gz/Flask-User-pt-0.6.21/flask_user/views.py | from datetime import datetime
from flask import current_app, flash, redirect, request, url_for
from flask_login import current_user, login_user, logout_user
from .decorators import confirm_email_required, login_required
from . import emails
from . import signals
from .translations import gettext as _
# Python version specific imports
from sys import version_info as py_version
is_py2 = (py_version[0] == 2) #: Python 2.x?
is_py3 = (py_version[0] == 3) #: Python 3.x?
if is_py2:
from urlparse import urlsplit, urlunsplit
from urllib import quote, unquote
if is_py3:
from urllib.parse import urlsplit, urlunsplit
from urllib.parse import quote, unquote
def _call_or_get(function_or_property):
return function_or_property() if callable(function_or_property) else function_or_property
def render(*args, **kwargs):
user_manager = current_app.user_manager
return user_manager.render_function(*args, **kwargs)
def confirm_email(token):
""" Verify email confirmation token and activate the user account."""
# Verify token
user_manager = current_app.user_manager
db_adapter = user_manager.db_adapter
is_valid, has_expired, object_id = user_manager.verify_token(
token,
user_manager.confirm_email_expiration)
if has_expired:
flash(_('Your confirmation token has expired.'), 'error')
return redirect(url_for('user.login'))
if not is_valid:
flash(_('Invalid confirmation token.'), 'error')
return redirect(url_for('user.login'))
# Confirm email by setting User.confirmed_at=utcnow() or UserEmail.confirmed_at=utcnow()
user = None
if db_adapter.UserEmailClass:
user_email = user_manager.get_user_email_by_id(object_id)
if user_email:
user_email.confirmed_at = datetime.utcnow()
user = user_email.user
else:
user_email = None
user = user_manager.get_user_by_id(object_id)
if user:
user.confirmed_at = datetime.utcnow()
if user:
user.set_active(True)
db_adapter.commit()
else: # pragma: no cover
flash(_('Invalid confirmation token.'), 'error')
return redirect(url_for('user.login'))
# Send email_confirmed signal
signals.user_confirmed_email.send(current_app._get_current_object(), user=user)
# Prepare one-time system message
flash(_('Your email has been confirmed.'), 'success')
# Auto-login after confirm or redirect to login page
safe_next = _get_safe_next_param('next', user_manager.after_confirm_endpoint)
if user_manager.auto_login_after_confirm:
return _do_login_user(user, safe_next) # auto-login
else:
return redirect(url_for('user.login')+'?next='+quote(safe_next)) # redirect to login page
@login_required
@confirm_email_required
def change_password():
""" Prompt for old password and new password and change the user's password."""
user_manager = current_app.user_manager
db_adapter = user_manager.db_adapter
# Initialize form
form = user_manager.change_password_form(request.form)
safe_next = _get_safe_next_param('next', user_manager.after_change_password_endpoint)
form.next.data = safe_next
# Process valid POST
if request.method=='POST' and form.validate():
# Hash password
hashed_password = user_manager.hash_password(form.new_password.data)
# Change password
user_manager.update_password(current_user, hashed_password)
# Send 'password_changed' email
if user_manager.enable_email and user_manager.send_password_changed_email:
emails.send_password_changed_email(current_user)
# Send password_changed signal
signals.user_changed_password.send(current_app._get_current_object(), user=current_user)
# Prepare one-time system message
flash(_('Your password has been changed successfully.'), 'success')
# Redirect to 'next' URL
safe_next = user_manager.make_safe_url_function(form.next.data)
return redirect(safe_next)
# Process GET or invalid POST
return render(user_manager.change_password_template, form=form)
@login_required
@confirm_email_required
def change_username():
""" Prompt for new username and old password and change the user's username."""
user_manager = current_app.user_manager
db_adapter = user_manager.db_adapter
# Initialize form
form = user_manager.change_username_form(request.form)
safe_next = _get_safe_next_param('next', user_manager.after_change_username_endpoint)
form.next.data = safe_next
# Process valid POST
if request.method=='POST' and form.validate():
new_username = form.new_username.data
# Change username
user_auth = current_user.user_auth if db_adapter.UserAuthClass and hasattr(current_user, 'user_auth') else current_user
db_adapter.update_object(user_auth, username=new_username)
db_adapter.commit()
# Send 'username_changed' email
if user_manager.enable_email and user_manager.send_username_changed_email:
emails.send_username_changed_email(current_user)
# Send username_changed signal
signals.user_changed_username.send(current_app._get_current_object(), user=current_user)
# Prepare one-time system message
flash(_("Your username has been changed to '%(username)s'.", username=new_username), 'success')
# Redirect to 'next' URL
safe_next = user_manager.make_safe_url_function(form.next.data)
return redirect(safe_next)
# Process GET or invalid POST
return render(user_manager.change_username_template, form=form)
@login_required
@confirm_email_required
def email_action(id, action):
""" Perform action 'action' on UserEmail object 'id'
"""
user_manager = current_app.user_manager
db_adapter = user_manager.db_adapter
# Retrieve UserEmail by id
user_email = db_adapter.find_first_object(db_adapter.UserEmailClass, id=id)
# Users may only change their own UserEmails
if not user_email or user_email.user_id != int(current_user.get_id()):
return unauthorized()
if action=='delete':
# Primary UserEmail can not be deleted
if user_email.is_primary:
return unauthorized()
# Delete UserEmail
db_adapter.delete_object(user_email)
db_adapter.commit()
elif action=='make-primary':
# Disable previously primary emails
user_emails = db_adapter.find_all_objects(db_adapter.UserEmailClass, user_id=int(current_user.get_id()))
for ue in user_emails:
if ue.is_primary:
ue.is_primary = False
# Enable current primary email
user_email.is_primary = True
# Commit
db_adapter.commit()
elif action=='confirm':
_send_confirm_email(user_email.user, user_email)
else:
return unauthorized()
return redirect(url_for('user.manage_emails'))
def forgot_password():
"""Prompt for email and send reset password email."""
user_manager = current_app.user_manager
db_adapter = user_manager.db_adapter
# Initialize form
form = user_manager.forgot_password_form(request.form)
# Process valid POST
if request.method=='POST' and form.validate():
email = form.email.data
user, user_email = user_manager.find_user_by_email(email)
if user:
user_manager.send_reset_password_email(email)
# Prepare one-time system message
flash(_("A reset password email has been sent to '%(email)s'. Open that email and follow the instructions to reset your password.", email=email), 'success')
# Redirect to the login page
return redirect(_endpoint_url(user_manager.after_forgot_password_endpoint))
# Process GET or invalid POST
return render(user_manager.forgot_password_template, form=form)
def login():
""" Prompt for username/email and password and sign the user in."""
user_manager = current_app.user_manager
db_adapter = user_manager.db_adapter
safe_next = _get_safe_next_param('next', user_manager.after_login_endpoint)
safe_reg_next = _get_safe_next_param('reg_next', user_manager.after_register_endpoint)
# Immediately redirect already logged in users
if _call_or_get(current_user.is_authenticated) and user_manager.auto_login_at_login:
return redirect(safe_next)
# Initialize form
login_form = user_manager.login_form(request.form) # for login.html
register_form = user_manager.register_form() # for login_or_register.html
if request.method!='POST':
login_form.next.data = register_form.next.data = safe_next
login_form.reg_next.data = register_form.reg_next.data = safe_reg_next
# Process valid POST
if request.method=='POST' and login_form.validate():
# Retrieve User
user = None
user_email = None
if user_manager.enable_username:
# Find user record by username
user = user_manager.find_user_by_username(login_form.username.data)
user_email = None
# Find primary user_email record
if user and db_adapter.UserEmailClass:
user_email = db_adapter.find_first_object(db_adapter.UserEmailClass,
user_id=int(user.get_id()),
is_primary=True,
)
# Find user record by email (with form.username)
if not user and user_manager.enable_email:
user, user_email = user_manager.find_user_by_email(login_form.username.data)
else:
# Find user by email (with form.email)
user, user_email = user_manager.find_user_by_email(login_form.email.data)
if user:
# Log user in
safe_next = user_manager.make_safe_url_function(login_form.next.data)
return _do_login_user(user, safe_next, login_form.remember_me.data)
# Process GET or invalid POST
return render(user_manager.login_template,
form=login_form,
login_form=login_form,
register_form=register_form)
def logout():
""" Sign the user out."""
user_manager = current_app.user_manager
# Send user_logged_out signal
signals.user_logged_out.send(current_app._get_current_object(), user=current_user)
# Use Flask-Login to sign out user
logout_user()
# Prepare one-time system message
flash(_('You have signed out successfully.'), 'success')
# Redirect to logout_next endpoint or '/'
safe_next = _get_safe_next_param('next', user_manager.after_logout_endpoint)
return redirect(safe_next)
@login_required
@confirm_email_required
def manage_emails():
user_manager = current_app.user_manager
db_adapter = user_manager.db_adapter
user_emails = db_adapter.find_all_objects(db_adapter.UserEmailClass, user_id=int(current_user.get_id()))
form = user_manager.add_email_form()
# Process valid POST request
if request.method=="POST" and form.validate():
user_emails = db_adapter.add_object(db_adapter.UserEmailClass,
user_id=int(current_user.get_id()),
email=form.email.data)
db_adapter.commit()
return redirect(url_for('user.manage_emails'))
# Process GET or invalid POST request
return render(user_manager.manage_emails_template,
user_emails=user_emails,
form=form,
)
def register():
""" Display registration form and create new User."""
user_manager = current_app.user_manager
db_adapter = user_manager.db_adapter
safe_next = _get_safe_next_param('next', user_manager.after_login_endpoint)
safe_reg_next = _get_safe_next_param('reg_next', user_manager.after_register_endpoint)
# Initialize form
login_form = user_manager.login_form() # for login_or_register.html
register_form = user_manager.register_form(request.form) # for register.html
    # Invite token used to determine the validity of the registrant
invite_token = request.values.get("token")
    # When invitations are required, registering without a token is disallowed
if user_manager.require_invitation and not invite_token:
flash("Registration is invite only", "error")
return redirect(url_for('user.login'))
user_invite = None
if invite_token and db_adapter.UserInvitationClass:
user_invite = db_adapter.find_first_object(db_adapter.UserInvitationClass, token=invite_token)
if user_invite:
register_form.invite_token.data = invite_token
else:
flash("Invalid invitation token", "error")
return redirect(url_for('user.login'))
if request.method!='POST':
login_form.next.data = register_form.next.data = safe_next
login_form.reg_next.data = register_form.reg_next.data = safe_reg_next
if user_invite:
register_form.email.data = user_invite.email
# Process valid POST
if request.method=='POST' and register_form.validate():
# Create a User object using Form fields that have a corresponding User field
User = db_adapter.UserClass
user_class_fields = User.__dict__
user_fields = {}
# Create a UserEmail object using Form fields that have a corresponding UserEmail field
if db_adapter.UserEmailClass:
UserEmail = db_adapter.UserEmailClass
user_email_class_fields = UserEmail.__dict__
user_email_fields = {}
# Create a UserAuth object using Form fields that have a corresponding UserAuth field
if db_adapter.UserAuthClass:
UserAuth = db_adapter.UserAuthClass
user_auth_class_fields = UserAuth.__dict__
user_auth_fields = {}
# Enable user account
if db_adapter.UserProfileClass:
if hasattr(db_adapter.UserProfileClass, 'active'):
user_auth_fields['active'] = True
elif hasattr(db_adapter.UserProfileClass, 'is_enabled'):
user_auth_fields['is_enabled'] = True
else:
user_auth_fields['is_active'] = True
else:
if hasattr(db_adapter.UserClass, 'active'):
user_fields['active'] = True
elif hasattr(db_adapter.UserClass, 'is_enabled'):
user_fields['is_enabled'] = True
else:
user_fields['is_active'] = True
# For all form fields
for field_name, field_value in register_form.data.items():
# Hash password field
if field_name=='password':
hashed_password = user_manager.hash_password(field_value)
if db_adapter.UserAuthClass:
user_auth_fields['password'] = hashed_password
else:
user_fields['password'] = hashed_password
# Store corresponding Form fields into the User object and/or UserProfile object
else:
if field_name in user_class_fields:
user_fields[field_name] = field_value
if db_adapter.UserEmailClass:
if field_name in user_email_class_fields:
user_email_fields[field_name] = field_value
if db_adapter.UserAuthClass:
if field_name in user_auth_class_fields:
user_auth_fields[field_name] = field_value
# Add User record using named arguments 'user_fields'
user = db_adapter.add_object(User, **user_fields)
if db_adapter.UserProfileClass:
user_profile = user
# Add UserEmail record using named arguments 'user_email_fields'
if db_adapter.UserEmailClass:
user_email = db_adapter.add_object(UserEmail,
user=user,
is_primary=True,
**user_email_fields)
else:
user_email = None
# Add UserAuth record using named arguments 'user_auth_fields'
if db_adapter.UserAuthClass:
user_auth = db_adapter.add_object(UserAuth, **user_auth_fields)
if db_adapter.UserProfileClass:
user = user_auth
else:
user.user_auth = user_auth
require_email_confirmation = True
if user_invite:
if user_invite.email == register_form.email.data:
require_email_confirmation = False
db_adapter.update_object(user, confirmed_at=datetime.utcnow())
db_adapter.commit()
# Send 'registered' email and delete new User object if send fails
if user_manager.send_registered_email:
try:
# Send 'registered' email
_send_registered_email(user, user_email, require_email_confirmation)
except Exception as e:
# delete new User object if send fails
db_adapter.delete_object(user)
db_adapter.commit()
raise
# Send user_registered signal
signals.user_registered.send(current_app._get_current_object(),
user=user,
user_invite=user_invite)
# Redirect if USER_ENABLE_CONFIRM_EMAIL is set
if user_manager.enable_confirm_email and require_email_confirmation:
safe_reg_next = user_manager.make_safe_url_function(register_form.reg_next.data)
return redirect(safe_reg_next)
# Auto-login after register or redirect to login page
if 'reg_next' in request.args:
safe_reg_next = user_manager.make_safe_url_function(register_form.reg_next.data)
else:
safe_reg_next = _endpoint_url(user_manager.after_confirm_endpoint)
if user_manager.auto_login_after_register:
return _do_login_user(user, safe_reg_next) # auto-login
else:
return redirect(url_for('user.login')+'?next='+quote(safe_reg_next)) # redirect to login page
# Process GET or invalid POST
return render(user_manager.register_template,
form=register_form,
login_form=login_form,
register_form=register_form)
@login_required
def invite():
""" Allows users to send invitations to register an account """
user_manager = current_app.user_manager
db_adapter = user_manager.db_adapter
invite_form = user_manager.invite_form(request.form)
if request.method=='POST' and invite_form.validate():
email = invite_form.email.data
User = db_adapter.UserClass
user_class_fields = User.__dict__
user_fields = {
"email": email
}
user, user_email = user_manager.find_user_by_email(email)
if user:
flash("User with that email has already registered", "error")
return redirect(url_for('user.invite'))
else:
user_invite = db_adapter \
.add_object(db_adapter.UserInvitationClass, **{
"email": email,
"invited_by_user_id": current_user.id
})
db_adapter.commit()
token = user_manager.generate_token(user_invite.id)
accept_invite_link = url_for('user.register',
token=token,
_external=True)
# Store token
if hasattr(db_adapter.UserInvitationClass, 'token'):
user_invite.token = token
db_adapter.commit()
try:
# Send 'invite' email
emails.send_invite_email(user_invite, accept_invite_link)
except Exception as e:
# delete new User object if send fails
db_adapter.delete_object(user_invite)
db_adapter.commit()
raise
signals \
.user_sent_invitation \
.send(current_app._get_current_object(), user_invite=user_invite,
form=invite_form)
flash(_('Invitation has been sent.'), 'success')
safe_next = _get_safe_next_param('next', user_manager.after_invite_endpoint)
return redirect(safe_next)
return render(user_manager.invite_template, form=invite_form)
def resend_confirm_email():
"""Prompt for email and re-send email conformation email."""
user_manager = current_app.user_manager
db_adapter = user_manager.db_adapter
# Initialize form
form = user_manager.resend_confirm_email_form(request.form)
# Process valid POST
if request.method=='POST' and form.validate():
email = form.email.data
# Find user by email
user, user_email = user_manager.find_user_by_email(email)
if user:
_send_confirm_email(user, user_email)
# Redirect to the login page
return redirect(_endpoint_url(user_manager.after_resend_confirm_email_endpoint))
# Process GET or invalid POST
return render(user_manager.resend_confirm_email_template, form=form)
def reset_password(token):
""" Verify the password reset token, Prompt for new password, and set the user's password."""
# Verify token
user_manager = current_app.user_manager
db_adapter = user_manager.db_adapter
if _call_or_get(current_user.is_authenticated):
logout_user()
is_valid, has_expired, user_id = user_manager.verify_token(
token,
user_manager.reset_password_expiration)
if has_expired:
flash(_('Your reset password token has expired.'), 'error')
return redirect(_endpoint_url(user_manager.login_endpoint))
if not is_valid:
flash(_('Your reset password token is invalid.'), 'error')
return redirect(_endpoint_url(user_manager.login_endpoint))
user = user_manager.get_user_by_id(user_id)
# Mark email as confirmed
user_email = emails.get_primary_user_email(user)
user_email.confirmed_at = datetime.utcnow()
db_adapter.commit()
# Initialize form
form = user_manager.reset_password_form(request.form)
# Process valid POST
if request.method=='POST' and form.validate():
# Change password
hashed_password = user_manager.hash_password(form.new_password.data)
user_auth = user.user_auth if db_adapter.UserAuthClass and hasattr(user, 'user_auth') else user
db_adapter.update_object(user_auth, password=hashed_password)
db_adapter.commit()
# Send 'password_changed' email
if user_manager.enable_email and user_manager.send_password_changed_email:
emails.send_password_changed_email(user)
# Send user_reset_password signal
signals.user_reset_password.send(current_app._get_current_object(), user=user)
# Prepare one-time system message
flash(_("Your password has been reset successfully."), 'success')
# Auto-login after reset password or redirect to login page
safe_next = _get_safe_next_param('next', user_manager.after_reset_password_endpoint)
if user_manager.auto_login_after_reset_password:
return _do_login_user(user, safe_next) # auto-login
else:
return redirect(url_for('user.login')+'?next='+quote(safe_next)) # redirect to login page
# Process GET or invalid POST
return render(user_manager.reset_password_template, form=form)
def unconfirmed():
""" Prepare a Flash message and redirect to USER_UNCONFIRMED_ENDPOINT"""
# Prepare Flash message
url = request.script_root + request.path
flash(_("You must confirm your email to access '%(url)s'.", url=url), 'error')
# Redirect to USER_UNCONFIRMED_EMAIL_ENDPOINT
user_manager = current_app.user_manager
return redirect(_endpoint_url(user_manager.unconfirmed_email_endpoint))
def unauthenticated():
""" Prepare a Flash message and redirect to USER_UNAUTHENTICATED_ENDPOINT"""
user_manager = current_app.user_manager
# Prepare Flash message
url = request.url
flash(_("You must be signed in to access '%(url)s'.", url=url), 'error')
# Redirect to USER_UNAUTHENTICATED_ENDPOINT
safe_next = user_manager.make_safe_url_function(url)
return redirect(_endpoint_url(user_manager.unauthenticated_endpoint)+'?next='+quote(safe_next))
def unauthorized():
""" Prepare a Flash message and redirect to USER_UNAUTHORIZED_ENDPOINT"""
# Prepare Flash message
url = request.script_root + request.path
flash(_("You do not have permission to access '%(url)s'.", url=url), 'error')
# Redirect to USER_UNAUTHORIZED_ENDPOINT
user_manager = current_app.user_manager
return redirect(_endpoint_url(user_manager.unauthorized_endpoint))
@login_required
@confirm_email_required
def user_profile():
user_manager = current_app.user_manager
return render(user_manager.user_profile_template)
def _send_registered_email(user, user_email, require_email_confirmation=True):
user_manager = current_app.user_manager
db_adapter = user_manager.db_adapter
# Send 'confirm_email' or 'registered' email
if user_manager.enable_email and user_manager.enable_confirm_email:
# Generate confirm email link
object_id = user_email.id if user_email else int(user.get_id())
token = user_manager.generate_token(object_id)
confirm_email_link = url_for('user.confirm_email', token=token, _external=True)
# Send email
emails.send_registered_email(user, user_email, confirm_email_link)
# Prepare one-time system message
if user_manager.enable_confirm_email and require_email_confirmation:
email = user_email.email if user_email else user.email
flash(_('A confirmation email has been sent to %(email)s with instructions to complete your registration.', email=email), 'success')
else:
flash(_('You have registered successfully.'), 'success')
def _send_confirm_email(user, user_email):
user_manager = current_app.user_manager
db_adapter = user_manager.db_adapter
# Send 'confirm_email' or 'registered' email
if user_manager.enable_email and user_manager.enable_confirm_email:
# Generate confirm email link
object_id = user_email.id if user_email else int(user.get_id())
token = user_manager.generate_token(object_id)
confirm_email_link = url_for('user.confirm_email', token=token, _external=True)
# Send email
emails.send_confirm_email_email(user, user_email, confirm_email_link)
# Prepare one-time system message
email = user_email.email if user_email else user.email
flash(_('A confirmation email has been sent to %(email)s with instructions to complete your registration.', email=email), 'success')
def _do_login_user(user, safe_next, remember_me=False):
# User must have been authenticated
if not user: return unauthenticated()
# Check if user account has been disabled
if not _call_or_get(user.is_active):
flash(_('Your account has not been enabled.'), 'error')
return redirect(url_for('user.login'))
# Check if user has a confirmed email address
user_manager = current_app.user_manager
if user_manager.enable_email and user_manager.enable_confirm_email \
and not current_app.user_manager.enable_login_without_confirm_email \
and not user.has_confirmed_email():
url = url_for('user.resend_confirm_email')
flash(_('Your email address has not yet been confirmed. Check your email Inbox and Spam folders for the confirmation email or <a href="%(url)s">Re-send confirmation email</a>.', url=url), 'error')
return redirect(url_for('user.login'))
# Use Flask-Login to sign in user
#print('login_user: remember_me=', remember_me)
login_user(user, remember=remember_me)
# Send user_logged_in signal
signals.user_logged_in.send(current_app._get_current_object(), user=user)
# Prepare one-time system message
flash(_('You have signed in successfully.'), 'success')
# Redirect to 'safe_next' URL
return redirect(safe_next)
def make_safe_url(url):
"""Makes a URL safe by removing optional hostname and port.
Example:
| ``make_safe_url('https://hostname:80/path1/path2?q1=v1&q2=v2#fragment')``
| returns ``'/path1/path2?q1=v1&q2=v2#fragment'``
Override this method if you need to allow a list of safe hostnames.
"""
# Split the URL into scheme, netloc, path, query and fragment
parts = list(urlsplit(url))
# Clear scheme and netloc and rebuild URL
parts[0] = '' # Empty scheme
parts[1] = '' # Empty netloc (hostname:port)
safe_url = urlunsplit(parts)
return safe_url
# 'next' and 'reg_next' query parameters contain quoted (URL-encoded) URLs
# that may contain unsafe hostnames.
# Return the query parameter as a safe, unquoted URL
def _get_safe_next_param(param_name, default_endpoint):
if param_name in request.args:
# return safe unquoted query parameter value
safe_next = current_app.user_manager.make_safe_url_function(unquote(request.args[param_name]))
else:
# return URL of default endpoint
safe_next = _endpoint_url(default_endpoint)
return safe_next
def _endpoint_url(endpoint):
url = '/'
if endpoint:
url = url_for(endpoint)
return url | PypiClean |
/FairMongo-5.2.0.tar.gz/FairMongo-5.2.0/FCM/MCCore.py | from pymongo import MongoClient, bulk
from pymongo.database import Database
from FCM import MCServers
from FCM.MCCollection import MCCollection
from F import DICT, DATE
from F.CLASS import FairClass
from dateutil import parser
import datetime
from F.LOG import Log
s = " "
Log = Log(f"MCCore")
"""
-> THE MASTER BASE CLASS
- The Database Instance Itself.
-> Does not need a collection to be initiated.
    -> Other classes inherit from this object.
"""
class MCCore(FairClass, MCCollection):
core_connection_status = False
core_client: MongoClient
core_db: Database
    core_collection = False  # placeholder; _get_collection() assigns the real Database
core_bulk = bulk
# -> !!MAIN CONSTRUCTOR!! <-
def _constructor(self, url, databaseName):
Log.className = f"MCore HOST=[ {MCServers.db_environment_name} ], DATABASE=[ {databaseName} ]"
try:
Log.i(f"Initiating MongoDB: URI={url}")
self.core_client = MongoClient(host=url, connectTimeoutMS=10000)
if not self.core_client:
return False
if self.is_connected():
self.core_db = self.core_client.get_database(databaseName)
return self
except Exception as e:
Log.e(f"Unable to initiate MongoDB: URI={url}", error=e)
return False
return False
def connect(self, collectionName, dbUri=None, dbName=None):
try:
if not dbUri:
dbUri = MCServers.MONGO_DATABASE_URI
dbName = MCServers.db_name
self._constructor(dbUri, dbName)
self.set_ccollection(collectionName)
self.mcollection = self.core_collection
return self
except Exception as e:
Log.e("Failed to Connect to DB.", error=e)
return False
def is_connected(self) -> bool:
try:
info = self.core_client.server_info()
if info:
Log.d("MongoDB is Up.")
self.core_connection_status = True
return True
except Exception as e:
Log.e("MongoDB is Down.", error=e)
self.core_connection_status = False
return False
return False
def _get_collection(self, collection_name) -> Database:
"""
INTERNAL/PRIVATE ONLY
- DO NOT USE -
"""
self.core_collection = self.core_db.get_collection(collection_name)
return self.core_collection
def set_ccollection(self, collection_name):
"""
INTERNAL/PRIVATE ONLY
- DO NOT USE -
"""
self.construct_cc(self._get_collection(collection_name))
@staticmethod
def parse_date_for_query(date: str) -> datetime:
return datetime.datetime.strptime(date, "%B %d %Y")
""" OUT of database -> OFFICIAL DATE CONVERSION FROM DATABASE ENTRY <- """
@staticmethod
def from_db_date(str_date):
date_obj = parser.parse(str_date)
return date_obj
""" INTO database -> OFFICIAL DATE CONVERSION FOR DATABASE ENTRY <- """
@staticmethod
def to_db_date(t=None):
if t is None:
t = datetime.datetime.now()
date = str(t.strftime("%B")) + s + str(t.strftime("%d")) + s + str(t.strftime("%Y"))
return date
@staticmethod
def get_now_date():
return DATE.get_now_month_day_year_str()
@staticmethod
def parse_date(obj=None):
if type(obj) is str:
obj = DATE.parse_str_to_datetime(obj)
elif type(obj) is list:
return None
p_date = str(obj.strftime("%B")) + s + str(obj.strftime("%d")) + s + str(obj.strftime("%Y"))
return p_date
@staticmethod
def to_list(cursor):
return list(cursor)
@staticmethod
def to_counted_dict(cursor):
""" DEPRECATED """
result_dict = {}
for item in cursor:
_id = DICT.get("_id", item)
raw = DICT.get("raw_hookups", item)
count = len(raw)
result_dict[_id] = {"count": count,
"raw_hookups": raw}
return result_dict
@staticmethod
def cursor_count(cursor) -> int:
return len(list(cursor))
# if __name__ == '__main__':
# c = MCore().constructor()
# print(c) | PypiClean |
/EvolutionaryParameterGrid-0.0.4.tar.gz/EvolutionaryParameterGrid-0.0.4/evolutionary_grid/EvolutionaryParameterGrid.py | import array
import random
import numpy
from deap import base
from deap import creator
from deap import tools
from deap.algorithms import varAnd
def _mutIndividual(individual, up, indpb):
    for i, upper in enumerate(up):
        if random.random() < indpb:
            individual[i] = random.randint(0, upper)
return individual,
def _cxIndividual(ind1, ind2, indpb):
for i in range(len(ind1)):
if random.random() < indpb:
ind1[i], ind2[i] = ind2[i], ind1[i]
return ind1, ind2
class EAParameterGrid():
def __init__(self, generation=50, pop_size=50, cxpb=0.5, mutpb=0.2, tournament_size=10, skip_evaluated=True,
halloffame_size=1, batch_evaluate=False,
verbose=__debug__):
"""
EA parameter grid initialization
:param generation: Max number of generations to be evolved
:param pop_size: Population size of genetic algorithm
        :param cxpb: Probability of gene swap (crossover) between two chromosomes
        :param mutpb: Probability of gene mutation within a chromosome
:param tournament_size: Size of tournament for selection stage of genetic algorithm
        :param skip_evaluated: Skip individuals that have already been evaluated
:param halloffame_size: Max number of history best individuals to be saved
:param batch_evaluate: Evaluate individuals in parallel
:param verbose: Controls the verbosity: the higher, the more messages.
"""
self.cxpb = cxpb
self.mutpb = mutpb
self.verbose = verbose
self.tournament_size = tournament_size
self.generation = generation
self.pop_size = pop_size
self.current_fitness = None
self.skip_evaluated = skip_evaluated
self.history_dict = dict()
self.halloffame = tools.HallOfFame(halloffame_size)
self.batch_evaluate = batch_evaluate
self.names = dict()
self.stats = tools.Statistics(lambda ind: ind.fitness.values)
self.stats.register("avg", numpy.mean)
self.stats.register("std", numpy.std)
self.stats.register("min", numpy.min)
self.stats.register("max", numpy.max)
if hasattr(creator, 'FitnessMax'):
del creator.FitnessMax
if hasattr(creator, 'Individual'):
del creator.Individual
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", array.array, typecode='b', fitness=creator.FitnessMax)
def set_fitness(self, current_fitness):
self.current_fitness = current_fitness
def _convert_individual_to_dict(self, individual):
final_dict = {}
for k, v in zip(self.names, list(individual)):
final_dict[k] = self.parameter_grid[k][v]
return final_dict
def _history_check(self, individual):
if tuple(individual) in self.history_dict:
return True
else:
return False
def best_individuals(self):
return [self._convert_individual_to_dict(hof) for hof in self.halloffame]
def grid(self, parameter_grid):
self.parameter_grid = parameter_grid
self.names = parameter_grid.keys()
maxints = [len(possible_values) - 1 for possible_values in parameter_grid.values()]
toolbox = base.Toolbox()
# Individual generator
def individual_generator():
return creator.Individual([random.randint(0, x) for x in maxints])
# Structure initializers
toolbox.register("population", tools.initRepeat, list, individual_generator)
toolbox.register("mate", _cxIndividual, indpb=self.cxpb)
toolbox.register("mutate", _mutIndividual, up=maxints, indpb=self.mutpb)
toolbox.register("select", tools.selTournament, tournsize=self.tournament_size)
population = toolbox.population(n=self.pop_size)
logbook = tools.Logbook()
logbook.header = ['gen', 'nevals'] + (self.stats.fields if self.stats else [])
yield from self._population_evaluation(population)
if self.halloffame is not None:
self.halloffame.update(population)
record = self.stats.compile(population) if self.stats else {}
logbook.record(gen=0, nevals=len(population), **record)
if self.verbose:
print(logbook.stream)
# Begin the generational process
for gen in range(1, self.generation + 1):
if self.batch_evaluate and self.skip_evaluated:
new_offsprings = []
offspring_set = set()
while len(new_offsprings) < len(population):
# Select the next generation individuals
offspring = toolbox.select(population, len(population))
# Vary the pool of individuals
offspring = varAnd(offspring, toolbox, self.cxpb, self.mutpb)
for o in offspring:
if len(new_offsprings) >= len(population):
break
if tuple(o) in offspring_set or tuple(o) in self.history_dict:
continue
offspring_set.add(tuple(o))
new_offsprings.append(o)
offspring = new_offsprings
else:
# Select the next generation individuals
offspring = toolbox.select(population, len(population))
# Vary the pool of individuals
offspring = varAnd(offspring, toolbox, self.cxpb, self.mutpb)
# Evaluate the individuals with an invalid fitness
yield from self._population_evaluation(offspring)
# Update the hall of fame with the generated individuals
if self.halloffame is not None:
self.halloffame.update(offspring)
# Replace the current population by the offspring
population[:] = offspring
# Append the current generation statistics to the logbook
record = self.stats.compile(population) if self.stats else {}
logbook.record(gen=gen, nevals=len(offspring), **record)
if self.verbose:
print(logbook.stream)
return population, logbook
def _population_evaluation(self, population):
if self.batch_evaluate:
batch_list = []
batch_set = set()
for ind in population:
if self.skip_evaluated and self._history_check(ind):
ind.fitness.values = self.history_dict[tuple(ind)]
continue
else:
batch_list.append(ind)
batch_set.add(tuple(ind))
if self.skip_evaluated:
yield [self._convert_individual_to_dict(ind) for ind in batch_set]
else:
yield [self._convert_individual_to_dict(ind) for ind in batch_list]
for index, k in enumerate(batch_list):
k.fitness.values = (self.current_fitness[str(self._convert_individual_to_dict(k))],) \
if self.skip_evaluated else (self.current_fitness[index],)
self.history_dict[tuple(k)] = k.fitness.values
else:
for ind in population:
if self.skip_evaluated and self._history_check(ind):
ind.fitness.values = self.history_dict[tuple(ind)]
continue
yield self._convert_individual_to_dict(ind)
ind.fitness.values = self.current_fitness,
self.history_dict[tuple(ind)] = ind.fitness.values | PypiClean |
/EXIFnaming-0.0.21.tar.gz/EXIFnaming-0.0.21/README.md | # EXIFnaming
[](https://travis-ci.com/mvolkert/EXIFnaming)
[](https://badge.fury.io/py/EXIFnaming)

<!---[](https://codecov.io/gh/mvolkert/EXIFnaming)--->
<!------>
Renaming/Ordering/Modifying Photos using exiftool (https://exiftool.org/).
Developed for the Panasonic Lumix TZ101, but other models may also work.
You are free to contact me to verify support for your camera model.
## Functionalities:
* Ordering:
in folders by date
* Renaming:
  to the pattern:
  [AbbreviationForPlace][YearMonthDay]\_[Filenumber][SeriesType][SeriesSubNumber]\_[PhotoMode]
* Filtering:
  move pictures according to the renaming pattern into subfolders,
  for example: all bracket series with 5 pictures into one folder
* Tag writing:
  write a single CSV with minimal information and
  have all tags, title, description and so on written into the EXIF information.
* Geotagging
And many more...
## Usage:
It is designed to be used via IPython.
You need at least basic knowledge of Python.
The different functions can be used either via the top-level module or via the submodules.
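A minimal sketch of such a session (`order()` and `rename()` are hypothetical stand-ins for the top-level functions; check the module itself for the actual names):

```python
# Illustrative IPython session; order() and rename() are hypothetical
# stand-ins for the top-level functions mentioned above.
import EXIFnaming

EXIFnaming.order()   # e.g. sort pictures into folders by date
EXIFnaming.rename()  # e.g. rename files to the pattern described below
```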
## Naming Conventions:
* AbbreviationForPlace
  can be chosen freely
* YearMonthDay
  format can be chosen
* Filenumber
handled by rename method
shows the number of the picture or the series
* SeriesType
handled by rename method
* SeriesSubNumber
handled by rename method
shows the number of the picture within this series.
* PhotoMode
* Scene:
  mapping between Advanced Scene names and abbreviations
* Postprocessing:
* HDR:
* HDR-[HDR-Algorithm-Abr.]
* HDR-[HDR-Algorithm-Abr.]-[Tone Mapping-Preset-Abr.]
* HDR-[HDR-Algorithm-Abr.]-[Tone Mapping-Preset-Abr.]$[counter]
* HDR-[HDR-Algorithm-Abr.]$[[start]-[end]]
examples:
* "Colormix" Alorithm: HDR-C
* "Natural balanced" Tone Mapping: HDR-C-Nb
* second version of an HDR picture: HDR-C$2
* HDR picture consists only of the second to fifth picture of bracket series: HDR-C$[2-5]
* HDR picture consists of picture with counter 12,14 and 15: HDR-C$[12,14,15]
* Panorama:
* PANO
* PANO-[submode]$[counter]
* Tags
can be added to filename
have to be seperated by "_"
## EXIF Conventions
* Label:
  Same as the filename; used to retrieve the filename if it was changed
* Title:
Main Tags, but can be chosen differently
* Description:
Contains main Description, Tags and Processing information.
Is Formated in a way that is nicely readable multiline and in plain view
  The following programs can read it: Flickr, FStop (Android), Google Photos and maybe more
* User Comment: Same as Description. Windows can display it.
* Keywords/Subject:
  Both are used to store tags of the pictures.
  The following programs can read it: Flickr, FStop (Android), Windows and maybe more
* Location: XMP LocationCreated is used
## EXIFnaming folder structure
The program creates a folder ".EXIFnaming" in your photo directory:
* gps: put here your gpx files for geotagging
* infos: information files writen by multilple functions
* log: logfiles
* saves: renaming writes saves to restore old names
* setexif: put here your csv files for tag writing
## Camera Models
* Can be used basically with all camera models which are supported by https://exiftool.org/
* To use renaming specialties like series type or scene mode, there has to be an implementation of ModelBase for your camera model
* Contact me to improve the support of your Camera Model
## Setup
Download https://exiftool.org/.
Then set EXIFnaming.settings.exiftool_directory to the location of exiftool.exe.
You can do this for example by using `.ipython\profile_default\startup\start.py`.
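For example, a startup file along these lines would set the path on every IPython launch (a minimal sketch; the exiftool path is a placeholder):

```python
# .ipython/profile_default/startup/start.py
import EXIFnaming.settings

EXIFnaming.settings.exiftool_directory = r"C:\Tools\exiftool"  # placeholder path
```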
Also take a look at the other settings. | PypiClean
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dijit/form/TextBox.js | require({cache:{"url:dijit/form/templates/TextBox.html":"<div class=\"dijit dijitReset dijitInline dijitLeft\" id=\"widget_${id}\" role=\"presentation\"\n\t><div class=\"dijitReset dijitInputField dijitInputContainer\"\n\t\t><input class=\"dijitReset dijitInputInner\" data-dojo-attach-point='textbox,focusNode' autocomplete=\"off\"\n\t\t\t${!nameAttrSetting} type='${type}'\n\t/></div\n></div>\n"}});
define("dijit/form/TextBox",["dojo/_base/declare","dojo/dom-construct","dojo/dom-style","dojo/_base/kernel","dojo/_base/lang","dojo/_base/sniff","dojo/_base/window","./_FormValueWidget","./_TextBoxMixin","dojo/text!./templates/TextBox.html",".."],function(_1,_2,_3,_4,_5,_6,_7,_8,_9,_a,_b){
var _c=_1([_8,_9],{templateString:_a,_singleNodeTemplate:"<input class=\"dijit dijitReset dijitLeft dijitInputField\" data-dojo-attach-point=\"textbox,focusNode\" autocomplete=\"off\" type=\"${type}\" ${!nameAttrSetting} />",_buttonInputDisabled:_6("ie")?"disabled":"",baseClass:"dijitTextBox",postMixInProperties:function(){
var _d=this.type.toLowerCase();
if(this.templateString&&this.templateString.toLowerCase()=="input"||((_d=="hidden"||_d=="file")&&this.templateString==this.constructor.prototype.templateString)){
this.templateString=this._singleNodeTemplate;
}
this.inherited(arguments);
},_onInput:function(e){
this.inherited(arguments);
if(this.intermediateChanges){
var _e=this;
setTimeout(function(){
_e._handleOnChange(_e.get("value"),false);
},0);
}
},_setPlaceHolderAttr:function(v){
this._set("placeHolder",v);
if(!this._phspan){
this._attachPoints.push("_phspan");
this._phspan=_2.create("span",{className:"dijitPlaceHolder dijitInputField"},this.textbox,"after");
}
this._phspan.innerHTML="";
this._phspan.appendChild(document.createTextNode(v));
this._updatePlaceHolder();
},_updatePlaceHolder:function(){
if(this._phspan){
this._phspan.style.display=(this.placeHolder&&!this.focused&&!this.textbox.value)?"":"none";
}
},_setValueAttr:function(_f,_10,_11){
this.inherited(arguments);
this._updatePlaceHolder();
},getDisplayedValue:function(){
_4.deprecated(this.declaredClass+"::getDisplayedValue() is deprecated. Use set('displayedValue') instead.","","2.0");
return this.get("displayedValue");
},setDisplayedValue:function(_12){
_4.deprecated(this.declaredClass+"::setDisplayedValue() is deprecated. Use set('displayedValue', ...) instead.","","2.0");
this.set("displayedValue",_12);
},_onBlur:function(e){
if(this.disabled){
return;
}
this.inherited(arguments);
this._updatePlaceHolder();
},_onFocus:function(by){
if(this.disabled||this.readOnly){
return;
}
this.inherited(arguments);
this._updatePlaceHolder();
}});
if(_6("ie")){
_c=_1(_c,{declaredClass:"dijit.form.TextBox",_isTextSelected:function(){
var _13=_7.doc.selection.createRange();
var _14=_13.parentElement();
return _14==this.textbox&&_13.text.length==0;
},postCreate:function(){
this.inherited(arguments);
setTimeout(_5.hitch(this,function(){
try{
var s=_3.getComputedStyle(this.domNode);
if(s){
var ff=s.fontFamily;
if(ff){
var _15=this.domNode.getElementsByTagName("INPUT");
if(_15){
for(var i=0;i<_15.length;i++){
_15[i].style.fontFamily=ff;
}
}
}
}
}
catch(e){
}
}),0);
}});
_b._setSelectionRange=_9._setSelectionRange=function(_16,_17,_18){
if(_16.createTextRange){
var r=_16.createTextRange();
r.collapse(true);
r.moveStart("character",-99999);
r.moveStart("character",_17);
r.moveEnd("character",_18-_17);
r.select();
}
};
}else{
if(_6("mozilla")){
_c=_1(_c,{declaredClass:"dijit.form.TextBox",_onBlur:function(e){
this.inherited(arguments);
if(this.selectOnClick){
this.textbox.selectionStart=this.textbox.selectionEnd=undefined;
}
}});
}else{
_c.prototype.declaredClass="dijit.form.TextBox";
}
}
_5.setObject("dijit.form.TextBox",_c);
return _c;
}); | PypiClean |
/B9gemyaeix-4.14.1.tar.gz/B9gemyaeix-4.14.1/weblate/utils/classloader.py |
from importlib import import_module
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import cached_property
def load_class(name, setting):
"""Import module and creates class given by name in string."""
try:
module, attr = name.rsplit(".", 1)
except ValueError as error:
raise ImproperlyConfigured(
f'Error importing class {name} in {setting}: "{error}"'
)
try:
mod = import_module(module)
except ImportError as error:
raise ImproperlyConfigured(
f'Error importing module {module} in {setting}: "{error}"'
)
try:
return getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured(
f'Module "{module}" does not define a "{attr}" class in {setting}'
)
class ClassLoader:
"""Dict like object to lazy load list of classes."""
def __init__(self, name: str, construct: bool = True, collect_errors: bool = False):
self.name = name
self.construct = construct
self.collect_errors = collect_errors
self.errors = {}
def get_settings(self):
result = getattr(settings, self.name)
if result is None:
# Special case to disable all checks/...
result = []
elif not isinstance(result, (list, tuple)):
raise ImproperlyConfigured(f"Setting {self.name} must be list or tuple!")
return result
def load_data(self):
result = {}
value = self.get_settings()
for path in value:
try:
obj = load_class(path, self.name)
except ImproperlyConfigured as error:
self.errors[path] = error
if self.collect_errors:
continue
raise
if self.construct:
obj = obj()
result[obj.get_identifier()] = obj
return result
@cached_property
def data(self):
return self.load_data()
def __getitem__(self, key):
return self.data.__getitem__(key)
def __setitem__(self, key, value):
self.data.__setitem__(key, value)
def get(self, key):
return self.data.get(key)
def items(self):
return self.data.items()
def keys(self):
return self.data.keys()
def values(self):
return self.data.values()
def __iter__(self):
return self.data.__iter__()
def __len__(self):
return self.data.__len__()
def __contains__(self, item):
return self.data.__contains__(item)
def exists(self):
return bool(self.data)
def get_choices(self, empty=False, exclude=(), cond=lambda x: True):
result = [
(x, self[x].name)
for x in sorted(self)
if x not in exclude and cond(self[x])
]
if empty:
result.insert(0, ("", ""))
return result | PypiClean |