/Cola-0.1.0b0.tar.gz/Cola-0.1.0b0/cola/functions/budget.py

import os
import threading
try:
    import cPickle as pickle
except ImportError:
    import pickle

from cola.core.utils import get_rpc_prefix
from cola.core.rpc import client_call

FUNC_PREFIX = "budget_apply_"
SUFFICIENT, NOAPPLIED, ALLFINISHED = range(3)

DEFAULT_BUDGETS = 3

BUDGET_APPLY_STATUS_FILENAME = 'budget.apply.status'


def synchronized(func):
    def inner(self, *args, **kw):
        with self.lock:
            return func(self, *args, **kw)
    return inner


class BudgetApplyServer(object):
    def __init__(self, working_dir, settings,
                 rpc_server=None, app_name=None):
        self.dir_ = working_dir
        self.settings = settings
        self.rpc_server = rpc_server
        self.app_name = app_name

        self.budgets = settings.job.size
        self.limit = self.budgets >= 0
        self.applied = 0
        self.finished = 0

        self.lock = threading.Lock()

        if not os.path.exists(self.dir_):
            os.makedirs(self.dir_)

        self.load()
        self.set_status()
        self._register_rpc()

    def _register_rpc(self):
        if self.rpc_server is not None:
            self.register_rpc(self, self.rpc_server, app_name=self.app_name)

    @classmethod
    def register_rpc(cls, budget_server, rpc_server, app_name=None):
        prefix = get_rpc_prefix(app_name=app_name, prefix=FUNC_PREFIX)
        rpc_server.register_function(budget_server.set_budgets,
                                     name='set_budgets', prefix=prefix)
        rpc_server.register_function(budget_server.inc_budgets,
                                     name='inc_budgets', prefix=prefix)
        rpc_server.register_function(budget_server.dec_budgets,
                                     name='dec_budgets', prefix=prefix)
        rpc_server.register_function(budget_server.apply,
                                     name='apply', prefix=prefix)
        rpc_server.register_function(budget_server.finish,
                                     name='finish', prefix=prefix)
        rpc_server.register_function(budget_server.error,
                                     name='error', prefix=prefix)

    def set_status(self):
        assert self.finished <= self.applied
        if not self.limit or self.applied < self.budgets:
            self.status = SUFFICIENT
        elif self.applied >= self.budgets and \
                self.finished < self.budgets:
            self.status = NOAPPLIED
        elif self.finished >= self.budgets:
            self.status = ALLFINISHED
        else:
            raise RuntimeError('size of applied and finished is impossible')

    def get_status(self):
        return self.status

    def shutdown(self):
        self.save()

    def save(self):
        save_file = os.path.join(self.dir_, BUDGET_APPLY_STATUS_FILENAME)
        with open(save_file, 'w') as f:
            t = (self.applied, self.finished)
            pickle.dump(t, f)

    def load(self):
        save_file = os.path.join(self.dir_, BUDGET_APPLY_STATUS_FILENAME)
        if os.path.exists(save_file):
            with open(save_file) as f:
                self.applied, self.finished = pickle.load(f)

    @synchronized
    def set_budgets(self, budgets):
        self.budgets = budgets
        self.limit = self.budgets >= 0
        self.set_status()

    @synchronized
    def inc_budgets(self, budgets):
        if self.limit:
            self.budgets += budgets
            self.set_status()

    @synchronized
    def dec_budgets(self, budgets):
        if self.limit:
            self.budgets -= budgets
            self.set_status()

    @synchronized
    def apply(self, budget):
        if not self.limit:
            result = budget
        else:
            rest = self.budgets - self.applied
            result = max(min(budget, rest), 0)
        self.applied += result
        self.set_status()
        return result

    @synchronized
    def finish(self, size=1):
        self.finished += size
        self.finished = min(self.applied, self.finished)
        self.set_status()

    @synchronized
    def error(self, size=1):
        self.applied -= size
        self.applied = max(self.applied, self.finished)
        self.set_status()


class BudgetApplyClient(object):
    def __init__(self, server, app_name=None):
        self.server = server
        self.prefix = get_rpc_prefix(app_name, FUNC_PREFIX)

    def _call(self, func, *args):
        if isinstance(self.server, basestring):
            return client_call(self.server, self.prefix + func, *args)
        else:
            return getattr(self.server, func)(*args)

    def apply(self, budget):
        return self._call('apply', budget)

    def finish(self, size=1):
        return self._call('finish', size)

    def error(self, size=1):
        return self._call('error', size)

    def set_budget(self, budget):
        return self._call('set_budget', budget)

    def inc_budget(self, budget):
        return self._call('inc_budget', budget)

    def dec_budget(self, budget):
        return self._call('dec_budget', budget)
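

# --- Usage sketch (added for illustration; not part of the original module) ---
# It assumes a `settings` object exposing `settings.job.size`, and runs the
# server and client in the same process, so no RPC server is involved.
if __name__ == '__main__':
    class _Job(object):
        size = 10

    class _Settings(object):
        job = _Job()

    server = BudgetApplyServer('/tmp/budget_demo', _Settings())
    client = BudgetApplyClient(server)

    granted = client.apply(4)    # request 4 budgets; at most the remainder is granted
    client.finish(granted)       # report the granted budgets as finished
    print(granted, server.get_status())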

/DevContest-0.4.tar.gz/DevContest-0.4/devcontest/public/jsmath/jsMath-BaKoMa-fonts.js

/*
 *  The BaKoMa fonts have a different encoding, so change the characters
 *  to correspond to their encoding.
 */
if (jsMath.browser == "Mozilla" && jsMath.platform != "mac") {
/*
* Mozilla/PC
*/
jsMath.Update.TeXfontCodes({
cmr10: [
'Γ', 'Δ', 'Θ', 'Λ',
'Ξ', 'Π', 'Σ', 'Υ',
'Φ', 'Ψ', 'Ω', 'ff',
'fi', 'fl', 'ffi', 'ffl',
'ı', '', '̀', '́',
'̌', '̆', '̅', '̊',
'̧', 'ß', 'æ', 'œ',
'ø', 'Æ', 'Œ', 'Ø',
'̷', '!', '”', '#',
'$', '%', '&', '\'',
'(', ')', '*', '+',
',', '-', '.', '/',
'0', '1', '2', '3', '4', '5', '6', '7',
'8', '9', ':', ';', '¡', '=', '¿', '?',
'@', 'A', 'B', 'C', 'D', 'E', 'F', 'G',
'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O',
'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W',
'X', 'Y', 'Z', '[', '“', ']', '̂', '̇',
'‘', 'a', 'b', 'c', 'd', 'e', 'f', 'g',
'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
'p', 'q', 'r', 's', 't', 'u', 'v', 'w',
'x', 'y', 'z', '–', '—', '̋', '̃', '̈'
],
cmmi10: [
'Γ', 'Δ', 'Θ', 'Λ',
'Ξ', 'Π', 'Σ', 'Υ',
'Φ', 'Ψ', 'Ω', 'α',
'β', 'γ', 'δ', 'ε',
'ζ', 'η', 'θ', 'ι',
'κ', 'λ', 'μ', 'ν',
'ξ', 'π', 'ρ', 'σ',
'τ', 'υ', 'φ', 'χ',
'ψ', 'ω', 'ɛ', 'ϑ',
'ϖ', 'ϱ', 'ς', 'ϕ',
'↼', '↽', '⇀', '⇁',
'', '', '▹', '◃',
'0', '1', '2', '3', '4', '5', '6', '7',
'8', '9', '.', ',', '<', '/', '>', '⋆',
'∂', 'A', 'B', 'C', 'D', 'E', 'F', 'G',
'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O',
'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W',
'X', 'Y', 'Z', '♭', '♮', '♯', '⌣', '⌢',
'ℓ', 'a', 'b', 'c', 'd', 'e', 'f', 'g',
'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
'p', 'q', 'r', 's', 't', 'u', 'v', 'w',
'x', 'y', 'z', 'ı', '', '℘', '⃗', '̑'
],
cmsy10: [
'−', '·', '×', '⋆',
'÷', '⋄', '±', '∓',
'⊕', '⊖', '⊗', '⊘',
'⊙', '○', '∘', '∙',
'≃', '≍', '⊆', '⊇',
'≤', '≥', '≼', '≽',
'∼', '≅', '⊂', '⊃',
'≪', '≫', '≺', '≻',
'←', '→', '↑', '↓',
'↔', '↗', '↘', '≂',
'⇐', '⇒', '⇑', '⇓',
'⇔', '↖', '↙', '∝',
'′', '∞', '∈', '∋',
'△', '▽', '̸', '',
'∀', '∃', '¬', '∅',
'ℜ', 'ℑ', '⊤', '⊥',
'ℵ', '', 'ℬ', '',
'', 'ℰ', 'ℱ', '',
'ℋ', 'ℐ', '', '',
'ℒ', 'ℳ', '', '',
'', '', 'ℛ', '',
'', '', '', '',
'', '', '', '∪',
'∩', '⊎', '∧', '∨',
'⊢', '⊣', '⌊', '⌋',
'⌈', '⌉', '{', '}',
'〈', '〉', '∣', '∥',
'↕', '⇕', '∖', '≀',
'√', '∐', '∇', '∫',
'⊔', '⊓', '⊑', '⊒',
'§', '†', '‡', '¶',
'♣', '♢', '♡', '♠'
],
cmex10: [
'', '', '', '',
'', '', '', '',
'', '', '', '',
'', '', '', '',
'', '', '', '',
'', '', '', '',
'', '', '', '',
'', '', '', '',
'', '', '', '',
'', '', '', '',
'', '', '', '',
'', '', '', '',
'', '', '', '',
'', '', '', '',
'', '', '', '',
'', '', '', '',
'', '', '', '',
'', '', '', '',
'', '', '', '',
'', '', '', '',
'', '', '', '',
'', '', '', '',
'', '', '', '',
'', '', '', '',
'', '', '', '',
'', '', '', '',
'', '', '', '',
'', '', '', '',
'', '', '', '',
'', '', '', '',
'', '', '', '',
'', '', '', ''
]
});
/*
* Adjust a few other characters as well
*/
jsMath.Update.TeXfonts({
cmr10: {'20': {c: 'ˇ', tclass: 'normal', w: .3}},
cmmi10: {
'20': {c: '<i>&kappa;</i>', tclass: 'normal'},
'58': {c: '.', tclass: 'normal'},
'59': {c: ',', tclass: 'normal'},
'61': {c: '/', tclass: 'cmr10'}
},
cmsy10: {
'3': {c: '*', tclass: 'normal'},
'16': {c: '≍'},
'17': {c: '≡', tclass: 'normal'},
'25': {c: '≈', tclass: 'normal'},
'39': {c: '≃'},
'20': {c: '≤', tclass: 'normal'}
},
cmex10: {'20': {c: '<span style="font-size: 80%"></span>'}},
cmti10: {'10': {c: '<i>Ω</i>', tclass: 'normal'}},
cmbx10: {'10': {c: '<b>Ω</b>', tclass: 'normal'}}
});
} else {
jsMath.Font.BaKoMa = [
'¡', '¢', '£', '¤', '¥', '¦', '§', '¨',
'©', 'ª', '­', '®', '¯', '°', '±', '²',
'³', '´', 'µ', '¶', '∙', '¸', '¹', 'º',
'»', '¼', '½', '¾', '¿', 'À', 'Á', 'Â',
'Ã', '!', '"', '#', '$', '%', '&', '\'',
'(', ')', '*', '+', ',', '-', '.', '/',
'0', '1', '2', '3', '4', '5', '6', '7',
'8', '9', ':', ';', '<', '=', '>', '?',
'@', 'A', 'B', 'C', 'D', 'E', 'F', 'G',
'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O',
'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W',
'X', 'Y', 'Z', '[', '\\', ']', '^', '_',
'`', 'a', 'b', 'c', 'd', 'e', 'f', 'g',
'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
'p', 'q', 'r', 's', 't', 'u', 'v', 'w',
'x', 'y', 'z', '{', '|', '}', '~', 'Ä'
];
jsMath.Update.TeXfontCodes({
cmr10: jsMath.Font.BaKoMa,
cmmi10: jsMath.Font.BaKoMa,
cmsy10: jsMath.Font.BaKoMa,
cmex10: jsMath.Font.BaKoMa,
cmti10: jsMath.Font.BaKoMa,
cmbx10: jsMath.Font.BaKoMa
});
/*
* MSIE corrections
*/
switch (jsMath.browser) {
case "MSIE":
if (jsMath.platform == "pc") {
/*
* MSIE/PC
*/
jsMath.Browser.msieFontBug = 1;
jsMath.Update.TeXfonts({
cmr10: {'10': {c: 'Ω', tclass: 'normal'}},
cmmi10: {
'10': {c: '<i>Ω</i>', tclass: 'normal'},
'126': {c: '~<span style="margin-left:.1em"></span>'}
},
cmsy10: {
'10': {c: '⊗', tclass: 'arial'},
'55': {c: '<span style="margin-right:-.54em">7</span>'}
},
cmex10: {'10': {c: '<span style="font-size: 67%">D</span>'}},
cmti10: {'10': {c: '<i>Ω</i>', tclass: 'normal'}},
cmbx10: {'10': {c: '<b>Ω</b>', tclass: 'normal'}}
});
} else {
/*
* MSIE/Mac
*/
jsMath.Update.TeXfonts({
cmr10: {
'3': {c: '<font face="Symbol">L</font>', tclass: 'normal'},
'5': {c: '<font face="Symbol">P</font>', tclass: 'normal'},
'10': {c: '<font face="Symbol">W</font>', tclass: 'normal'},
'15': {c: 'ffl', tclass: 'normal'},
'16': {c: 'ı', tclass: 'normal'},
'20': {c: 'ˇ', tclass: 'normal'},
'22': {c: '¯', tclass: 'normal', w: .3},
'25': {c: 'ß', tclass: 'normal'},
'26': {c: 'æ', tclass: 'normal'},
'27': {c: 'œ', tclass: 'normal'}
},
cmmi10: {
'3': {c: '<font face="Symbol">L</font>', tclass: 'italic'},
'5': {c: '<font face="Symbol">P</font>', tclass: 'italic'},
'10': {c: '<font face="Symbol">W</font>', tclass: 'italic'},
'15': {c: '<font face="Symbol">e</font>', tclass: 'italic'},
'16': {c: '<font face="Symbol">z</font>', tclass: 'italic'},
'20': {c: '<font face="Symbol">k</font>', tclass: 'italic'},
'22': {c: '<font face="Symbol">m</font>', tclass: 'italic'},
'25': {c: '<font face="Symbol">p</font>', tclass: 'italic'},
'26': {c: '<font face="Symbol">r</font>', tclass: 'italic'},
'27': {c: '<font face="Symbol">s</font>', tclass: 'italic'}
},
cmsy10: {
'3': {c: '<span style="vertical-align:-.3em">*</span>', tclass: 'normal'},
'5': {c: 'Ή', tclass: 'normal'},
'10': {c: '⊗', tclass: 'normal'},
'15': {c: '•', tclass: 'normal'},
'16': {c: '≍', tclass: 'normal'},
'20': {c: '≤', tclass: 'normal'},
'22': {c: '≤', tclass: 'normal'},
'25': {c: '≈', tclass: 'normal'},
'26': {c: '<font face="Symbol">Ì</font>', tclass: 'normal'},
'27': {c: '<font face="Symbol">É</font>', tclass: 'normal'}
},
cmex10: {
'3': {c: '<span style="font-size: 67%">i</span>'},
'5': {c: '<span style="font-size: 67%">k</span>'},
'10': {c: '<span style="font-size: 67%">D</span>'},
'15': {c: '<span style="font-size: 55%">Â</span>'},
'16': {c: '<span style="font-size: 83%">µ</span>'},
'20': {c: '<span style="font-size: 83%">"</span>'},
'22': {c: '<span style="font-size: 83%">$</span>'},
'25': {c: '<span style="font-size: 83%">\'</span>'},
'26': {c: '<span style="font-size: 83%">(</span>'},
'27': {c: '<span style="font-size: 83%">)</span>'}
},
cmti10: {
'3': {c: '<font face="Symbol">L</font>', tclass: 'italic'},
'5': {c: '<font face="Symbol">P</font>', tclass: 'italic'},
'10': {c: '<font face="Symbol">W</font>', tclass: 'italic'},
'16': {c: 'ı', tclass: 'italic'},
'20': {c: '­', tclass: 'italic'},
'22': {c: '¯', tclass: 'italic', w: .3},
'25': {c: 'ß', tclass: 'italic'},
'26': {c: 'æ', tclass: 'italic'},
'27': {c: 'œ', tclass: 'italic'}
},
cmbx10: {
'3': {c: '<font face="Symbol">L</font>', tclass: 'bold'},
'5': {c: '<font face="Symbol">P</font>', tclass: 'bold'},
'10': {c: '<font face="Symbol">W</font>', tclass: 'bold'},
'16': {c: 'ı', tclass: 'bold'},
'20': {c: '­', tclass: 'bold'},
'22': {c: '¯', tclass: 'bold', w: .3},
'25': {c: 'ß', tclass: 'bold'},
'26': {c: 'æ', tclass: 'bold'},
'27': {c: 'œ', tclass: 'bold'}
}
});
}
break;
case "Mozilla":
if (jsMath.platform == "mac") {
/*
* Mozilla/Mac
*/
jsMath.Update.TeXfonts({
cmr10: {'10': {c: 'Ω', tclass: 'normal'}},
cmmi10: {'10': {c: '<i>Ω</i>', tclass: 'normal'}},
cmsy10: {'10': {c: '⊗', tclass: 'normal'}},
cmex10: {'10': {c: '<span style="font-size: 67%">D</span>'}},
cmti10: {'10': {c: '<i>Ω</i>', tclass: 'normal'}},
cmbx10: {'10': {c: '<b>Ω</b>', tclass: 'normal'}}
});
}
break;
case "Opera":
jsMath.Update.TeXfonts({
cmr10: {
'10': {c: 'Ω', tclass: 'normal'},
'20': {c: 'ˇ', tclass: 'normal'}
},
cmmi10: {
'10': {c: '<i>Ω</i>', tclass: 'normal'},
'20': {c: 'κ', tclass: 'normal'}
},
cmsy10: {
'10': {c: '⊗', tclass: 'normal'},
'20': {c: '≤', tclass: 'normal'}
},
cmex10: {
'10': {c: '<span style="font-size: 67%">D</span>'},
'20': {c: '<span style="font-size: 82%">"</span>'}
},
cmti10: {
'10': {c: '<i>Ω</i>', tclass: 'normal'},
'20': {c: '<i>ˇ</i>', tclass: 'normal'}
},
cmbx10: {
'10': {c: '<b>Ω</b>', tclass: 'normal'},
'20': {c: '<b>ˇ</b>', tclass: 'normal'}
}
});
break;
case "Konqueror":
jsMath.Update.TeXfonts({
cmr10: {'20': {c: 'ˇ', tclass: 'normal'}},
cmmi10: {'20': {c: 'κ', tclass: 'normal'}},
cmsy10: {'20': {c: '≤', tclass: 'normal'}},
cmex10: {'20': {c: '<span style="font-size: 84%">"</span>'}},
cmti10: {'20': {c: '<i>ˇ</i>', tclass: 'normal'}},
cmbx10: {'20': {c: '<b>ˇ</b>', tclass: 'normal'}}
});
break;
}
}
jsMath.Setup.Styles({
'.typeset .cmr10': 'font-family: CMR10, serif',
'.typeset .cmbx10': 'font-family: CMBX10, CMR10',
'.typeset .cmti10': 'font-family: CMTI10, CMR10',
'.typeset .cmmi10': 'font-family: CMMI10',
'.typeset .cmsy10': 'font-family: CMSY10',
'.typeset .cmex10': 'font-family: CMEX10',
'.typeset .arial': "font-family: 'Arial unicode MS'"
});

/edward-1.3.5.tar.gz/edward-1.3.5/edward/criticisms/evaluate.py

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import six
import tensorflow as tf

from edward.models import RandomVariable
from edward.util import check_data, get_session, compute_multinomial_mode, \
    with_binary_averaging

try:
  from edward.models import Bernoulli, Binomial, Categorical, \
      Multinomial, OneHotCategorical
except Exception as e:
  raise ImportError("{0}. Your TensorFlow version is not supported.".format(e))


def evaluate(metrics, data, n_samples=500, output_key=None, seed=None):
  """Evaluate fitted model using a set of metrics.

  A metric, or scoring rule [@winkler1994evaluating], is a function of
  observed data under the posterior predictive distribution. For
  example in supervised metrics such as classification accuracy, the
  observed data (true output) is compared to the posterior
  predictive's mean (predicted output). In unsupervised metrics such
  as log-likelihood, the probability of observing the data is
  calculated under the posterior predictive's log-density.

  Args:
    metrics: list of str and/or (str, params: dict) tuples, str,
      or (str, params: dict) tuple.
      List of metrics or a single metric:
      `'binary_accuracy'`,
      `'categorical_accuracy'`,
      `'sparse_categorical_accuracy'`,
      `'log_loss'` or `'binary_crossentropy'`,
      `'categorical_crossentropy'`,
      `'sparse_categorical_crossentropy'`,
      `'hinge'`,
      `'squared_hinge'`,
      `'mse'` or `'MSE'` or `'mean_squared_error'`,
      `'mae'` or `'MAE'` or `'mean_absolute_error'`,
      `'mape'` or `'MAPE'` or `'mean_absolute_percentage_error'`,
      `'msle'` or `'MSLE'` or `'mean_squared_logarithmic_error'`,
      `'poisson'`,
      `'cosine'` or `'cosine_proximity'`,
      `'log_lik'` or `'log_likelihood'`.
      In lieu of a metric string, this method also accepts (str, params: dict)
      tuples; the first element of this tuple is the metric string, and
      the second is a dict of associated params. At present, this dict only
      expects one key, `'average'`, which stipulates the type of averaging to
      perform on those metrics that permit binary averaging. Permissible
      options include: `None`, `'macro'` and `'micro'`.
    data: dict.
      Data to evaluate model with. It binds observed variables (of type
      `RandomVariable` or `tf.Tensor`) to their realizations (of
      type `tf.Tensor`). It can also bind placeholders (of type
      `tf.Tensor`) used in the model to their realizations.
    n_samples: int, optional.
      Number of posterior samples for making predictions, using the
      posterior predictive distribution.
    output_key: RandomVariable or tf.Tensor, optional.
      It is the key in `data` which corresponds to the model's output.
    seed: a Python integer. Used to create a random seed for the
      distribution

  Returns:
    list of float or float.
    A list of evaluations or a single evaluation.

  Raises:
    NotImplementedError.
    If an input metric does not match an implemented metric in Edward.

  #### Examples

  ```python
  # build posterior predictive after inference: it is
  # parameterized by a posterior sample
  x_post = ed.copy(x, {z: qz, beta: qbeta})

  # log-likelihood performance
  ed.evaluate('log_likelihood', data={x_post: x_train})

  # classification accuracy
  # here, `x_ph` is any features the model is defined with respect to,
  # and `y_post` is the posterior predictive distribution
  ed.evaluate('binary_accuracy', data={y_post: y_train, x_ph: x_train})

  # mean squared error
  ed.evaluate('mean_squared_error', data={y: y_data, x: x_data})

  # mean squared logarithmic error with `'micro'` averaging
  ed.evaluate(('mean_squared_logarithmic_error', {'average': 'micro'}),
              data={y: y_data, x: x_data})
  ```
  """
  sess = get_session()
  if isinstance(metrics, str):
    metrics = [metrics]
  elif callable(metrics):
    metrics = [metrics]
  elif not isinstance(metrics, list):
    raise TypeError("metrics must have type str or list, or be callable.")

  check_data(data)
  if not isinstance(n_samples, int):
    raise TypeError("n_samples must have type int.")

  if output_key is None:
    # Default output_key to the only data key that isn't a placeholder.
    keys = [key for key in six.iterkeys(data) if not
            isinstance(key, tf.Tensor) or "Placeholder" not in key.op.type]
    if len(keys) == 1:
      output_key = keys[0]
    else:
      raise KeyError("User must specify output_key.")
  elif not isinstance(output_key, RandomVariable):
    raise TypeError("output_key must have type RandomVariable.")

  # Create feed_dict for data placeholders that the model conditions
  # on; it is necessary for all session runs.
  feed_dict = {key: value for key, value in six.iteritems(data)
               if isinstance(key, tf.Tensor) and "Placeholder" in key.op.type}

  # Form true data.
  y_true = data[output_key]
  # Make predictions (if there are any supervised metrics).
  if metrics != ['log_lik'] and metrics != ['log_likelihood']:
    binary_discrete = (Bernoulli, Binomial)
    categorical_discrete = (Categorical, Multinomial, OneHotCategorical)
    total_count = sess.run(getattr(output_key, 'total_count', tf.constant(1.)))
    if isinstance(output_key, binary_discrete + categorical_discrete):
      # Average over realizations of their probabilities, then predict
      # via argmax over probabilities.
      probs = [sess.run(output_key.probs, feed_dict) for _ in range(n_samples)]
      probs = np.sum(probs, axis=0) / n_samples
      if isinstance(output_key, binary_discrete):
        # make random prediction whenever probs is exactly 0.5
        random = tf.random_uniform(shape=tf.shape(probs))
        y_pred = tf.round(tf.where(tf.equal(0.5, probs), random, probs))
      else:
        if total_count > 1:
          mode = compute_multinomial_mode(probs, total_count, seed)
          if len(output_key.sample_shape):
            y_pred = tf.reshape(tf.tile(mode, output_key.sample_shape),
                                [-1, len(probs)])
          else:
            y_pred = mode
        else:
          y_pred = tf.argmax(probs, len(probs.shape) - 1)
      probs = tf.constant(probs)
    else:
      # Monte Carlo estimate the mean of the posterior predictive.
      y_pred = [sess.run(output_key, feed_dict) for _ in range(n_samples)]
      y_pred = tf.cast(tf.add_n(y_pred), y_pred[0].dtype) / \
          tf.cast(n_samples, y_pred[0].dtype)

  if len(y_true.shape) == 0:
    y_true = tf.expand_dims(y_true, 0)
    y_pred = tf.expand_dims(y_pred, 0)

  # Evaluate y_true (according to y_pred if supervised) for all metrics.
  evaluations = []
  for metric in metrics:
    if isinstance(metric, tuple):
      metric, params = metric
    else:
      params = {}
    if metric == 'accuracy' or metric == 'crossentropy':
      # automate binary or sparse cat depending on its support
      support = sess.run(tf.reduce_max(y_true), feed_dict)
      if support <= 1:
        metric = 'binary_' + metric
      else:
        metric = 'sparse_categorical_' + metric

    if metric == 'binary_accuracy':
      evaluations += [binary_accuracy(y_true, y_pred, **params)]
    elif metric == 'categorical_accuracy':
      evaluations += [categorical_accuracy(y_true, y_pred, **params)]
    elif metric == 'sparse_categorical_accuracy':
      evaluations += [sparse_categorical_accuracy(y_true, y_pred, **params)]
    elif metric == 'log_loss' or metric == 'binary_crossentropy':
      evaluations += [binary_crossentropy(y_true, y_pred, **params)]
    elif metric == 'categorical_crossentropy':
      evaluations += [categorical_crossentropy(y_true, y_pred, **params)]
    elif metric == 'sparse_categorical_crossentropy':
      evaluations += [sparse_categorical_crossentropy(y_true, y_pred, **params)]
    elif metric == 'multinomial_accuracy':
      evaluations += [multinomial_accuracy(y_true, y_pred, **params)]
    elif metric == 'kl_divergence':
      y_true_ = y_true / total_count
      y_pred_ = probs
      evaluations += [kl_divergence(y_true_, y_pred_, **params)]
    elif metric == 'hinge':
      evaluations += [hinge(y_true, y_pred, **params)]
    elif metric == 'squared_hinge':
      evaluations += [squared_hinge(y_true, y_pred, **params)]
    elif (metric == 'mse' or metric == 'MSE' or
          metric == 'mean_squared_error'):
      evaluations += [mean_squared_error(y_true, y_pred, **params)]
    elif (metric == 'mae' or metric == 'MAE' or
          metric == 'mean_absolute_error'):
      evaluations += [mean_absolute_error(y_true, y_pred, **params)]
    elif (metric == 'mape' or metric == 'MAPE' or
          metric == 'mean_absolute_percentage_error'):
      evaluations += [mean_absolute_percentage_error(y_true, y_pred, **params)]
    elif (metric == 'msle' or metric == 'MSLE' or
          metric == 'mean_squared_logarithmic_error'):
      evaluations += [mean_squared_logarithmic_error(y_true, y_pred, **params)]
    elif metric == 'poisson':
      evaluations += [poisson(y_true, y_pred, **params)]
    elif metric == 'cosine' or metric == 'cosine_proximity':
      evaluations += [cosine_proximity(y_true, y_pred, **params)]
    elif metric == 'log_lik' or metric == 'log_likelihood':
      # Monte Carlo estimate the log-density of the posterior predictive.
      tensor = tf.reduce_mean(output_key.log_prob(y_true))
      log_pred = [sess.run(tensor, feed_dict) for _ in range(n_samples)]
      log_pred = tf.add_n(log_pred) / tf.cast(n_samples, tensor.dtype)
      evaluations += [log_pred]
    elif callable(metric):
      evaluations += [metric(y_true, y_pred, **params)]
    else:
      raise NotImplementedError("Metric is not implemented: {}".format(metric))

  if len(evaluations) == 1:
    return sess.run(evaluations[0], feed_dict)
  else:
    return sess.run(evaluations, feed_dict)


# Classification metrics


def binary_accuracy(y_true, y_pred):
  """Binary prediction accuracy, also known as 0/1-loss.

  Args:
    y_true: tf.Tensor.
      Tensor of 0s and 1s (most generally, any real values a and b).
    y_pred: tf.Tensor.
      Tensor of predictions, with same shape as `y_true`.
  """
  y_true = tf.cast(y_true, tf.float32)
  y_pred = tf.cast(y_pred, tf.float32)
  return tf.reduce_mean(tf.cast(tf.equal(y_true, y_pred), tf.float32))


def categorical_accuracy(y_true, y_pred):
  """Multi-class prediction accuracy. One-hot representation for `y_true`.

  Args:
    y_true: tf.Tensor.
      Tensor of 0s and 1s, where the outermost dimension of size `K`
      has only one 1 per row.
    y_pred: tf.Tensor.
      Tensor of predictions, with shape `y_true.shape[:-1]`. Each
      entry is an integer {0, 1, ..., K-1}.
  """
  y_true = tf.cast(tf.argmax(y_true, len(y_true.shape) - 1), tf.float32)
  y_pred = tf.cast(y_pred, tf.float32)
  return tf.reduce_mean(tf.cast(tf.equal(y_true, y_pred), tf.float32))


def sparse_categorical_accuracy(y_true, y_pred):
  """Multi-class prediction accuracy. Label {0, 1, .., K-1}
  representation for `y_true`.

  Args:
    y_true: tf.Tensor.
      Tensor of integers {0, 1, ..., K-1}.
    y_pred: tf.Tensor.
      Tensor of predictions, with same shape as `y_true`.
  """
  y_true = tf.cast(y_true, tf.float32)
  y_pred = tf.cast(y_pred, tf.float32)
  return tf.reduce_mean(tf.cast(tf.equal(y_true, y_pred), tf.float32))


# Classification metrics (with real-valued predictions)


def binary_crossentropy(y_true, y_pred):
  """Binary cross-entropy.

  Args:
    y_true: tf.Tensor.
      Tensor of 0s and 1s.
    y_pred: tf.Tensor.
      Tensor of real values (logit probabilities), with same shape as
      `y_true`.
  """
  y_true = tf.cast(y_true, tf.float32)
  y_pred = tf.cast(y_pred, tf.float32)
  return tf.reduce_mean(
      tf.nn.sigmoid_cross_entropy_with_logits(logits=y_pred, labels=y_true))


def categorical_crossentropy(y_true, y_pred):
  """Multi-class cross entropy. One-hot representation for `y_true`.

  Args:
    y_true: tf.Tensor.
      Tensor of 0s and 1s, where the outermost dimension of size K
      has only one 1 per row.
    y_pred: tf.Tensor.
      Tensor of real values (logit probabilities), with same shape as
      `y_true`. The outermost dimension is the number of classes.
  """
  y_true = tf.cast(y_true, tf.float32)
  y_pred = tf.cast(y_pred, tf.float32)
  return tf.reduce_mean(
      tf.nn.softmax_cross_entropy_with_logits(logits=y_pred, labels=y_true))


def sparse_categorical_crossentropy(y_true, y_pred):
  """Multi-class cross entropy. Label {0, 1, .., K-1} representation
  for `y_true.`

  Args:
    y_true: tf.Tensor.
      Tensor of integers {0, 1, ..., K-1}.
    y_pred: tf.Tensor.
      Tensor of real values (logit probabilities), with shape
      `(y_true.shape, K)`. The outermost dimension is the number of classes.
  """
  y_true = tf.cast(y_true, tf.int64)
  y_pred = tf.cast(y_pred, tf.float32)
  return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=y_pred, labels=y_true))


def multinomial_accuracy(y_true, y_pred):
  """Multinomial prediction accuracy. `y_true` is a tensor
  of integers, where the outermost dimension gives a draw
  from a Multinomial distribution.

  NB: In evaluating the accuracy between two Multinomials
  results may vary across evaluations. This is because Edward's
  algorithm for computing `y_pred`, i.e. the Multinomial
  mode, yields variable results if `any(isinstance(p, float)
  for p in total_count * probs)` (where `probs` is a vector
  of the predicted Multinomial probabilities).
  """
  y_true = tf.cast(y_true, tf.float32)
  y_pred = tf.cast(y_pred, tf.float32)
  return tf.reduce_mean(tf.cast(tf.equal(y_true, y_pred), tf.float32))


def kl_divergence(y_true, y_pred):
  """Kullback-Leibler divergence between two probability distributions. A
  vector of probabilities for `y_true`.

  Args:
    y_true: tf.Tensor.
      Tensor of real values (probabilities) where the values in each row
      of the outermost dimension sum to 1.
    y_pred: tf.Tensor.
      Same as `y_true`, and with the same shape.
  """
  y_true = tf.cast(y_true, tf.float32)
  y_pred = tf.cast(y_pred, tf.float32)
  zeros = tf.zeros(shape=(tf.shape(y_true)))
  summand = tf.where(tf.equal(y_true, 0.0), zeros,
                     y_true * (tf.log(y_true) - tf.log(y_pred)))
  return tf.reduce_sum(summand)


def hinge(y_true, y_pred):
  """Hinge loss.

  Args:
    y_true: tf.Tensor.
      Tensor of 0s and 1s.
    y_pred: tf.Tensor.
      Tensor of real values, with same shape as `y_true`.
  """
  y_true = tf.cast(y_true, tf.float32)
  y_pred = tf.cast(y_pred, tf.float32)
  return tf.reduce_mean(tf.maximum(1.0 - y_true * y_pred, 0.0))


def squared_hinge(y_true, y_pred):
  """Squared hinge loss.

  Args:
    y_true: tf.Tensor.
      Tensor of 0s and 1s.
    y_pred: tf.Tensor.
      Tensor of real values, with same shape as `y_true`.
  """
  y_true = tf.cast(y_true, tf.float32)
  y_pred = tf.cast(y_pred, tf.float32)
  return tf.reduce_mean(tf.square(tf.maximum(1.0 - y_true * y_pred, 0.0)))


# Regression metrics


@with_binary_averaging
def mean_squared_error(y_true, y_pred):
  """Mean squared error loss.

  Args:
    y_true: tf.Tensor.
    y_pred: tf.Tensor.
      Tensors of same shape and type.
  """
  return tf.reduce_mean(tf.square(y_pred - y_true), axis=-2)


@with_binary_averaging
def mean_absolute_error(y_true, y_pred):
  """Mean absolute error loss.

  Args:
    y_true: tf.Tensor.
    y_pred: tf.Tensor.
      Tensors of same shape and type.
  """
  return tf.reduce_mean(tf.abs(y_pred - y_true), axis=-2)


@with_binary_averaging
def mean_absolute_percentage_error(y_true, y_pred):
  """Mean absolute percentage error loss.

  Args:
    y_true: tf.Tensor.
    y_pred: tf.Tensor.
      Tensors of same shape and type.
  """
  diff = tf.abs((y_true - y_pred) / tf.clip_by_value(tf.abs(y_true),
                                                     1e-8, np.inf))
  return 100.0 * tf.reduce_mean(diff, axis=-2)


@with_binary_averaging
def mean_squared_logarithmic_error(y_true, y_pred):
  """Mean squared logarithmic error loss.

  Args:
    y_true: tf.Tensor.
    y_pred: tf.Tensor.
      Tensors of same shape and type.
  """
  first_log = tf.log(tf.clip_by_value(y_pred, 1e-8, np.inf) + 1.0)
  second_log = tf.log(tf.clip_by_value(y_true, 1e-8, np.inf) + 1.0)
  return tf.reduce_mean(tf.square(first_log - second_log), axis=-2)


def poisson(y_true, y_pred):
  """Negative Poisson log-likelihood of data `y_true` given predictions
  `y_pred` (up to proportion).

  Args:
    y_true: tf.Tensor.
    y_pred: tf.Tensor.
      Tensors of same shape and type.
  """
  return tf.reduce_sum(y_pred - y_true * tf.log(y_pred + 1e-8))


def cosine_proximity(y_true, y_pred):
  """Cosine similarity of two vectors.

  Args:
    y_true: tf.Tensor.
    y_pred: tf.Tensor.
      Tensors of same shape and type.
  """
  y_true = tf.nn.l2_normalize(y_true, len(y_true.shape) - 1)
  y_pred = tf.nn.l2_normalize(y_pred, len(y_pred.shape) - 1)
  return tf.reduce_sum(y_true * y_pred)

/GXBubble-0.1.27.tar.gz/GXBubble-0.1.27/gxbubble/iOSBuild.py

import os
import re
import shutil
import sys
import time
import subprocess

from OpenSSL import crypto
from OpenSSL.crypto import load_certificate, FILETYPE_PEM


class IOSBuild:
    def __init__(self, ProjectPath):
        self.projectPath = ProjectPath;

    # Get the name of the first file with the given suffix in the target directory
    def access_filename(cwd_patch, file_suffix):
        for file_name in os.listdir(cwd_patch):
            if os.path.splitext(file_name)[1] == file_suffix:
                return file_name
        return ""

    # Look up the value of a key in the .mobileprovision profile
    def value_mobileprovision(self, findKey, valueLabel):
        file_mobileprovision = "";
        valueLabel_ = valueLabel.replace("/", '')
        file_mobileprovision = self.Provision_dis  # access_filename(provision_dis)
        if not file_mobileprovision.strip():
            print("Failed to read the .mobileprovision profile; please check that the file exists")
            sys.exit(1)
        string_mobileprovision = self.string_subprocessPopen('security cms -D -i %s' % (file_mobileprovision), None, False)
        if findKey == "输出mobileprovision":  # sentinel meaning "dump the whole mobileprovision"
            return string_mobileprovision
        findKey_location = string_mobileprovision.find('%s' % (findKey))
        string_mobileprovision = string_mobileprovision[findKey_location:]
        findKey_location = string_mobileprovision.find('%s' % valueLabel_)
        value = string_mobileprovision[findKey_location + len('%s' % valueLabel_):string_mobileprovision.find('%s' % valueLabel)]
        return value

    # Run a shell command and return its printed output
    def string_subprocessPopen(self, command, cwd_patch, cancel_newline):
        command_file = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=cwd_patch)
        command_file.wait()
        command_string = command_file.stdout.read().decode()
        if cancel_newline == True:
            command_string = command_string.replace("\n", '')
        return command_string

    # Read the relevant information from the .mobileprovision profile
    def current_mobileprovision_method(self):
        # global uuid_mobileprovision, teamName_mobileprovision, fileName_mobileprovision
        # global bundleId_mobileprovision, cerId_mobileprovision
        self.uuid_mobileprovision = self.value_mobileprovision("<key>UUID</key>", "</string>")
        self.fileName_mobileprovision = self.value_mobileprovision("<key>Name</key>", "</string>")
        self.cerId_mobileprovision = self.value_mobileprovision("<key>com.apple.developer.team-identifier</key>", "</string>")
        self.teamName_mobileprovision = "iPhone Distribution: " + self.value_mobileprovision("<key>TeamName</key>", "</string>")
        self.bundleIdTemp_mobileprovision = self.value_mobileprovision("<key>application-identifier</key>", "</string>")
        self.bundleId_mobileprovision = self.bundleIdTemp_mobileprovision[len('%s' % (self.cerId_mobileprovision)) + 1:len('%s' % (self.bundleIdTemp_mobileprovision))]

    def OpenSSLGetP12Info(self):
        # open it, using password. Supply/read your own from stdin.
        p12 = crypto.load_pkcs12(open(self.P12File_dis, 'rb').read(), self.p12Password)
        pemInfo = crypto.dump_certificate(crypto.FILETYPE_PEM, p12.get_certificate())
        cert = load_certificate(FILETYPE_PEM, pemInfo)
        subject = cert.get_subject();
        # global SSLCommonName;
        self.SSLCommonName = subject.commonName;

    def XCodeToSetAutoMatically(self):
        print('start python script! Delete AutoMatically Manage Signing')
        filePath = self.projectPath + "/Unity-iPhone.xcodeproj/project.pbxproj"
        if(os.path.exists(filePath)):
            f = open(filePath, 'r+')
            contents = f.read()
            f.seek(0)
            f.truncate()
            pattern = re.compile(r'(TestTargetID = (\w*)) \/\* Unity-iPhone \*\/;')
            f.write(pattern.sub(r'\1;\n\t\t\t\t\t};\n\t\t\t\t\t\2 = {\n\t\t\t\t\t\tProvisioningStyle = Manual;', contents))
            f.close()
        else:
            print("Not Found Path File : " + filePath)
            sys.exit(1)
        print('end python script !')

    def XcodeToIPA(self, p12file_dis, provision_dis, exportOptionPlistPath, iPASavePath, P12PassWord, IPAName, SHA1, ShowLog, PCUserName):
        # global projectPath, P12File_dis, Provision_dis, ExportOptionPlistPath, IPASavePath, p12Password;
        # Path to the exportOptionPlist file
        self.ExportOptionPlistPath = exportOptionPlistPath
        self.P12File_dis = p12file_dis;
        self.Provision_dis = provision_dis;
        # Output path for the package
        self.IPASavePath = iPASavePath
        self.p12Password = P12PassWord
        # Unlock the keychain
        os.system('security unlock-keychain -p ztgame@123 /Users/' + PCUserName + '/Library/Keychains/login.keychain')
        os.system('security list-keychains -s /Users/' + PCUserName + '/Library/Keychains/login.keychain')
        # Import the certificate
        if(p12file_dis != ""):
            os.system("security import " + p12file_dis + " -k /Users/" + PCUserName + "/Library/Keychains/login.keychain -P " + self.p12Password + " -T /usr/bin/codesign")
        # Clear the screen
        os.system('clear')
        # Switch the Xcode project from automatic to manual signing
        if (ShowLog):
            print("Change XCodeAuto To Manual")
        self.XCodeToSetAutoMatically();
        # Get the certificate's CommonName
        print("GetCommonName")
        self.OpenSSLGetP12Info()
        # Read the relevant information from the .mobileprovision profile
        print("Get Mobileprovision Info")
        self.current_mobileprovision_method();
        provision_bds_dir = "/Users/" + PCUserName + "/Library/MobileDevice/Provisioning Profiles/";
        if(self.Provision_dis != ""):
            distMobileprovision = provision_bds_dir + self.uuid_mobileprovision + ".mobileprovision";
            if(not os.path.exists(distMobileprovision)):
                if(self.uuid_mobileprovision != ""):
                    shutil.copy(self.Provision_dis, distMobileprovision);
        if(not os.path.exists(self.IPASavePath)):
            os.makedirs(self.IPASavePath);
        self.BuildIPA(self.IPASavePath, self.SSLCommonName, self.uuid_mobileprovision, self.ExportOptionPlistPath, IPAName)
        if (ShowLog):
            print(" CODE_SIGN_IDENTITY: " + self.SSLCommonName)
            print(" PROVISIONING_PROFILE: " + self.uuid_mobileprovision)
        if(SHA1 != ""):
            os.system("security delete-certificate -Z " + SHA1);
            print("Delete P12");
        print("XCodeToIPAOver")

    def BuildIPA(self, IPASavePath, SSLCommonName, uuid_mobileprovision, ExportOptionPlistPath, IPAName):
        # Enter the project directory
        print("os.chdir " + self.projectPath);
        if (os.path.exists(self.projectPath)):
            os.chdir(self.projectPath)
        else:
            print("Not found Path :" + self.projectPath);
            sys.exit(1);
        # Generate the .xcarchive file
        print("Clean XCodeBuild")
        os.system("xcodebuild clean -project Unity-iPhone.xcodeproj -scheme Unity-iPhone -configuration Release")
        print("Achieve Proj");
        os.system("xcodebuild archive -project Unity-iPhone.xcodeproj -scheme Unity-iPhone -configuration Release -archivePath " + IPASavePath + "/Unity-iPhone CODE_SIGN_IDENTITY='" + SSLCommonName + "' PROVISIONING_PROFILE=" + uuid_mobileprovision)
        # Generate the .ipa package
        print("ExportAchieve");
        if (os.path.exists(IPASavePath + "/Unity-iPhone.xcarchive")):
            os.system("xcodebuild -exportArchive -archivePath " + IPASavePath + "/Unity-iPhone.xcarchive -exportPath " + IPASavePath + " -exportOptionsPlist " + ExportOptionPlistPath)
        else:
            print("Not found xcarchiveFile :" + IPASavePath + "/Unity-iPhone.xcarchive" + "Look Achieve Log");
            sys.exit(1);
        # Rename the exported archive
        os.chdir(IPASavePath)
        if (os.path.exists("Unity-iPhone.ipa")):
            print("Rename Unity-iPhone To ProjectName : " + IPAName);
            os.rename("Unity-iPhone.ipa", IPAName + ".ipa");
# xcodebuild -project xcodeprojPath -sdk iphoneos -scheme "Unity-iPhone" CONFIGURATION_BUILD_DIR='./' CODE_SIGN_IDENTITY="Yours" PROVISIONING_PROFILE="Yours"
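

# --- Usage sketch (added for illustration; not part of the original module) ---
# All paths, passwords and the user name below are placeholders, not values
# taken from the source.
#
#   builder = IOSBuild('/path/to/Unity-iPhone/project')
#   builder.XcodeToIPA(p12file_dis='/path/to/dist.p12',
#                      provision_dis='/path/to/dist.mobileprovision',
#                      exportOptionPlistPath='/path/to/ExportOptions.plist',
#                      iPASavePath='/path/to/output',
#                      P12PassWord='******',
#                      IPAName='MyGame',
#                      SHA1='',
#                      ShowLog=True,
#                      PCUserName='builder')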

/DijkstraAlgo-0.0.7-py3-none-any.whl/DijkstraAlgo.py

class DijkstraAlgorithm:
    def __init__(self):
        '''Initializing the instances'''
        self.min_dis_index = []
        self.short_dis = []

    def minDistance(self, dist, queue):
        minimum = float("Inf")
        min_index = -1
        for i in range(len(dist)):
            if dist[i] < minimum and i in queue:
                minimum = dist[i]
                min_index = i
        return min_index

    def printPath(self, parent, j):
        if parent[j] == -1:  # If 'j' is the source
            # print (j+1, end=" ")
            self.min_dis_index.append(j+1)
            return 0
        # If 'j' is not the source, call the recursive function
        self.printPath(parent, parent[j])
        self.min_dis_index.append(j+1)
        # print (j+1, end=" ")

    def distance(self):
        '''Return the Distance of the measured path'''
        return self.short_dis

    def path(self):
        '''Return the Shortest Path'''
        return self.min_dis_index

    def dijkstraWithPath(self, graph, src, des):
        source = src - 1
        row = len(graph)
        col = len(graph[0])

        # Initialize all distances to infinity
        dist = [float('Infinity')] * row

        # The parent array where to store the shortest path tree
        parent = [-1] * row

        # Distance of source from itself is zero
        dist[source] = 0

        queue = []  # An empty list to store all vertices in queue
        for i in range(row):
            queue.append(i)

        # Find the shortest path for all vertices
        while queue:
            # Select the minimum distance vertex
            # from the set of vertices
            # which are still in the queue
            u = self.minDistance(dist, queue)

            # Now remove the minimum distance element, which has already been processed
            queue.remove(u)

            # Consider the vertices which are still in the queue,
            # update the distance and parent index of the adjacent vertices
            # which are selected
            for i in range(col):
                if graph[u][i] and i in queue:  # If dist[i] is in the queue
                    # and if the total weight of the path from source to destination is less than the current value of dist[i]
                    if dist[u] + graph[u][i] < dist[i]:
                        dist[i] = dist[u] + graph[u][i]
                        parent[i] = u

        self.short_dis.append(dist[des-1])  # The measured Distance
        return self.printPath(parent, des-1)
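

# --- Usage sketch (added for illustration; not part of the original module) ---
# The graph is an adjacency matrix (0 = no edge) and vertices are numbered
# from 1, as expected by dijkstraWithPath().
if __name__ == '__main__':
    graph = [[0, 2, 0, 6, 0],
             [2, 0, 3, 8, 5],
             [0, 3, 0, 0, 7],
             [6, 8, 0, 0, 9],
             [0, 5, 7, 9, 0]]

    algo = DijkstraAlgorithm()
    algo.dijkstraWithPath(graph, 1, 5)   # shortest path from vertex 1 to vertex 5
    print(algo.path())                   # [1, 2, 5]
    print(algo.distance())               # [7]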

/GeophPy-0.32.2.tar.gz/GeophPy-0.32.2/docs/gen-proc.rst

.. _chap-gen-proc-geophpy:
General Processing
******************
All the available processing techniques can be applied to a dataset through a simple command of the form:
>>> dataset.ProcessingTechnique(option1=10, option2=True, option3='relative',...)
The available `general processing` techniques (i.e. not specific to a particular geophysical method) are listed in the sections below.
Peak filtering
==============
Replace peaks in the dataset.
Peaks are detected using a *Hampel filter* or a *decision-theoretic median filter* and replaced by either NaNs or the local median value.
Examples
--------
* replacing peaks using a *Hampel filter* to detect outliers:
>>> # Peaks are replaced by local median
>>> dataset.peakfilt(method='hampel', halfwidth=5, threshold=3)
>>> # or by NaNs
>>> dataset.peakfilt(method='hampel', halfwidth=5, threshold=3, setnan=True)
* replacing peaks using a *decision-theoretic median filter* to detect outliers:
>>> # The threshold is a percentage of the local median value
>>> dataset.peakfilt(method='median', halfwidth=5, threshold=.05, mode='relative')
>>> # or in raw units
>>> dataset.peakfilt(method='median', halfwidth=5, threshold=15, mode='absolute')
Principle
---------
A centered 1-D window is slid along a flattened version of the dataset to determine whether the central element of the moving window is an outlier (peak) that has to be replaced.
To determine outliers, the median of the elements in the moving window is computed and used as a reference value.
If the deviation of the central element of the window from the local median is above a threshold, the central value is replaced by the local median (or by NaN, depending on the filter configuration ``setnan={True|False}``).
Two different filters are implemented to determine outliers (a median filter and a Hampel filter).
*Threshold value*
For the median filter, the *threshold* can be defined as a percentage of the local median (``mode='relative'``) or directly in raw units (``mode='absolute'``).
For the Hampel filter, the higher the *threshold* value, the less selective the filter is.
A threshold value of 0 is equivalent to a *standard median filter* (each element replaced by the value of the local median).
*Filters definition*
For :math:`f_{k}` the central element of a moving window :math:`\{ f_{k-K}, ..., f_{k}, ..., f_{k+K}\}` of half-width :math:`K` and of local median :math:`f^{\dagger} = \mbox{median}\{f_{k-K}, ..., f_{k}, ..., f_{k+K}\}`
The Hampel filter is defined as [PeGa16]_ :
.. math::
\mathcal{H}_{K,t}\{f_k\} =
\begin{cases}
f^{\dagger} & \mbox{if } |f_{k} - f^{\dagger}| > t \cdot Sk, \\
f_{k} & \mbox{otherwise},
\end{cases}
.. math::
Sk = 1.4826 \cdot \mbox{median}\{|f_{k-K} - f^{\dagger}|, ..., |f_{k} - f^{\dagger}|, ..., |f_{k+K} - f^{\dagger}|\}
The (decision-theoretic) median filter is defined as:
.. math::
\mathcal{M}_{K,t}\{f_k\} =
\begin{cases}
f^{\dagger} & \mbox{if } |f_{k} - f^{\dagger}| > t, \\
f_{k} & \mbox{otherwise},
\end{cases}
where :math:`t` is the filter threshold.
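
As an illustration, the Hampel filter defined above can be written in a few lines of NumPy.
The sketch below is only an illustration of the formulas (the function name and signature are invented for the example); it is not the GeophPy implementation:

.. code-block:: python

    import numpy as np

    def hampel_1d(values, halfwidth=5, threshold=3, setnan=False):
        """Replace outliers by the local median (or NaN) in a 1-D array."""
        values = np.asarray(values, dtype=float)
        filtered = values.copy()
        for k in range(len(values)):
            window = values[max(0, k - halfwidth):k + halfwidth + 1]
            local_median = np.nanmedian(window)
            # Scaled median absolute deviation (the 1.4826 factor makes it
            # consistent with the standard deviation for Gaussian data).
            Sk = 1.4826 * np.nanmedian(np.abs(window - local_median))
            if np.abs(values[k] - local_median) > threshold * Sk:
                filtered[k] = np.nan if setnan else local_median
        return filtered

With ``threshold=0``, every sample is replaced by its local median, which corresponds to the *standard median filter* mentioned above.
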
Parameters
----------
.. list-table::
:header-rows: 1
:widths: auto
:stub-columns: 1
:align: center
* - Name
- Description
- Type
- Value
* - method
- Type of the `decision-theoretic filter` used to determine outliers.
- str
- 'median', 'hampel'
* - halfwidth
- Filter half-width
- int
- 5, 10, 20, ...
* - threshold
- Filter threshold parameter. If ``threshold=0`` and ``method='hampel'``, the filter is equivalent to a *standard median filter*.
- int
- 0, 1, 2, 3, ...
* - mode
- Median filter mode. If 'relative', the threshold is a percentage of the local median value. If 'absolute', the threshold is a value.
- str
- 'relative', 'absolute'
* - setnan
- Flag to replace outliers by NaNs instead of the local median.
- bool
- ``True`` or ``False``
* - valfilt
- [For future implementation] Flag to apply filter on the ungridded data values rather than on the gridded data.
- bool
- ``True`` or ``False``
See :ref:`chap-hlvl-api-geophpy` for calling details.
Thresholding
============
Dataset thresholding in a given interval.
Examples
--------
>>> # Replacing out of range values by lower and upper bounds
>>> dataset.threshold(setmin=-10, setmax=10)
+-----------------------------------------------------+------------------------------------------------------------------+
| .. figure:: _static/figThreshold2.png | .. figure:: _static/figThresholdHisto2.png |
| :height: 6cm | :height: 6cm |
| :align: center | :align: center |
| | |
| Thresholding - Min, max thresholding - dataset. | Thresholding - Min, max thresholding - histogram. |
+-----------------------------------------------------+------------------------------------------------------------------+
>>> # by NaNs
>>> dataset.threshold(setmin=-10, setmax=10, setnan=True)
+-----------------------------------------------------+------------------------------------------------------------------+
| .. figure:: _static/figThreshold3.png | .. figure:: _static/figThresholdHisto4.png |
| :height: 6cm | :height: 6cm |
| :align: center | :align: center |
| | |
| Thresholding - NaN thresholding - dataset. | Thresholding - NaN thresholding - histogram. |
+-----------------------------------------------------+------------------------------------------------------------------+
>>> # or by each profile's median
>>> dataset.threshold(setmin=-10, setmax=10, setmed=True)
+-----------------------------------------------------+------------------------------------------------------------------+
| .. figure:: _static/figThreshold4.png | .. figure:: _static/figThresholdHisto4.png |
| :height: 6cm | :height: 6cm |
| :align: center | :align: center |
| | |
| Thresholding - Median thresholding - dataset. | Thresholding - Median thresholding - histogram. |
+-----------------------------------------------------+------------------------------------------------------------------+
Principle
---------
Each value of the dataset is compared to the given interval bounds.
Depending on the filter configuration, values outside of the interval will be replaced by the interval bounds (``setmin=valmin``, ``setmax=valmax``),
NaNs (``setnan=True``),
or the profile's median (``setmed=True``).
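
The clipping itself reduces to a few array operations.
The sketch below illustrates the ``setmin``/``setmax`` bounds and the ``setnan`` option (the per-profile ``setmed`` option is omitted); it is an illustration only, not the GeophPy implementation:

.. code-block:: python

    import numpy as np

    def threshold_values(values, setmin=None, setmax=None, setnan=False):
        """Clip values outside [setmin, setmax] to the bounds or to NaN."""
        values = np.asarray(values, dtype=float).copy()
        if setmin is not None:
            values[values < setmin] = np.nan if setnan else setmin
        if setmax is not None:
            values[values > setmax] = np.nan if setnan else setmax
        return values
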
Parameters
----------
.. list-table::
:header-rows: 1
:widths: auto
:stub-columns: 1
:align: center
* - Name
- Description
- Type
- Value
* - setmin
- Minimal interval value. All values lower than ``setmin`` will be replaced by ``setmin`` (if both ``setmed`` and ``setnan`` are ``False``).
- float
- -5, 10, 42.5, ...
* - setmax
- Maximal interval value. All values higher than ``setmax`` will be replaced by ``setmax`` (if both ``setmed`` and ``setnan`` are ``False``).
- float
- -5, 10, 42.5, ...
* - setmed
- Flag to replace out of bound data by the profile's median.
- bool
- ``True`` or ``False``
* - setnan
- Flag to replace out of bound data by NaNs.
- bool
- ``True`` or ``False``
* - valfilt
- Flag to apply filter on the ungridded data values rather than on the gridded data.
- bool
- ``True`` or ``False``
See :ref:`chap-hlvl-api-geophpy` for calling details.
Median filtering
================
Apply a median filter (*decision-theoretic* or *standard*) to the dataset.
Examples
--------
>>> # No threshold : standard median filter
>>> dataset.medianfilt(nx=3, ny=3)
+------------------------------------------------+--------------------------------------------------------+
| .. figure:: _static/figMedianFilter1.png | .. figure:: _static/figMedianFilter2.png |
| :height: 6cm | :height: 6cm |
| :align: center | :align: center |
| | |
| Median filter - Raw dataset (no threshold). | Median filter - Filtered dataset (no threshold). |
+------------------------------------------------+--------------------------------------------------------+
>>> # Threshold in raw unit : decision-theoretic median filter
>>> dataset.medianfilt(nx=3, ny=3, gap=5)
+---------------------------------------------------------+--------------------------------------------------------------+
| .. figure:: _static/figMedianFilter1.png | .. figure:: _static/figMedianFilter3.png |
| :height: 6cm | :height: 6cm |
| :align: center | :align: center |
| | |
| Median filter - Raw dataset (threshold in raw unit). | Median filter - Filtered dataset (threshold in raw unit). |
+---------------------------------------------------------+--------------------------------------------------------------+
Principle
---------
"Median filtering is a non linear process useful in reducing impulsive, or salt-and-pepper noise" [LimJ90]_.
It is capable of smoothing a few out-of-bounds pixels while preserving the image's discontinuities, without affecting the other pixels.
For each pixel in the dataset, the local median of the (**nx** x **ny**) neighboring points is calculated.
.. image:: _static/figMedianFilter.png
:height: 5cm
:align: center
A threshold value is defined and if the deviation from the local median is higher than this threshold, then the center pixel value is replaced by the local median value.
The threshold deviation from the local median can be defined:
* in percentage (``percent=10``) or raw units (``gap=5``),
* if no threshold is given, all pixels are replaced by their local medians (*standard median filter*).
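
A minimal 2-D sketch of this decision-theoretic median filter, built on SciPy's gridded median (illustration only, not the GeophPy implementation), is given below:

.. code-block:: python

    import numpy as np
    from scipy.ndimage import median_filter

    def decision_median_filter(grid, nx=3, ny=3, percent=None, gap=None):
        """Replace a pixel by its local median only if it deviates too much."""
        local_median = median_filter(grid, size=(ny, nx), mode='nearest')
        deviation = np.abs(grid - local_median)
        if percent is not None:        # threshold as a percentage of the median
            replace = deviation > np.abs(local_median) * percent / 100.0
        elif gap is not None:          # threshold in raw units
            replace = deviation > gap
        else:                          # no threshold: standard median filter
            replace = np.ones_like(grid, dtype=bool)
        return np.where(replace, local_median, grid)
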
Parameters
----------
.. list-table::
:header-rows: 1
:widths: auto
:stub-columns: 1
:align: center
* - Name
- Description
- Type
- Value
* - nx
- Size, in number of samples, of the filter in the x-direction.
- int
- 5, 10, 25, ...
* - ny
- Size, in number of samples, of the filter in the y-direction.
- int
- 5, 10, 25, ...
* - percent
- Threshold deviation (in percent) from the local median value (for absolute field measurements).
- float
-
* - gap
- Threshold deviation (in raw units) from the local median value (for relative anomaly measurements).
- float
-
* - valfilt
- [For future implementation] Flag to apply filter on the ungridded data values rather than on the gridded data.
- bool
- ``True`` or ``False``
See :ref:`chap-hlvl-api-geophpy` for calling details.
.. _chap-gen-proc-festoon-geophpy:
Festoon filtering
=================
Dataset destaggering.
The festoon filter is a destaggering filter that reduces the positioning errors along the survey profiles that result in a festoon-like effect.
An *optimum shift* is estimated based on the correlation of a particular profile and the mean of its two neighboring profiles.
This filter should be applied early in the processing flow, preferably before interpolation (or with no interpolation in the x-axis direction).
**Filter applications:** *data destaggering*
.. warning::
The correlation map computation needs a regular grid WITHOUT interpolation between profiles (i.e. the x-axis direction).
The correlation is done between each even-numbered profile and the mean of its two nearest neighbors.
In the case of an interpolation in the x-axis, an interpolated (even) profile will be equal (or quasi-equal) to the mean of its two nearest neighbors.
The computed map will be close to an auto-correlation map, resulting in a null optimum shift.
Examples
--------
>>> # Gridding data without interpolation
>>> dataset.interpolate(interpolation='none')
>>>
>>> # Uniform shift
>>> dataset.festoonfilt(method='Crosscorr', uniformshift=True)
+-----------------------------------------------------+------------------------------------------------------------------+
| .. figure:: _static/figFestoonFilter1.png | .. figure:: _static/figFestoonFilter2.png |
| :height: 6cm | :height: 6cm |
| :align: center | :align: center |
| | |
| Destaggering - Raw dataset (uniform shift). | Destaggering - Filtered dataset (uniform shift). |
+-----------------------------------------------------+------------------------------------------------------------------+
>>> # Non uniform shift
>>> dataset.festoonfilt(method='Crosscorr', uniformshift=False)
+-----------------------------------------------------+------------------------------------------------------------------+
| .. figure:: _static/figFestoonFilter1.png | .. figure:: _static/figFestoonFilter3.png |
| :height: 6cm | :height: 6cm |
| :align: center | :align: center |
| | |
| Destaggering - Raw dataset (non uniform shift). | Destaggering - Filtered dataset (non uniform shift). |
+-----------------------------------------------------+------------------------------------------------------------------+
Principle
---------
For every even profile (column) in the dataset, an optimum shift is estimated based on the correlation of the profile and the mean of its two nearest neighbors.
A correlation coefficient is hence computed for each possible shift, forming a correlation map that is used to estimate the optimum shift (maximum of correlation).
The optimum shift can be set to be uniform throughout the map (``uniformshift=True``) or different for each profile (``uniformshift=False``).
If the shift is set uniform, the mean correlation profile is used as correlation map.
At the top and bottom edges of the correlation map (high shift values), high correlation values can arise from correlations computed over very few samples.
To prevent those high correlation values from dragging the best-shift estimation, only correlations with at least 50% overlap between profiles are considered.
Similarly, a minimum correlation value (``corrmin``) can be defined to prevent a profile from being shifted if the correlation is too low.
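
The core of the destaggering step can be sketched as follows for a single even profile: the profile is compared to the mean of its two neighbours for every candidate shift, and the shift maximising the correlation (above ``corrmin``, with at least 50% overlap) is kept.
This is only an illustration of the principle, not the GeophPy implementation:

.. code-block:: python

    import numpy as np

    def best_profile_shift(profile, left, right, max_shift=10, corrmin=0.6):
        """Shift (in samples) maximising the correlation between a profile
        and the mean of its two neighbouring profiles."""
        reference = 0.5 * (left + right)
        n = len(profile)
        best_shift, best_corr = 0, corrmin
        for shift in range(-max_shift, max_shift + 1):
            if n - abs(shift) < n // 2:     # keep at least 50% overlap
                continue
            if shift >= 0:
                a, b = profile[:n - shift], reference[shift:]
            else:
                a, b = profile[-shift:], reference[:n + shift]
            corr = np.corrcoef(a, b)[0, 1]
            if corr > best_corr:
                best_shift, best_corr = shift, corr
        return best_shift
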
>>> # Correlation map
>>> dataset.correlation_plotmap()
>>> # Correlation profile
>>> dataset.correlation_plotsum()
+-----------------------------------------------------+------------------------------------------------------------------+
| .. figure:: _static/figFestoonFilterCorrMap.png | .. figure:: _static/figFestoonFilterCorrSum.png |
| :height: 6cm | :height: 6cm |
| :align: center | :align: center |
| | |
| Destaggering - Correlation map. | Destaggering - Mean correlation profile. |
+-----------------------------------------------------+------------------------------------------------------------------+
Parameters
----------
.. list-table::
:header-rows: 1
:widths: auto
:stub-columns: 1
:align: center
* - Name
- Description
- Type
- Value
* - method
- Correlation method to use to compute the correlation coefficient in the correlation map.
- str
- 'Crosscorr', 'Pearson', 'Spearman' or 'Kendall'
* - shift
- Optional shift value (in pixels) to apply to the dataset profiles. If shift=0, the shift will be determined for each profile by correlation with its neighbors.
If shift is a vector, each value in shift will be applied to its corresponding even profile.
In that case shift must have the same size as the number of even profiles.
- int or array of int
- 3, 5, 10, [2, 3, 4, ..., 3, 4] or 0
* - corrmin
- Minimum correlation coefficient value to allow shifting.
- float (in the range [0-1])
- 0.6, 0.8
* - uniformshift
- Flag to use a uniform shift on the map or a different one for each profile.
- bool
- ``True`` or ``False``
* - setmin
- Data values lower than ``setmin`` are ignored.
- float
- 12, 44.5, ..., ``None``
* - setmax
- Data values higher than ``setmax`` are ignored.
- float
- 12, 44.5, ..., ``None``
* - valfilt
- [For future implementation] Flag to apply filter on the ungridded data values rather than on the gridded data.
- bool
- ``True`` or ``False``
See :ref:`chap-hlvl-api-geophpy` for calling details.
Detrending
==========
... To Be Developed ...
Subtracting a polynomial fit from each profile in the dataset.
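
Although this section is still to be developed, per-profile polynomial detrending typically amounts to fitting a low-order polynomial along each profile and subtracting it.
A minimal NumPy sketch (illustration only, not the GeophPy implementation) could be:

.. code-block:: python

    import numpy as np

    def detrend_profiles(grid, order=1):
        """Subtract a least-squares polynomial fit from each profile (column)."""
        detrended = np.array(grid, dtype=float)
        y = np.arange(detrended.shape[0])
        for j in range(detrended.shape[1]):
            profile = detrended[:, j]
            valid = ~np.isnan(profile)
            if valid.sum() > order:
                coeffs = np.polyfit(y[valid], profile[valid], order)
                detrended[:, j] -= np.polyval(coeffs, y)
        return detrended
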
Regional trend filtering
========================
... To Be Developed ...
Remove the background (or regional response) from a dataset to highlight the sub-surface features of interest.
Example
-------
Principle
---------
Parameters
----------
See :ref:`chap-hlvl-api-geophpy` for calling details.
Wallis filtering
================
The Wallis filter is a locally adaptive contrast enhancement filter.
Based on the local statistical properties of a sub-window of the image,
it adjusts the brightness values (of the grayscale image) in the local window so that the local mean and standard deviation match target values.
**Filter applications:** *contrast enhancement*
Examples
--------
>>> dataset.wallisfilt()
+-----------------------------------------------------+------------------------------------------------------------------+
| .. figure:: _static/figWallisFilter1.png | .. figure:: _static/figWallisFilter2.png |
| :height: 6cm | :height: 6cm |
| :align: center | :align: center |
| | |
| Wallis Filter - Raw dataset. | Wallis Filter - Filtered dataset. |
+-----------------------------------------------------+------------------------------------------------------------------+
Principle
---------
A window of size (**nx**, **ny**) is slid along the image and the Wallis operator is calculated at each pixel.
The Wallis operator is defined as [STHH90]_:
.. math::
\frac{A \sigma_d}{A \sigma_{(x,y)} + \sigma_d} [f_{(x,y)} - m_{(x,y)}] + \alpha m_d + (1 - \alpha)m_{(x,y)}
where:
* :math:`A` is the amplification factor for contrast,
* :math:`\sigma_d` is the target standard deviation,
* :math:`\sigma_{(x,y)}` is the standard deviation in the current window,
* :math:`f_{(x,y)}` is the center pixel of the current window,
* :math:`m_{(x,y)}` is the mean of the current window,
* :math:`\alpha` is the edge factor (controlling the weighting between the target mean and the locally observed mean, and hence how much the local brightness range is reduced or increased),
* :math:`m_d` is the target mean.
As the Wallis filter is designed for grayscale images, the data are internally converted to brightness levels before applying the filter.
The conversion is based on the minimum and maximum values in the dataset and uses 256 levels (from 0 to 255).
The filtered brightness levels are converted back to data values afterwards.
A quite large window is recommended to ensure algorithm stability.
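
A direct transcription of the Wallis operator above, applied to a single window of an image already converted to gray levels, could look as follows.
The parameter names loosely follow the table below, and the target standard deviation is an assumed extra argument; this is an illustration of the formula, not the GeophPy implementation:

.. code-block:: python

    import numpy as np

    def wallis_operator(window, targmean=125.0, targstdev=50.0, setgain=8.0,
                        limitstdev=25.0, edgefactor=0.1):
        """Wallis operator for the centre pixel of a (ny, nx) window."""
        f_xy = window[window.shape[0] // 2, window.shape[1] // 2]
        m_xy = window.mean()
        # Limit the local standard deviation to avoid excessive gain.
        s_xy = min(window.std(), limitstdev)
        gain = (setgain * targstdev) / (setgain * s_xy + targstdev)
        return (gain * (f_xy - m_xy) +
                edgefactor * targmean + (1.0 - edgefactor) * m_xy)
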
Parameters
----------
.. list-table::
:header-rows: 1
:widths: auto
:stub-columns: 1
:align: center
* - Name
- Description
- Type
- Value
* - nx
- Size, in number of samples, of the filter in the x-direction.
- int
- 5, 10, 25, ...
* - ny
- Size, in number of samples, of the filter in the y-direction.
- int
- 5, 10, 25, ...
* - targmean
- The target mean (in gray levels).
- int
- 125
* - setgain
- Amplification factor for contrast.
- int
- 8
* - limitstdev
- Limitation on the window standard deviation, to prevent too high a gain value if the data are dispersed.
- int
- 25
* - edgefactor
- Brightness forcing factor (:math:`\alpha`), controls ratio of edge to background intensities.
- float (in the range of [0,1])
-
* - valfilt
- [For future implementation] Flag to apply filter on the scattered data values rather than on the gridded data.
- bool
- ``True`` or ``False``
See :ref:`chap-hlvl-api-geophpy` for calling details.
Ploughing filtering
===================
Directional filter.
Apply a directional filter to reduce the agricultural ploughing effect (or any other directional feature) in the dataset.
Examples
--------
>>> dataset.ploughfilt(apod=0, azimuth=90, cutoff=100, width=3)
+-----------------------------------------------------+------------------------------------------------------------------+
| .. figure:: _static/figPloughFilter1.png | .. figure:: _static/figPloughFilter2.png |
| :height: 6cm | :height: 6cm |
| :align: center | :align: center |
| | |
| Plough Filter - Raw dataset. | Plough Filter - Filtered dataset. |
+-----------------------------------------------------+------------------------------------------------------------------+
>>> # Raw dataset spectral plot
>>> dataset.spectral_plotmap(plottype='magnitude', logscale=True)
>>> dataset.plot_directional_filter(apod=0, azimuth=90, cutoff=100, width=3)
+-----------------------------------------------------+------------------------------------------------------------------+
| .. figure:: _static/figPloughFilter3.png | .. figure:: _static/figPloughFilter4.png |
| :height: 6cm | :height: 6cm |
| :align: center | :align: center |
| | |
| Plough Filter - Raw magnitude spectrum. | Plough Filter - Directional filter. |
+-----------------------------------------------------+------------------------------------------------------------------+
Principle
---------
The directional feature in the dataset is filtered in the spectral domain using the combination (:math:`\mathcal{F}`) of
a *gaussian low-pass filter* of order 2 (:math:`\mathcal{F}_{GLP}`) and
a *gaussian directional filter* (:math:`\mathcal{F}_{DIR}`) defined as [TABB01]_ :
.. math::
\mathcal{F}(\rho, \theta, f_c) &= \mathcal{F}_{GLP}(\rho, f_c) \ast \mathcal{F}_{DIR}(\rho, \theta) \\
&= e^{-(\rho / f_c)^2} \ast ( 1-e^{-\rho^2 / \tan(\theta-\theta_0)^n} )
where:
* :math:`\rho` and :math:`\theta` are the current point polar coordinates,
* :math:`f_c` is the gaussian low-pass filter cutoff frequency,
* :math:`\theta_0` is the directional filter's azimuth,
* :math:`n` is the parameter that controls the filter width.
The filter's width is determined by the value of :math:`n` (:numref:`figPloughFiltn=2`, :numref:`figPloughFiltn=3` and :numref:`figPloughFiltn=4`) and
the *gaussian low-pass filter* component is neglected if no cutoff frequency is defined (``cutoff=None``, see :numref:`figPloughFiltfc=none`).
>>> dataset.plot_directional_filter(azimuth=30, cutoff=50, width=2)
>>> dataset.plot_directional_filter(azimuth=30, cutoff=50, width=3)
+------------------------------------------------------+----------------------------------------------------------------+
| .. _figPloughFiltn=2: | .. _figPloughFiltn=3: |
| | |
| .. figure:: _static/figPloughFilter5.png | .. figure:: _static/figPloughFilter6.png |
| :height: 6cm | :height: 6cm |
| :align: center | :align: center |
| | |
| Plough Filter - Directional Filter (**n=2**) | Plough Filter - Directional Filter (**n=3**) |
+------------------------------------------------------+----------------------------------------------------------------+
>>> dataset.plot_directional_filter(azimuth=30, cutoff=50, width=4)
>>> dataset.plot_directional_filter(azimuth=30, cutoff=None, width=4)
+------------------------------------------------------+----------------------------------------------------------------+
| .. _figPloughFiltn=4: | .. _figPloughFiltfc=none: |
| | |
| .. figure:: _static/figPloughFilter7.png | .. figure:: _static/figPloughFilter8.png |
| :height: 6cm | :height: 6cm |
| :align: center | :align: center |
| | |
| Plough Filter - Directional Filter (**n=4**) | Plough Filter - Pure directional filter (**fc=None**) |
+------------------------------------------------------+----------------------------------------------------------------+
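The filter is evaluated on the polar coordinates (:math:`\rho`, :math:`\theta`) of the spatial-frequency grid; a minimal NumPy sketch of building such a grid (illustrative only, not the GeophPy implementation; the grid size is arbitrary):
>>> import numpy as np
>>> ny, nx = 256, 256                                              # hypothetical grid size
>>> kx, ky = np.meshgrid(np.fft.fftfreq(nx), np.fft.fftfreq(ny))   # spatial frequencies
>>> rho = np.hypot(kx, ky)                                         # radial coordinate of each point
>>> theta = np.arctan2(ky, kx)                                     # angular coordinate of each point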
Parameters
----------
.. list-table::
:header-rows: 1
:widths: auto
:stub-columns: 1
:align: center
* - Name
- Description
- Type
- Value
* - apod
- Apodization factor (%), to limit Gibbs phenomenon at jump discontinuities.
- float
- 0, 5, 10, 20, 25, ...
* - azimuth
    - Filter azimuth in degrees.
- float
- 0, 10, 33.25, ...
* - cutoff
    - Cutoff spatial frequency (in number of samples).
- int
- 5, 10, 100, ... or ``None``
* - width
    - Filter width parameter.
- int
- 2, 3, 4, ...
* - valfilt
- [For future implementation] Flag to apply filter on the scattered data values rather than on the gridded data.
- bool
- ``True`` or ``False``
See :ref:`chap-hlvl-api-geophpy` for calling details.
Zero-Mean Traversing
====================
Subtracts the mean (or median) of each traverse (profile) in the dataset.
**Filter applications:** *(magnetic) probe compensation, data destriping, edge matching...*
Examples
--------
>>> # Raw data display
>>> dataset.plot(plottype='2D-SURFACE')[0].show()
>>> dataset.histo_plot(cmapdisplay=False, coloredhisto=True).show()
+-----------------------------------------------------+------------------------------------------------------------------+
| .. figure:: _static/figZeroMeanFilter1.png | .. figure:: _static/figZeroMeanFilterHisto1.png |
| :height: 6cm | :height: 6cm |
| :align: center | :align: center |
| | |
| Zero-mean traverse - Raw dataset. | Zero-mean traverse - Raw histogram. |
+-----------------------------------------------------+------------------------------------------------------------------+
>>> # Zero-Mean Traverse data
>>> dataset.zeromeanprofile(setvar='mean')
+--------------------------------------------------------+------------------------------------------------------------------+
| .. figure:: _static/figZeroMeanFilter2.png | .. figure:: _static/figZeroMeanFilterHisto2.png |
| :height: 6cm | :height: 6cm |
| :align: center | :align: center |
| | |
| Zero-mean traverse - Filtered dataset (zero-mean). | Zero-mean traverse - Filtered histogram (zero-mean). |
+--------------------------------------------------------+------------------------------------------------------------------+
>>> # Zero-Median Traverse data
>>> dataset.zeromeanprofile(setvar='median')
+----------------------------------------------------------+------------------------------------------------------------------+
| .. figure:: _static/figZeroMeanFilter3.png | .. figure:: _static/figZeroMeanFilterHisto3.png |
| :height: 6cm | :height: 6cm |
| :align: center | :align: center |
| | |
| Zero-mean traverse - Filtered dataset (zero-median). | Zero-mean traverse - Filtered histogram (zero-median). |
+----------------------------------------------------------+------------------------------------------------------------------+
Principle
---------
For each traverse (profile) in the dataset, the mean (or median) is calculated and subtracted, leading to a zero-mean (or zero-median) survey [AsGA08]_.
.. note::
   This filter is strictly equivalent to the constant destriping filter in the *'mono'* sensor configuration, using the *'additive'* destriping method and *Nprof=0*:
>>> dataset.zeromeanprofile(setvar='median')
>>> dataset.destripecon(Nprof=0, method='additive', config='mono', reference='median')
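A minimal NumPy sketch of the operation (illustrative only, not the GeophPy implementation; the array shape is an assumption, with one profile per row):
>>> import numpy as np
>>> profiles = np.random.rand(10, 100)                                       # 10 profiles of 100 samples
>>> zero_mean = profiles - np.nanmean(profiles, axis=1, keepdims=True)       # zero-mean traverses
>>> zero_median = profiles - np.nanmedian(profiles, axis=1, keepdims=True)   # zero-median traverses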
Parameters
----------
.. list-table::
:header-rows: 1
:widths: auto
:stub-columns: 1
:align: center
* - Name
- Description
- Type
- Value
* - setvar
    - Profile statistical property to be subtracted from each profile.
- str
- 'mean' or 'median'
* - setmin
- Data values lower than ``setmin`` are ignored.
- float
- 12, 44.5, ..., ``None``
* - setmax
    - Data values higher than ``setmax`` are ignored.
- float
- 12, 44.5, ..., ``None``
* - valfilt
- [For future implementation] Flag to apply filter on the scattered data values rather than on the gridded data.
- bool
- ``True`` or ``False``
See :ref:`chap-hlvl-api-geophpy` for calling details.
.. _chap-gen-proc-destipcon-geophpy:
Constant destriping
===================
Remove the strip noise effect from the dataset.
The strip noise effect arises from profile-to-profile differences in sensor height, orientation, drift or sensitivity (multi-sensor arrays).
Constant destriping is done using the moment matching method [GaCs00]_.
**Filter applications:** *(magnetic) probe compensation, data destriping, edge matching...*
Examples
--------
>>> dataset.destripecon(Nprof=4, method='additive')
+----------------------------------------------------------+------------------------------------------------------------------+
| .. figure:: _static/figDestriping1.png | .. figure:: _static/figDestriping2.png |
| :height: 6cm | :height: 6cm |
| :align: center | :align: center |
| | |
| Destriping - Raw dataset. | Destriping - Filtered dataset. |
+----------------------------------------------------------+------------------------------------------------------------------+
Principle
---------
In constant destriping, a linear relationship is assumed between profile-to-profile offset (means) and gain (standard deviation).
The statistical moments (mean :math:`m_i` and standard deviation :math:`\sigma_i`) of each profile in the dataset are computed and matched to reference values.
**Reference values**
Typical reference values are:
* the mean (:math:`m_d`) and standard deviation (:math:`\sigma_d`) of the :math:`N` neighboring profiles (``Nprof=4``),
* the mean and standard deviation of the global dataset (``Nprof='all'``),
* the mean and standard deviation of each profile (``Nprof=0``, zero-mean traverse),
* alternatively, one can use the median and interquartile range instead of mean and standard deviation (``reference='median'``).
**Additive vs multiplicative destriping**
The corrected value can be calculated by
* an additive relation (``method='additive'``) [RiJi06]_, [Scho07]_:
.. math::
f_{corr} = (f - m_i) \frac{\sigma_d}{\sigma_i} + m_d
* or a multiplicative relation (``method='multiplicative'``):
.. math::
f_{corr} = f \frac{\sigma_d}{\sigma_i} \frac{m_d}{m_i}
where
:math:`f_{corr}` is the corrected value,
:math:`\sigma_d` is the reference standard deviation,
:math:`\sigma_i` is the current profile standard deviation,
:math:`f` is the current value,
:math:`m_i` is the current profile mean and
:math:`m_d` is the reference mean.
.. note::
   Ground surveys (unlike remote sensing) often show profile-to-profile offsets but rarely gain changes, so that matching only the profile means (or medians) is usually appropriate (``config='mono'``):
.. math::
f_{corr} = f - m_i + m_d
This is similar to the zero-mean filter, with the difference that the profile mean (or median) is set to a reference value instead of zero.
If ``Nprof`` is set to zero for the reference computation, this is strictly equivalent to the zero-mean filter:
>>> dataset.destripecon(Nprof=0, method='additive', config='mono', reference='mean')
>>> # Strictly equals to
>>> dataset.zeromeanprofile(setvar='mean')
*Summary of the destriping configurations*
.. list-table::
:header-rows: 1
:widths: auto
:stub-columns: 1
:align: center
* - Configuration / Method
- ``'additive'``
- ``'multiplicative'``
* - ``'mono'``
- :math:`f_{corr} = f - m_i + m_d`
- :math:`f_{corr} = f \frac{m_d}{m_i}`
* - ``'multi'``
- :math:`f_{corr} = (f - m_i) \frac{\sigma_d}{\sigma_i} + m_d`
- :math:`f_{corr} = f \frac{\sigma_d}{\sigma_i} \frac{m_d}{m_i}`
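A minimal NumPy sketch of the additive correction of one profile in the *'multi'* configuration (illustrative only, not the GeophPy implementation; the reference values are assumed to be precomputed from the ``Nprof`` neighboring profiles):
>>> import numpy as np
>>> profile = np.random.rand(100)                        # one profile of the dataset
>>> m_d, s_d = 0.5, 0.1                                  # reference mean and standard deviation
>>> m_i, s_i = np.nanmean(profile), np.nanstd(profile)   # current profile moments
>>> corrected = (profile - m_i) * s_d / s_i + m_d        # additive moment matching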
**Mean cross-track profile display**
The data mean cross-track profile before and after destriping can be displayed as follows:
>>> dataset.meantrack_plot(Nprof=4, method='additive', reference='median', plotflag='raw')
>>> dataset.meantrack_plot(Nprof=4, method='additive', reference='median', plotflag='both')
+----------------------------------------------------------+------------------------------------------------------------------+
| .. figure:: _static/figDestriping3.png | .. figure:: _static/figDestriping4.png |
| :height: 6cm | :height: 6cm |
| :align: center | :align: center |
| | |
| Destriping - mean cross-track profile (before). | Destriping - mean cross-track (after). |
+----------------------------------------------------------+------------------------------------------------------------------+
Parameters
----------
.. list-table::
:header-rows: 1
:widths: auto
:stub-columns: 1
:align: center
* - Name
- Description
- Type
- Value
* - Nprof
    - Number of neighboring profiles used to compute the reference values.
- int or 'all'
    - 0, 4, ... or 'all'
* - setmin
- Data values lower than ``setmin`` are ignored.
- float
- 12, 44.5, ..., ``None``
* - setmax
    - Data values higher than ``setmax`` are ignored.
- float
- 12, 44.5, ..., ``None``
* - method
    - Destriping method.
- str
- 'additive' or 'multiplicative'
* - reference
    - Reference statistic used for destriping.
- str
- 'mean' or 'median'
* - config
    - Sensor configuration.
- str
- 'mono' or 'multi'
* - valfilt
- [For future implementation] Flag to apply filter on the scattered data values rather than on the gridded data.
- bool
- ``True`` or ``False``
See :ref:`chap-hlvl-api-geophpy` for calling details.
Curve destriping
================
... To Be Developed ...
Remove the strip noise effect from the dataset by fitting and subtracting a mean polynomial curve to each profile.
| PypiClean |
/LUBEAT-0.13.1-cp38-cp38-macosx_10_9_x86_64.whl/econml/automated_ml/_automated_ml.py |
# AzureML
from azureml.core.experiment import Experiment
from azureml.core import Workspace
from azureml.train.automl.automlconfig import AutoMLConfig
from azureml._base_sdk_common.common import ProjectSystemException
from sklearn.multioutput import MultiOutputRegressor
# helper imports
import time
import copy
"""Automated Machine Learning Support For EconML Estimators. This allows analysts
to use AutomatedML to automate the process of selecting models for models Y, T,
and the final model of their causal inference estimator.
"""
LINEAR_MODELS_SET = set([
"ElasticNet",
"LassoLars",
"LinearRegressor",
"FastLinearRegressor",
"OnlineGradientDescentRegressor",
"SGDRegressor"
])
SAMPLE_WEIGHTS_MODELS_SET = set([
"ElasticNet",
"LightGBM",
"GradientBoostingRegressor",
"DecisionTreeRegressor",
"KNeighborsRegressor",
"LassoLars",
"SGDRegressor",
"RandomForestRegressor",
"ExtraTreesRegressor",
"LinearRegressor",
"FastLinearRegressor",
"OnlineGradientDescentRegressor"
])
def setAutomatedMLWorkspace(create_workspace=False,
create_resource_group=False, workspace_region=None, *,
auth=None, subscription_id, resource_group, workspace_name):
"""Set configuration file for AutomatedML actions with the EconML library. If
``create_workspace`` is set true, a new workspace is created
for the user.
Parameters
----------
create_workspace: Boolean, optional, default False
If set to true, a new workspace will be created if the specified
workspace does not exist.
create_resource_group: Boolean, optional, default False
If set to true, a new resource_group will be created if the specified
resource_group does not exist.
workspace_region: String, optional
        Region of the workspace; only necessary if create_workspace is set to true and a
        new workspace is being created.
auth: azureml.core.authentication.AbstractAuthentication, optional
If set EconML will use auth object for handling Azure Authentication.
Otherwise, EconML will use interactive automation, opening an
authentication portal in the browser.
subscription_id: String, required
Azure subscription ID for the subscription under which to run the models
resource_group: String, required
Name of resource group of workspace to be created or set.
workspace_name: String, required
        Name of the workspace to be created or set.
"""
try:
ws = Workspace(subscription_id=subscription_id, resource_group=resource_group,
workspace_name=workspace_name, auth=auth)
# write the details of the workspace to a configuration file to the notebook library
ws.write_config()
print("Workspace configuration has succeeded.")
except ProjectSystemException:
if(create_workspace):
if(create_resource_group):
print("Workspace not accessible. Creating a new workspace and \
resource group.")
ws = Workspace.create(name=workspace_name,
subscription_id=subscription_id,
resource_group=resource_group,
location=workspace_region,
create_resource_group=create_resource_group,
sku='basic',
auth=auth,
exist_ok=True)
ws.get_details()
else:
print("Workspace not accessible. Set \
create_resource_group = True and run again to create a new \
workspace and resource group.")
else:
print("Workspace not accessible. Set create_workspace = True \
to create a new workspace.")
def addAutomatedML(baseClass):
"""
Enables base class to use EconAutoMLConfig objects instead of models
by adding the AutomatedMLMixin to specified base class. Once this Mixin
has been added, EconML classes can be initialized with EconAutoMLConfig
objects rather than scikit learn models.
Parameters
----------
baseClass: Class, required
Definition of a class that will serve as the parent class of the
AutomatedMLMixin.
Returns
----------
automatedMLClass: Class
A modified version of ``baseClass`` that accepts the parameters of the
        AutomatedML Mixin in addition to the parameters of the original class.
"""
class AutomatedMLClass(AutomatedMLMixin, baseClass):
pass
return AutomatedMLClass
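# Illustrative use of addAutomatedML (not part of the original module); the estimator
# class and the AutoML task settings below are placeholders for whatever EconML
# estimator and AutomatedML configuration the analyst actually uses:
#
#   AutomatedEstimator = addAutomatedML(SomeEconMLEstimator)
#   est = AutomatedEstimator(model_y=EconAutoMLConfig(task='regression'),
#                            model_t=EconAutoMLConfig(task='regression'))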
class AutomatedMLModel():
def __init__(self, automl_config, workspace, experiment_name_prefix="aml_experiment"):
"""
scikit-learn style model fitted and specified with automatedML.
automatedML uses AzureML's Automated Machine Learning library
to automatically preprocess data, specify features, and
selects a model given a pair of training data and labels.
Parameters
----------
automl_config: azureml.train.automl.automlconfig.AutoMLConfig, required
Configuration for submitting an Automated Machine Learning experiment in Azure Machine Learning.
This configuration object contains and persists the parameters for configuring the experiment
run parameters, as well as the training data to be used at run time. For guidance on selecting
your settings, you may refer to
https://docs.microsoft.com/azure/machine-learning/service/how-to-configure-auto-train.
        workspace: azureml.core.workspace.Workspace, required
            The workspace under which the automatedML experiments will be run.
experiment_name_prefix: String, optional
            Prefix of the experiment name generated by SciKitAutoMLModel. The full name of
the experiment will be {EXPERIMENT_NAME_PREFIX}_{INITIALIZE_EXPERIMENT_TIMESTAMP}.
Must be comprised of alphanumeric characters, hyphens, underscores and have at most 18 characters.
"""
self._innerModel = _InnerAutomatedMLModel(
automl_config, workspace, experiment_name_prefix=experiment_name_prefix)
def fit(self, X, y, sample_weight=None):
"""
Select and fit model.
Parameters
----------
X: numpy.ndarray or pandas.DataFrame, required
The training features to use when fitting pipelines during AutoML experiment.
y: numpy.ndarray or pandas.DataFrame, required
Training labels to use when fitting pipelines during AutoML experiment.
sample_weight: numpy.ndarray or pandas.DataFrame, optional
The weight to give to each training sample when running fitting pipelines,
each row should correspond to a row in X and y data.
experiment_name_prefix: String, optional
            Prefix of the experiment name generated by SciKitAutoMLModel. The full name of
the experiment will be {EXPERIMENT_NAME_PREFIX}_{INITIALIZE_EXPERIMENT_TIMESTAMP}.
Must be comprised of alphanumeric characters, hyphens, underscores and have at most 18 characters.
"""
# if y is a multioutput model
if y.ndim > 1:
# Make sure second dimension has 1 or more item
if y.shape[1] > 1:
# switch _inner Model to a MultiOutputRegressor
self._innerModel = MultiOutputRegressor(self._innerModel)
self._innerModel.fit(X, y, sample_weight=sample_weight)
return
else:
# flatten array as automl only takes vectors for y
y = y.flatten()
self._innerModel.fit(X, y, sample_weight=sample_weight)
def predict(self, X):
"""
Predict using selected and fitted model.
X: numpy.ndarray or pandas.DataFrame, required
The training features to use for predicting labels
"""
return self._innerModel.predict(X)
def predict_proba(self, X):
"""
Predict using selected and fitted model.
X: numpy.ndarray or pandas.DataFrame, required
The training features to use for predicting label probabilities.
"""
return self._innerModel.predict_proba(X)
class _InnerAutomatedMLModel():
# Inner single model to be passed that wrapper can use to pass into MultiOutputRegressor
def __init__(self, automl_config, workspace,
experiment_name_prefix="aml_experiment"):
self._show_output = automl_config._show_output
self._workspace = workspace
self._automl_config = automl_config
self._experiment_name_prefix = experiment_name_prefix
def get_params(self, deep=True):
# Must be implemented for MultiOutputRegressor to view _InnerAutomatedMLModel
# as an sklearn estimator
return {
'workspace': self._workspace,
'automl_config': self._automl_config,
'experiment_name_prefix': self._experiment_name_prefix
}
def fit(self, X, y, sample_weight=None):
# fit implementation for a single output model.
# Create experiment for specified workspace
automl_config = copy.deepcopy(self._automl_config)
current_time = time.localtime()
current_time_string = time.strftime('%y_%m_%d-%H_%M_%S', current_time)
experiment_name = self._experiment_name_prefix + "_" + current_time_string
self._experiment = Experiment(self._workspace, experiment_name)
# Configure automl_config with training set information.
automl_config.user_settings['X'] = X
automl_config.user_settings['y'] = y
automl_config.user_settings['sample_weight'] = sample_weight
        # Wait for the remote run to complete, then set the model
print("Experiment " + experiment_name + " has started.")
local_run = self._experiment.submit(automl_config, show_output=self._show_output)
print("Experiment " + experiment_name + " completed.")
_, self._model = local_run.get_output()
def predict(self, X):
return self._model.predict(X)
def predict_proba(self, X):
return self._model.predict_proba(X)
class AutomatedMLMixin():
def __init__(self, *args, **kwargs):
"""
Mixin enabling users to leverage automatedML as their model of choice in
Double Machine Learners and Doubly Robust Learners. It instantiates
AutomatedMLModels for each automl_config provided and pass them as
parameters into its parent class.
Parameters
----------
args: List, optional
args that are passed in order to initiate the final automatedML run.
            Any arg that is an AutoMLConfig will be converted into an
AutomatedMLModel.
kwargs: Dict, optional
kwargs that are passed in order to initiate the final automatedML run.
            Any kwarg that is an AutoMLConfig will be converted into an
AutomatedMLModel.
"""
# Loop through the kwargs and args if any of them is an AutoMLConfig file, pass them
# create model and pass model into final.
new_args = ()
for idx, arg in enumerate(args):
# If item is an automl config, get its corresponding
# AutomatedML Model and add it to new_Args
if isinstance(arg, EconAutoMLConfig):
arg = self._get_automated_ml_model(arg, f"arg{idx}")
new_args += (arg,)
for key in kwargs:
kwarg = kwargs[key]
# If item is an automl config, get its corresponding
# AutomatedML Model and set it for this key in
# kwargs
if isinstance(kwarg, EconAutoMLConfig):
kwargs[key] = self._get_automated_ml_model(kwarg, key)
super().__init__(*new_args, **kwargs)
def _get_automated_ml_model(self, automl_config, prefix):
# takes in either automated_ml config and instantiates
# an AutomatedMLModel
# The prefix can only be 18 characters long
# because prefixes come from kwarg_names, we must ensure they are
# short enough.
prefix = prefix[:18]
# Get workspace from config file.
workspace = Workspace.from_config()
return AutomatedMLModel(automl_config, workspace,
experiment_name_prefix=prefix)
class EconAutoMLConfig(AutoMLConfig):
def __init__(self, sample_weights_required=False, linear_model_required=False, show_output=False, **kwargs):
"""
Azure AutoMLConfig object with added guards to ensure correctness when used
with EconML
Parameters
----------
sample_weights_required: Boolean, optional, default False
If set true, only models that require sample weights will be selected during
AutomatedML.
linear_model_required: Boolean, optional, default False
If set to true, only linear models will be selected during AutomatedML.
show_output: Boolean, optional, default False
If set to true, outputs for the corresponding AutomatedMLModel
will be shown when it is fitted.
kwargs: list, optional
            List of kwargs to be passed to the corresponding AutoMLConfig object.
To view the full documentation of the kwargs, you may refer to
https://docs.microsoft.com/en-us/python/api/azureml-train-automl-client
/azureml.train.automl.automlconfig.automlconfig?view=azure-ml-py
"""
whitelist_models = None
if linear_model_required and sample_weights_required:
            # Take the intersection of the whitelists for sample
# weights and linear models
whitelist_models = list(LINEAR_MODELS_SET.intersection(SAMPLE_WEIGHTS_MODELS_SET))
else:
if(linear_model_required):
whitelist_models = list(LINEAR_MODELS_SET)
if(sample_weights_required):
whitelist_models = list(SAMPLE_WEIGHTS_MODELS_SET)
kwargs['whitelist_models'] = whitelist_models
# show output is not stored in the config in AutomatedML, so we need to make it a field.
self._show_output = show_output
super().__init__(**kwargs) | PypiClean |
/HTML_Auto-0.0.5-py3-none-any.whl/HTML/Auto.py | import re
from struct import unpack
__version__='0.0.5'
class Tag:
def __init__( self, params={} ):
self.encode = 1 if 'encode' in params else 0
self.sort = 1 if 'sort' in params else 0
self.level = params['level'] if 'level' in params else 0
self.encodes = params['encodes'] if 'encodes' in params else ''
self.indent = params['indent'] if 'indent' in params else ''
self.newline = "\n" if 'indent' in params else ''
self.encoder = Encoder()
def tag( self, params={} ):
tag = params['tag']
cdata = params['cdata'] if 'cdata' in params else ''
attr = params['attr'] if 'attr' in params else {}
ctype = type( cdata )
rendered = ''
no_indent = 0
if not type( attr ) is Attr:
attr = Attr( attr, self.sort )
if ctype is list:
if type( cdata[0] ) is dict:
self.level += 1
rendered = self.newline
for hash in cdata:
rendered += self.tag( hash )
self.level -= 1
else:
string = ''
for scalar in cdata:
string += self.tag({ 'tag': tag, 'attr': attr, 'cdata': scalar })
return string
elif ctype is dict:
self.level += 1
rendered = self.newline + self.tag( cdata )
self.level -= 1
else:
# empty tag
if not len( str( cdata ) ):
return '<' + tag + str(attr) + ' />'
rendered = self.encoder.encode( cdata, self.encodes ) if self.encode else cdata
no_indent = 1
indent = '' if no_indent else self.indent * self.level
return (self.indent * self.level) + \
'<' + tag + str( attr ) + '>' + \
str( rendered ) + indent + \
'</' + tag + '>' + self.newline
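# Illustrative usage of Tag (not part of the original module); the expected output
# follows from the logic above:
#
#   tag = Tag({'sort': 1})
#   tag.tag({'tag': 'p', 'attr': {'class': 'para'}, 'cdata': 'hello'})
#   # -> '<p class="para">hello</p>'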
class Attr:
def __init__( self, params={}, sort=0 ):
self.params = params
self.sort = sort
def __str__( self ):
attr = ''
seen = {}
keys = sorted( self.params.keys() ) if self.sort else self.params.keys()
for key in keys:
if not key in seen:
val = self.params[key]
val = self.stringify( val ) if type( val ) is dict else val
val = self.rotate( val ) if type( val ) is list else val
attr += ' %s="%s"' % ( self.key( key ), self.val( val ) )
seen[key] = 1
return attr
def key( self, key ):
key = re.sub( '\s+', '', key )
key = re.sub( '["\'>=\/]', '', key )
return key
def val( self, val ):
val = str(val)
val = re.sub( '"', '', val )
return val.strip()
def rotate( self, array ):
val = array.pop(0)
array.append( val )
return val
def stringify( self, attrs ):
keys = sorted( attrs.keys() ) if self.sort else attrs.keys()
vals = []
for key in keys:
val = attrs[key]
if type( val ) is list:
val = self.rotate( val )
elif type( val ) is dict:
k = sorted( val.keys() ) if self.sort else val.keys()
val = k[0]
vals.append( '%s: %s' % ( key, val ) )
trail = ';' if len( vals ) else ''
return '; '.join( vals ) + trail
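# Illustrative usage of Attr (not part of the original module); with sort enabled
# the attributes are emitted in key order and dict values become CSS-style strings:
#
#   attr = Attr({'style': {'color': 'red'}, 'id': 'x'}, sort=1)
#   str(attr)
#   # -> ' id="x" style="color: red;"'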
class Encoder:
def encode( self, string, *args):
def num_entity(char):
try:
hex = unpack( 'B', bytes( char, 'utf_8' ) )
except TypeError:
hex = unpack( 'B', bytes( char ) )
return '&#x%X;' % hex[0]
def default(m):
if m.group(0) in self.char2entity: return self.char2entity[ m.group(0) ]
else: return num_entity( m.group(0) )
if args and len(str(args[0])):
lookup = {}
def custom(m):
if m.group(0) in lookup: return lookup[ m.group(0) ]
else: return m.group(0)
for c in str( args[0] ):
lookup[c] = num_entity(c) if not c in self.char2entity else self.char2entity[c]
string = re.sub( r'.', custom, string )
else:
# Encode control chars, high bit chars and '<', '&', '>', ''' and '"'
string = re.sub( r"([^\n\r\t !\#\$%\(-;=?-~])", default, string )
return string
def encode_hex( self, *args ):
tmp = self.char2entity
self.char2entity = {}
string = self.encode( *args )
self.char2entity = tmp
return string
def __init__(self):
self.entity2char = {
'amp' : '&', # ampersand
'gt' : '>', # greater than
'lt' : '<', # less than
'quot' : '"', # double quote
'apos' : "'", # single quote
# PUBLIC ISO 8879-1986//ENTITIES Added Latin 1//EN//HTML
'AElig' : chr( 198 ), # capital AE diphthong (ligature)
'Aacute': chr( 193 ), # capital A, acute accent
'Acirc' : chr( 194 ), # capital A, circumflex accent
'Agrave': chr( 192 ), # capital A, grave accent
'Aring' : chr( 197 ), # capital A, ring
'Atilde': chr( 195 ), # capital A, tilde
'Auml' : chr( 196 ), # capital A, dieresis or umlaut mark
'Ccedil': chr( 199 ), # capital C, cedilla
'ETH' : chr( 208 ), # capital Eth, Icelandic
'Eacute': chr( 201 ), # capital E, acute accent
'Ecirc' : chr( 202 ), # capital E, circumflex accent
'Egrave': chr( 200 ), # capital E, grave accent
'Euml' : chr( 203 ), # capital E, dieresis or umlaut mark
'Iacute': chr( 205 ), # capital I, acute accent
'Icirc' : chr( 206 ), # capital I, circumflex accent
'Igrave': chr( 204 ), # capital I, grave accent
'Iuml' : chr( 207 ), # capital I, dieresis or umlaut mark
'Ntilde': chr( 209 ), # capital N, tilde
'Oacute': chr( 211 ), # capital O, acute accent
'Ocirc' : chr( 212 ), # capital O, circumflex accent
'Ograve': chr( 210 ), # capital O, grave accent
'Oslash': chr( 216 ), # capital O, slash
'Otilde': chr( 213 ), # capital O, tilde
'Ouml' : chr( 214 ), # capital O, dieresis or umlaut mark
'THORN' : chr( 222 ), # capital THORN, Icelandic
'Uacute': chr( 218 ), # capital U, acute accent
'Ucirc' : chr( 219 ), # capital U, circumflex accent
'Ugrave': chr( 217 ), # capital U, grave accent
'Uuml' : chr( 220 ), # capital U, dieresis or umlaut mark
'Yacute': chr( 221 ), # capital Y, acute accent
'aacute': chr( 225 ), # small a, acute accent
'acirc' : chr( 226 ), # small a, circumflex accent
'aelig' : chr( 230 ), # small ae diphthong (ligature)
'agrave': chr( 224 ), # small a, grave accent
'aring' : chr( 229 ), # small a, ring
'atilde': chr( 227 ), # small a, tilde
'auml' : chr( 228 ), # small a, dieresis or umlaut mark
'ccedil': chr( 231 ), # small c, cedilla
'eacute': chr( 233 ), # small e, acute accent
'ecirc' : chr( 234 ), # small e, circumflex accent
'egrave': chr( 232 ), # small e, grave accent
'eth' : chr( 240 ), # small eth, Icelandic
'euml' : chr( 235 ), # small e, dieresis or umlaut mark
'iacute': chr( 237 ), # small i, acute accent
'icirc' : chr( 238 ), # small i, circumflex accent
'igrave': chr( 236 ), # small i, grave accent
'iuml' : chr( 239 ), # small i, dieresis or umlaut mark
'ntilde': chr( 241 ), # small n, tilde
'oacute': chr( 243 ), # small o, acute accent
'ocirc' : chr( 244 ), # small o, circumflex accent
'ograve': chr( 242 ), # small o, grave accent
'oslash': chr( 248 ), # small o, slash
'otilde': chr( 245 ), # small o, tilde
'ouml' : chr( 246 ), # small o, dieresis or umlaut mark
'szlig' : chr( 223 ), # small sharp s, German (sz ligature)
'thorn' : chr( 254 ), # small thorn, Icelandic
'uacute': chr( 250 ), # small u, acute accent
'ucirc' : chr( 251 ), # small u, circumflex accent
'ugrave': chr( 249 ), # small u, grave accent
'uuml' : chr( 252 ), # small u, dieresis or umlaut mark
'yacute': chr( 253 ), # small y, acute accent
'yuml' : chr( 255 ), # small y, dieresis or umlaut mark
# Some extra Latin 1 chars that are listed in the HTML3.2 draft (21-May-96)
'copy' : chr( 169 ), # copyright sign
'reg' : chr( 174 ), # registered sign
'nbsp' : chr( 160 ), # non breaking space
# Additional ISO-8859/1 entities listed in rfc1866 (section 14)
'iexcl' : chr( 161 ),
'cent' : chr( 162 ),
'pound' : chr( 163 ),
'curren': chr( 164 ),
'yen' : chr( 165 ),
'brvbar': chr( 166 ),
'sect' : chr( 167 ),
'uml' : chr( 168 ),
'ordf' : chr( 170 ),
'laquo' : chr( 171 ),
'not' : chr( 172 ),
'shy' : chr( 173 ),
'macr' : chr( 175 ),
'deg' : chr( 176 ),
'plusmn': chr( 177 ),
'sup1' : chr( 185 ),
'sup2' : chr( 178 ),
'sup3' : chr( 179 ),
'acute' : chr( 180 ),
'micro' : chr( 181 ),
'para' : chr( 182 ),
'middot': chr( 183 ),
'cedil' : chr( 184 ),
'ordm' : chr( 186 ),
'raquo' : chr( 187 ),
'frac14': chr( 188 ),
'frac12': chr( 189 ),
'frac34': chr( 190 ),
'iquest': chr( 191 ),
'times' : chr( 215 ),
'divide': chr( 247 ),
}
self.char2entity = {}
for k, v in self.entity2char.items():
self.char2entity[v] = '&' + str(k) + ';'
for i in range(255):
if chr(i) not in self.char2entity:
self.char2entity[ chr(i) ] = '&#' + str(i) + ';' | PypiClean |
/Achoo-1.0.tar.gz/Achoo-1.0/setup/ez_setup.py | import sys
DEFAULT_VERSION = "0.6c8"
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
md5_data = {
'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
}
import sys, os
def _validate_md5(egg_name, data):
if egg_name in md5_data:
from md5 import md5
digest = md5(data).hexdigest()
if digest != md5_data[egg_name]:
print >>sys.stderr, (
"md5 validation of %s failed! (Possible download problem?)"
% egg_name
)
sys.exit(2)
return data
def use_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
download_delay=15
):
"""Automatically find/download setuptools and make it available on sys.path
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end with
a '/'). `to_dir` is the directory where setuptools will be downloaded, if
it is not already available. If `download_delay` is specified, it should
be the number of seconds that will be paused before initiating a download,
should one be required. If an older version of setuptools is installed,
this routine will print a message to ``sys.stderr`` and raise SystemExit in
an attempt to abort the calling script.
"""
was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
def do_download():
egg = download_setuptools(version, download_base, to_dir, download_delay)
sys.path.insert(0, egg)
import setuptools; setuptools.bootstrap_install_from = egg
try:
import pkg_resources
except ImportError:
return do_download()
try:
pkg_resources.require("setuptools>="+version); return
except pkg_resources.VersionConflict, e:
if was_imported:
print >>sys.stderr, (
"The required version of setuptools (>=%s) is not available, and\n"
"can't be installed while this script is running. Please install\n"
" a more recent version first, using 'easy_install -U setuptools'."
"\n\n(Currently using %r)"
) % (version, e.args[0])
sys.exit(2)
else:
del pkg_resources, sys.modules['pkg_resources'] # reload ok
return do_download()
except pkg_resources.DistributionNotFound:
return do_download()
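# Typical historical usage from a project's setup.py (illustrative, not part of
# this file): call use_setuptools() before importing setuptools so the required
# version gets downloaded if it is missing.
#
#   from ez_setup import use_setuptools
#   use_setuptools()
#   from setuptools import setup
#   setup(name='example', version='0.1')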
def download_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
delay = 15
):
"""Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download attempt.
"""
import urllib2, shutil
egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
url = download_base + egg_name
saveto = os.path.join(to_dir, egg_name)
src = dst = None
if not os.path.exists(saveto): # Avoid repeated downloads
try:
from distutils import log
if delay:
log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
version, download_base, delay, url
); from time import sleep; sleep(delay)
log.warn("Downloading %s", url)
src = urllib2.urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = _validate_md5(egg_name, src.read())
dst = open(saveto,"wb"); dst.write(data)
finally:
if src: src.close()
if dst: dst.close()
return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall"""
try:
import setuptools
except ImportError:
egg = None
try:
egg = download_setuptools(version, delay=0)
sys.path.insert(0,egg)
from setuptools.command.easy_install import main
return main(list(argv)+[egg]) # we're done here
finally:
if egg and os.path.exists(egg):
os.unlink(egg)
else:
if setuptools.__version__ == '0.0.1':
print >>sys.stderr, (
"You have an obsolete version of setuptools installed. Please\n"
"remove it from your system entirely before rerunning this script."
)
sys.exit(2)
req = "setuptools>="+version
import pkg_resources
try:
pkg_resources.require(req)
except pkg_resources.VersionConflict:
try:
from setuptools.command.easy_install import main
except ImportError:
from easy_install import main
main(list(argv)+[download_setuptools(delay=0)])
sys.exit(0) # try to force an exit
else:
if argv:
from setuptools.command.easy_install import main
main(argv)
else:
print "Setuptools version",version,"or greater has been installed."
print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
"""Update our built-in md5 registry"""
import re
from md5 import md5
for name in filenames:
base = os.path.basename(name)
f = open(name,'rb')
md5_data[base] = md5(f.read()).hexdigest()
f.close()
data = [" %r: %r,\n" % it for it in md5_data.items()]
data.sort()
repl = "".join(data)
import inspect
srcfile = inspect.getsourcefile(sys.modules[__name__])
f = open(srcfile, 'rb'); src = f.read(); f.close()
match = re.search("\nmd5_data = {\n([^}]+)}", src)
if not match:
print >>sys.stderr, "Internal error!"
sys.exit(2)
src = src[:match.start(1)] + repl + src[match.end(1):]
f = open(srcfile,'w')
f.write(src)
f.close()
if __name__=='__main__':
if len(sys.argv)>2 and sys.argv[1]=='--md5update':
update_md5(sys.argv[2:])
else:
main(sys.argv[1:]) | PypiClean |
/Homie4-0.4.0.tar.gz/Homie4-0.4.0/homie/mqtt/homie_mqtt_client.py |
from homie.mqtt.paho_mqtt_client import PAHO_MQTT_Client
#from homie.mqtt.gmqtt_client import GMQTT_Client
MQTT_Client = PAHO_MQTT_Client
import logging
logger = logging.getLogger(__name__)
MQTT_SETTINGS = {
"MQTT_BROKER": None,
"MQTT_PORT": 1883,
"MQTT_USERNAME": None,
"MQTT_PASSWORD": None,
"MQTT_KEEPALIVE": 60,
"MQTT_CLIENT_ID": None,
"MQTT_SHARE_CLIENT": None,
"MQTT_USE_TLS": False,
}
mqtt_client_count = 0
mqtt_clients = []
def _mqtt_validate_settings(settings):
settings = settings.copy()
for setting, value in MQTT_SETTINGS.items():
if not setting in settings:
settings[setting] = MQTT_SETTINGS[setting]
logger.debug("MQTT Settings {} {}".format(setting, settings[setting]))
assert settings["MQTT_BROKER"]
assert settings["MQTT_PORT"]
""" cannot use if two homie clients running from same pc
if settings ['MQTT_CLIENT_ID'] is None or settings ['MQTT_SHARE_CLIENT'] is False:
settings ['MQTT_CLIENT_ID'] = 'homiev3{:04d}'.format(mqtt_client_count)
"""
return settings
common_mqtt_client = None
def connect_mqtt_client(device, mqtt_settings):
global mqtt_client_count
mqtt_settings = _mqtt_validate_settings(mqtt_settings)
mqtt_client = None
last_will_topic = "/".join((device.topic, "$state"))
if mqtt_settings["MQTT_SHARE_CLIENT"] is not True:
logger.info(
"Using new MQTT client, number of instances {}".format(mqtt_client_count)
)
mqtt_client = MQTT_Client(mqtt_settings, last_will_topic)
mqtt_client.connect()
mqtt_client_count = mqtt_client_count + 1
mqtt_clients.append(mqtt_client)
else:
logger.info("Using common MQTT client")
global common_mqtt_client
if common_mqtt_client is None:
common_mqtt_client = MQTT_Client(mqtt_settings,last_will_topic)
common_mqtt_client.connect()
mqtt_client_count = mqtt_client_count + 1
            mqtt_clients.append(common_mqtt_client)
mqtt_client = common_mqtt_client
mqtt_client.add_device(device)
return mqtt_client
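# Illustrative usage (not part of the original module); `device` is assumed to be a
# homie device instance created elsewhere, and only the mandatory settings are shown:
#
#   settings = {'MQTT_BROKER': 'localhost', 'MQTT_PORT': 1883}
#   mqtt_client = connect_mqtt_client(device, settings)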
def close_mqtt_clients():
logger.info ('Closing MQTT clients')
for client in mqtt_clients:
client.close() | PypiClean |
/KolejkaForeman-0.1.202308091427-py3-none-any.whl/kolejka/foreman/foreman.py |
from kolejka.common import settings
import copy
import datetime
import dateutil.parser
import glob
import json
import logging
import math
from multiprocessing import Process
import os
import pathlib
import random
import shutil
import subprocess
import sys
import tempfile
import traceback
from threading import Thread
import time
import uuid
from kolejka.common import kolejka_config, foreman_config
from kolejka.common import KolejkaTask, KolejkaResult, KolejkaLimits
from kolejka.common import MemoryAction, TimeAction, BigIntAction
from kolejka.client import KolejkaClient
from kolejka.common.gpu import gpu_stats
from kolejka.common.images import (
pull_docker_image,
get_docker_image_size,
check_docker_image_existance,
list_docker_images,
remove_docker_image
)
from kolejka.worker.stage0 import stage0
from kolejka.worker.volume import check_python_volume
def system_reset():
with pathlib.Path('/proc/sysrq-trigger').open('wb') as sysrq_trigger:
sysrq_trigger.write(b'b')
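# Keep the local Docker image cache within the configured size budget: reserve space
# for the images required by the dequeued tasks, keep priority and other already
# present images while they still fit, remove the rest, then pull any missing images.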
def manage_images(pull, size, necessary_images, priority_images):
necessary_size = sum(necessary_images.values(), 0)
free_size = size - necessary_size
assert free_size >= 0
docker_images = list_docker_images()
p_images = dict()
for image in priority_images:
if image in docker_images:
p_images[image] = docker_images[image]
priority_images = p_images
keep_images = set()
for image in necessary_images:
keep_images.add(image)
list_images = list(priority_images.items())
random.shuffle(list_images)
li = list(docker_images.items())
random.shuffle(li)
list_images += li
for image,size in list_images:
if image in keep_images:
continue
if size <= free_size:
free_size -= size
keep_images.add(image)
for image in docker_images:
if image not in keep_images:
remove_docker_image(image)
for image, size in necessary_images.items():
pull_image = pull
if not pull_image:
if not check_docker_image_existance(image):
pull_image = True
if pull_image:
pull_docker_image(image)
image_size = get_docker_image_size(image)
assert image_size <= size
def foreman_single(temp_path, task, task_timeout=-1):
config = foreman_config()
    with tempfile.TemporaryDirectory(dir=temp_path) as jailed_path:
if task.limits.workspace is not None:
subprocess.run(['mount', '-t', 'tmpfs', '-o', 'size='+str(task.limits.workspace), 'none', jailed_path], check=True)
try:
task_path = os.path.join(jailed_path, 'task')
result_path = os.path.join(jailed_path, 'result')
temp_path = os.path.join(jailed_path, 'temp')
os.makedirs(task_path, exist_ok=True)
os.makedirs(result_path, exist_ok=True)
os.makedirs(temp_path, exist_ok=True)
task.path = task_path
client = KolejkaClient()
client.task_get(task.id, task_path)
for k,f in task.files.items():
f.path = k
task.commit()
stage0_timeout = task_timeout
stage0_run = Thread(target=stage0, args=(task.path, result_path), kwargs={'temp_path':temp_path, 'consume_task_folder':True})
stage0_run.start()
stage0_run.join(timeout=stage0_timeout)
if stage0_run.is_alive():
#TODO: Report problem to kolejka-server?
system_reset()
else:
result = KolejkaResult(result_path)
result.tags = config.tags
client.result_put(result)
except:
traceback.print_exc()
finally:
            if task.limits.workspace is not None:
subprocess.run(['umount', '-l', jailed_path])
def foreman():
config = foreman_config()
gstats = gpu_stats().gpus
limits = KolejkaLimits()
limits.cpus = config.cpus
limits.memory = config.memory
limits.swap = config.swap
limits.pids = config.pids
limits.storage = config.storage
limits.image = config.image
limits.workspace = config.workspace
limits.time = config.time
limits.network = config.network
limits.gpus = config.gpus
limits.perf_instructions = config.perf_instructions
limits.perf_cycles = config.perf_cycles
limits.cgroup_depth = config.cgroup_depth
limits.cgroup_descendants = config.cgroup_descendants
if limits.gpus is None:
limits.gpus = len(gstats)
limits.gpu_memory = config.gpu_memory
for k,v in gstats.items():
if limits.gpu_memory is None:
limits.gpu_memory = v.memory_total
elif v.memory_total is not None:
limits.gpu_memory = min(limits.gpu_memory, v.memory_total)
client = KolejkaClient()
logging.debug(f'Foreman tags: {config.tags}, limits: {limits.dump()}')
while True:
try:
tasks = client.dequeue(config.concurency, limits, config.tags)
if len(tasks) == 0:
time.sleep(config.interval)
else:
check_python_volume()
while len(tasks) > 0:
resources = KolejkaLimits()
resources.copy(limits)
image_usage = dict()
children_args = list()
cpus_offset = 0
gpus_offset = 0
tasks_timeout = None
for task in tasks:
if len(children_args) >= config.concurency:
break
if task.exclusive and len(children_args) > 0:
break
task.limits.update(limits)
task.limits.cpus_offset = cpus_offset
task.limits.gpus_offset = gpus_offset
ok = True
if resources.cpus is not None and task.limits.cpus > resources.cpus:
ok = False
if task.limits.gpus is not None and task.limits.gpus > 0:
if resources.gpus is None or task.limits.gpus > resources.gpus:
ok = False
if resources.gpu_memory is not None and task.limits.gpu_memory > resources.gpu_memory:
ok = False
if resources.memory is not None and task.limits.memory > resources.memory:
ok = False
if resources.swap is not None and task.limits.swap > resources.swap:
ok = False
if resources.pids is not None and task.limits.pids > resources.pids:
ok = False
if resources.storage is not None and task.limits.storage > resources.storage:
ok = False
if resources.image is not None:
image_usage_add = max(image_usage.get(task.image, 0), task.limits.image) - image_usage.get(task.image, 0)
if image_usage_add > resources.image:
ok = False
if resources.workspace is not None and task.limits.workspace > resources.workspace:
ok = False
if resources.perf_instructions is not None and task.limits.perf_instructions > resources.perf_instructions:
ok = False
if resources.perf_cycles is not None and task.limits.perf_cycles > resources.perf_cycles:
ok = False
if resources.cgroup_depth is not None and task.limits.cgroup_depth > resources.cgroup_depth:
ok = False
if resources.cgroup_descendants is not None and task.limits.cgroup_descendants > resources.cgroup_descendants:
ok = False
if ok:
children_args.append([config.temp_path, task])
cpus_offset += task.limits.cpus
if resources.cpus is not None:
resources.cpus -= task.limits.cpus
if resources.gpus is not None and task.limits.gpus is not None:
resources.gpus -= task.limits.gpus
gpus_offset += task.limits.gpus
if resources.memory is not None:
resources.memory -= task.limits.memory
if resources.swap is not None:
resources.swap -= task.limits.swap
if resources.pids is not None:
resources.pids -= task.limits.pids
if resources.storage is not None:
resources.storage -= task.limits.storage
if resources.image is not None:
resources.image -= image_usage_add
image_usage[task.image] = max(image_usage.get(task.image, 0), task.limits.image)
if resources.workspace is not None:
resources.workspace -= task.limits.workspace
if resources.perf_instructions is not None:
resources.perf_instructions -= task.limits.perf_instructions
if resources.perf_cycles is not None:
resources.perf_cycles -= task.limits.perf_cycles
if resources.cgroup_descendants is not None:
resources.cgroup_descendants -= task.limits.cgroup_descendants
if task.limits.time is None:
tasks_timeout = -1
else:
if tasks_timeout is None:
tasks_timeout = task.limits.time.total_seconds()
else:
tasks_timeout = max(task.limits.time.total_seconds(), tasks_timeout)
tasks = tasks[1:]
if task.exclusive:
break
else:
break
if config.image is not None:
manage_images(
config.pull,
config.image,
image_usage,
[task.image for task in tasks]
)
if tasks_timeout is not None and tasks_timeout >= 0:
tasks_timeout = 10 + 2*tasks_timeout
children = list()
for args in children_args:
args.append(tasks_timeout)
process = Process(target=foreman_single, args=args)
children.append(process)
process.start()
for process in children:
process.join()
except KeyboardInterrupt:
raise
except:
traceback.print_exc()
time.sleep(config.interval)
def config_parser(parser):
parser.add_argument('--auto-tags', type=bool, help='add automatically generated machine tags', default=True)
parser.add_argument('--pull', action='store_true', help='always pull images, even if local version is present', default=False)
parser.add_argument('--tags', type=str, help='comma separated list of machine tags')
parser.add_argument('--temp', type=str, help='temp folder')
parser.add_argument('--interval', type=float, help='dequeue interval (in seconds)')
parser.add_argument('--concurency', type=int, help='number of simultaneous tasks')
parser.add_argument('--cpus', type=int, help='cpus limit')
parser.add_argument('--memory', action=MemoryAction, help='memory limit')
parser.add_argument('--swap', action=MemoryAction, help='swap limit')
parser.add_argument('--pids', type=int, help='pids limit')
parser.add_argument('--storage', action=MemoryAction, help='storage limit')
parser.add_argument('--image', action=MemoryAction, help='image size limit')
parser.add_argument('--workspace', action=MemoryAction, help='workspace size limit')
parser.add_argument('--time', action=TimeAction, help='time limit')
parser.add_argument('--network', type=bool, help='allow netowrking')
parser.add_argument('--gpus', type=int, help='gpus limit')
parser.add_argument('--gpu-memory', type=MemoryAction, help='gpu memory limit')
parser.add_argument('--perf-instructions', type=BigIntAction, help='CPU instructions limit')
parser.add_argument('--perf-cycles', type=BigIntAction, help='CPU cycles limit')
parser.add_argument('--cgroup-depth', type=int, help='Cgroup depth limit')
parser.add_argument('--cgroup-descendants', type=int, help='Cgroup descendants limit')
def execute(args):
kolejka_config(args=args)
foreman()
parser.set_defaults(execute=execute) | PypiClean |
/BotEXBotBase-3.1.3.tar.gz/BotEXBotBase-3.1.3/discord/player.py | import threading
import subprocess
import audioop
import asyncio
import logging
import shlex
import time
from .errors import ClientException
from .opus import Encoder as OpusEncoder
log = logging.getLogger(__name__)
__all__ = ["AudioSource", "PCMAudio", "FFmpegPCMAudio", "PCMVolumeTransformer"]
class AudioSource:
"""Represents an audio stream.
The audio stream can be Opus encoded or not, however if the audio stream
is not Opus encoded then the audio format must be 16-bit 48KHz stereo PCM.
.. warning::
The audio source reads are done in a separate thread.
"""
def read(self):
"""Reads 20ms worth of audio.
Subclasses must implement this.
If the audio is complete, then returning an empty
:term:`py:bytes-like object` to signal this is the way to do so.
If :meth:`is_opus` method returns ``True``, then it must return
20ms worth of Opus encoded audio. Otherwise, it must be 20ms
worth of 16-bit 48KHz stereo PCM, which is about 3,840 bytes
per frame (20ms worth of audio).
Returns
--------
bytes
A bytes like object that represents the PCM or Opus data.
"""
raise NotImplementedError
def is_opus(self):
"""Checks if the audio source is already encoded in Opus.
Defaults to ``False``.
"""
return False
def cleanup(self):
"""Called when clean-up is needed to be done.
Useful for clearing buffer data or processes after
it is done playing audio.
"""
pass
def __del__(self):
self.cleanup()
class PCMAudio(AudioSource):
"""Represents raw 16-bit 48KHz stereo PCM audio source.
Attributes
-----------
stream: file-like object
A file-like object that reads byte data representing raw PCM.
"""
def __init__(self, stream):
self.stream = stream
def read(self):
ret = self.stream.read(OpusEncoder.FRAME_SIZE)
if len(ret) != OpusEncoder.FRAME_SIZE:
return b""
return ret
class FFmpegPCMAudio(AudioSource):
"""An audio source from FFmpeg (or AVConv).
This launches a sub-process to a specific input file given.
.. warning::
You must have the ffmpeg or avconv executable in your path environment
variable in order for this to work.
Parameters
------------
source: Union[str, BinaryIO]
The input that ffmpeg will take and convert to PCM bytes.
If ``pipe`` is True then this is a file-like object that is
passed to the stdin of ffmpeg.
executable: str
The executable name (and path) to use. Defaults to ``ffmpeg``.
pipe: bool
If true, denotes that ``source`` parameter will be passed
to the stdin of ffmpeg. Defaults to ``False``.
stderr: Optional[BinaryIO]
A file-like object to pass to the Popen constructor.
Could also be an instance of ``subprocess.PIPE``.
options: Optional[str]
Extra command line arguments to pass to ffmpeg after the ``-i`` flag.
before_options: Optional[str]
Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
Raises
--------
ClientException
The subprocess failed to be created.
"""
def __init__(
self,
source,
*,
executable="ffmpeg",
pipe=False,
stderr=None,
before_options=None,
options=None
):
stdin = None if not pipe else source
args = [executable]
if isinstance(before_options, str):
args.extend(shlex.split(before_options))
args.append("-i")
args.append("-" if pipe else source)
args.extend(("-f", "s16le", "-ar", "48000", "-ac", "2", "-loglevel", "warning"))
if isinstance(options, str):
args.extend(shlex.split(options))
args.append("pipe:1")
self._process = None
try:
self._process = subprocess.Popen(
args, stdin=stdin, stdout=subprocess.PIPE, stderr=stderr
)
self._stdout = self._process.stdout
except FileNotFoundError:
raise ClientException(executable + " was not found.") from None
except subprocess.SubprocessError as exc:
raise ClientException("Popen failed: {0.__class__.__name__}: {0}".format(exc)) from exc
def read(self):
ret = self._stdout.read(OpusEncoder.FRAME_SIZE)
if len(ret) != OpusEncoder.FRAME_SIZE:
return b""
return ret
def cleanup(self):
proc = self._process
if proc is None:
return
log.info("Preparing to terminate ffmpeg process %s.", proc.pid)
proc.kill()
if proc.poll() is None:
log.info("ffmpeg process %s has not terminated. Waiting to terminate...", proc.pid)
proc.communicate()
log.info(
"ffmpeg process %s should have terminated with a return code of %s.",
proc.pid,
proc.returncode,
)
else:
log.info(
"ffmpeg process %s successfully terminated with return code of %s.",
proc.pid,
proc.returncode,
)
self._process = None
class PCMVolumeTransformer(AudioSource):
"""Transforms a previous :class:`AudioSource` to have volume controls.
This does not work on audio sources that have :meth:`AudioSource.is_opus`
set to ``True``.
Parameters
------------
original: :class:`AudioSource`
The original AudioSource to transform.
volume: float
The initial volume to set it to.
See :attr:`volume` for more info.
Raises
-------
TypeError
Not an audio source.
ClientException
The audio source is opus encoded.
"""
def __init__(self, original, volume=1.0):
if not isinstance(original, AudioSource):
raise TypeError("expected AudioSource not {0.__class__.__name__}.".format(original))
if original.is_opus():
raise ClientException("AudioSource must not be Opus encoded.")
self.original = original
self.volume = volume
@property
def volume(self):
"""Retrieves or sets the volume as a floating point percentage (e.g. 1.0 for 100%)."""
return self._volume
@volume.setter
def volume(self, value):
self._volume = max(value, 0.0)
def cleanup(self):
self.original.cleanup()
def read(self):
ret = self.original.read()
return audioop.mul(ret, 2, min(self._volume, 2.0))
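# Example (sketch): composing the sources above for playback. `voice_client` is
# assumed to be an already-connected voice client exposing a `play()` method that
# accepts an AudioSource; the file name below is illustrative.
#
#     source = PCMVolumeTransformer(FFmpegPCMAudio("track.mp3"), volume=0.5)
#     voice_client.play(source, after=lambda err: print("player finished", err))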
class AudioPlayer(threading.Thread):
DELAY = OpusEncoder.FRAME_LENGTH / 1000.0
def __init__(self, source, client, *, after=None):
threading.Thread.__init__(self)
self.daemon = True
self.source = source
self.client = client
self.after = after
self._end = threading.Event()
self._resumed = threading.Event()
self._resumed.set() # we are not paused
self._current_error = None
self._connected = client._connected
self._lock = threading.Lock()
if after is not None and not callable(after):
raise TypeError('Expected a callable for the "after" parameter.')
def _do_run(self):
self.loops = 0
self._start = time.time()
# getattr lookup speed ups
play_audio = self.client.send_audio_packet
self._speak(True)
while not self._end.is_set():
# are we paused?
if not self._resumed.is_set():
# wait until we aren't
self._resumed.wait()
continue
# are we disconnected from voice?
if not self._connected.is_set():
# wait until we are connected
self._connected.wait()
# reset our internal data
self.loops = 0
self._start = time.time()
self.loops += 1
data = self.source.read()
if not data:
self.stop()
break
play_audio(data, encode=not self.source.is_opus())
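            # Frames are scheduled against the stream start time (self._start) so that
            # timing drift does not accumulate across iterations; the sleep is clamped at
            # zero so a slow read makes the loop catch up instead of falling further behind.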
next_time = self._start + self.DELAY * self.loops
delay = max(0, self.DELAY + (next_time - time.time()))
time.sleep(delay)
def run(self):
try:
self._do_run()
except Exception as exc:
self._current_error = exc
self.stop()
finally:
self.source.cleanup()
self._call_after()
def _call_after(self):
if self.after is not None:
try:
self.after(self._current_error)
except Exception:
log.exception("Calling the after function failed.")
def stop(self):
self._end.set()
self._resumed.set()
self._speak(False)
def pause(self, *, update_speaking=True):
self._resumed.clear()
if update_speaking:
self._speak(False)
def resume(self, *, update_speaking=True):
self.loops = 0
self._start = time.time()
self._resumed.set()
if update_speaking:
self._speak(True)
def is_playing(self):
return self._resumed.is_set() and not self._end.is_set()
def is_paused(self):
return not self._end.is_set() and not self._resumed.is_set()
def _set_source(self, source):
with self._lock:
self.pause(update_speaking=False)
self.source = source
self.resume(update_speaking=False)
def _speak(self, speaking):
try:
asyncio.run_coroutine_threadsafe(self.client.ws.speak(speaking), self.client.loop)
except Exception as e:
log.info("Speaking call in player failed: %s", e) | PypiClean |
/ERStruct-0.2.1.tar.gz/ERStruct-0.2.1/README.md
# ERstruct - Official Python Implementation
A Python package for inferring the number of top informative PCs that capture population structure based on genotype information.
## Requirements for Data File
1. Data files must be in numpy array `.npy` format. Users can convert a VCF (variant call format) file into a numpy array via the `vcfnp` package: https://pypi.org/project/vcfnp/, and a bgen file into a numpy array via the `bgen-reader` package: https://pypi.org/project/bgen-reader/.
2. The data matrix must contain only 0, 1, 2 and/or NaN (for missing values) entries. Note that our package imputes all missing data (NaN) with 0; users may perform other types of imputation beforehand. A minimal preparation sketch is shown below this list.
3. Rows represent individuals and columns represent markers. If there is more than one data file, all data matrices must have the same number of rows.
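A minimal sketch of preparing such an input with `numpy` (the small matrix below is a stand-in for a real genotype matrix; the actual VCF/bgen conversion is done with the packages linked above):
```python
import numpy as np
# Hypothetical genotype matrix: 4 individuals (rows) x 5 markers (columns),
# coded as 0/1/2 with NaN marking missing calls.
genotypes = np.array([
    [0, 1, 2, 0, np.nan],
    [1, 1, 0, 2, 0],
    [2, 0, 1, 1, 1],
    [0, 2, np.nan, 0, 1],
], dtype=float)
np.save("chr21.npy", genotypes)  # ERStruct reads .npy files like this one
```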
## Dependencies
ERStruct depends on `numpy`, `torch` and `joblib`.
## Installation
Users can install `ERStruct` by running the command below in command line:
```commandline
pip install ERStruct
```
## Parameters
```
erstruct(n, path, rep, alpha, cpu_num=1, device_idx="cpu", varm=2e8, Kc=-1)
```
**n** *(int)* - total number of individuals in the study
**path** *(str)* - the path of data file(s)
**rep** *(int)* - number of simulations for the null distribution (set to `5000` by default). We recommend using a `rep` between `2/alpha` and `5/alpha`.
**alpha** *(float)* - significance level; can be either a scalar or a vector (set to `1e-3` by default)
**Kc** *(int)* - a coarse estimate of the number of top PCs (set to `-1` by default, which denotes `Kc = floor(n/10)` when the algorithm runs)
**cpu_num** *(int)* - optional, number of CPU cores to be used for parallel computing (set to `1` by default)
**device_idx** *(str)* - device to use, `"cpu"` or `"gpu"` (set to `"cpu"` by default)
**varm** *(int)* - allocated GPU memory (in bytes) for computing. When `device_idx` is set to `"gpu"`, `varm` can be specified to increase computational speed by allocating the required amount of memory to the GPU (set to `2e+8` by default)
## Examples
Import ERStruct algorithm
```
from ERStruct import erstruct
```
Download the sample dataset (chromosome 21 and chromosome 22 data for 500 individuals, obtained from sequencing data of the 1000 Genomes Project):
```python
from ERStruct import download_sample
download_sample()
```
Run ERStruct algorithm on sample dataset with CPUs:
```python
test = erstruct(500, ['chr21.npy', 'chr22.npy'], 1000, 5e-3, cpu_num=1, device_idx="cpu")
K = test.run()
```
Run ERStruct algorithm on sample dataset with GPUs:
```python
test = erstruct(500, ['chr21.npy', 'chr22.npy'], 1000, 5e-3, device_idx="gpu", varm=2e8)
K = test.run()
```
## Other Details
For details of this package, please refer to our paper:
> [ERStruct: A Python Package for Inferring the Number of Top Principal Components from Whole Genome Sequencing Data](https://www.biorxiv.org/content/10.1101/2022.08.15.503962v2)
For details of the ERStruct algorithm:
> [ERStruct: An Eigenvalue Ratio Approach to Inferring Population Structure from Sequencing Data](https://www.researchgate.net/publication/350647012_ERStruct_An_Eigenvalue_Ratio_Approach_to_Inferring_Population_Structure_from_Sequencing_Data)
If you have any questions, please contact [email protected].
/GeoNode-3.2.0-py3-none-any.whl/geonode/maps/migrations/0025_auto_20170801_1228_squashed_0032_auto_20190404_0820.py
from django.db import migrations, models
import django.db.models.manager
class Migration(migrations.Migration):
replaces = [('maps', '0025_auto_20170801_1228'), ('maps', '0026_auto_20180301_1947'), ('maps', '0027_auto_20180302_0430'), ('maps', '0028_auto_20180409_1238'), ('maps', '0029_auto_20180412_0822'), ('maps', '0030_auto_20180414_2120'), ('maps', '0031_auto_20190329_1652'), ('maps', '0032_auto_20190404_0820')]
dependencies = [
('maps', '24_initial'),
]
operations = [
migrations.AlterField(
model_name='map',
name='abstract_en',
field=models.TextField(blank=True, help_text='brief narrative summary of the content of the resource(s)', max_length=2000, null=True, verbose_name='abstract'),
),
migrations.AlterField(
model_name='map',
name='data_quality_statement_en',
field=models.TextField(blank=True, help_text="general explanation of the data producer's knowledge about the lineage of a dataset", max_length=2000, null=True, verbose_name='data quality statement'),
),
migrations.AlterField(
model_name='map',
name='purpose_en',
field=models.TextField(blank=True, help_text='summary of the intentions with which the resource(s) was developed', max_length=500, null=True, verbose_name='purpose'),
),
migrations.AlterField(
model_name='map',
name='supplemental_information_en',
field=models.TextField(default='No information provided', help_text='any other descriptive information about the dataset', max_length=2000, null=True, verbose_name='supplemental information'),
),
migrations.AlterModelManagers(
name='map',
managers=[
('objects', django.db.models.manager.Manager()),
('base_objects', django.db.models.manager.Manager()),
],
),
migrations.AlterModelOptions(
name='map',
options={'base_manager_name': 'objects'},
),
migrations.AlterModelManagers(
name='map',
managers=[
],
),
migrations.AlterModelOptions(
name='map',
options={},
),
migrations.AlterModelManagers(
name='map',
managers=[
('objects', django.db.models.manager.Manager()),
('base_objects', django.db.models.manager.Manager()),
],
),
    ]
/django-chuck-0.2.3.tar.gz/django-chuck/modules/django-cms/project/apps/filer/migrations/0005_auto__add_field_file_sha1__chg_field_file_file.py
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'File.sha1'
db.add_column('filer_file', 'sha1', self.gf('django.db.models.fields.CharField')(default='', max_length=40, blank=True), keep_default=False)
# Changing field 'File.file'
db.alter_column('filer_file', 'file', self.gf('django.db.models.fields.files.FileField')(max_length=255, null=True))
def backwards(self, orm):
# Deleting field 'File.sha1'
db.delete_column('filer_file', 'sha1')
# Changing field 'File.file'
db.alter_column('filer_file', 'file', self.gf('django.db.models.fields.files.FileField')())
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.clipboard': {
'Meta': {'object_name': 'Clipboard'},
'files': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'in_clipboards'", 'symmetrical': 'False', 'through': "orm['filer.ClipboardItem']", 'to': "orm['filer.File']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'filer_clipboards'", 'to': "orm['auth.User']"})
},
'filer.clipboarditem': {
'Meta': {'object_name': 'ClipboardItem'},
'clipboard': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Clipboard']"}),
'file': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.File']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_file_type_plugin_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folderpermission': {
'Meta': {'object_name': 'FolderPermission'},
'can_add_children': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_edit': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'everybody': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Folder']", 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_folder_permissions'", 'null': 'True', 'to': "orm['auth.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_folder_permissions'", 'null': 'True', 'to': "orm['auth.User']"})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
}
}
    complete_apps = ['filer']
/Files.com-1.0.1051-py3-none-any.whl/files_sdk/models/external_event.py
import builtins
import datetime
from files_sdk.api import Api
from files_sdk.list_obj import ListObj
from files_sdk.exceptions import InvalidParameterError, MissingParameterError, NotImplementedError
class ExternalEvent:
default_attributes = {
'id': None, # int64 - Event ID
'event_type': None, # string - Type of event being recorded.
'status': None, # string - Status of event.
'body': None, # string - Event body
'created_at': None, # date-time - External event create date/time
'body_url': None, # string - Link to log file.
'folder_behavior_id': None, # int64 - Folder Behavior ID
'successful_files': None, # int64 - For sync events, the number of files handled successfully.
'errored_files': None, # int64 - For sync events, the number of files that encountered errors.
'bytes_synced': None, # int64 - For sync events, the total number of bytes synced.
'remote_server_type': None, # string - Associated Remote Server type, if any
}
def __init__(self, attributes=None, options=None):
if not isinstance(attributes, dict):
attributes = {}
if not isinstance(options, dict):
options = {}
self.set_attributes(attributes)
self.options = options
def set_attributes(self, attributes):
for (attribute, default_value) in ExternalEvent.default_attributes.items():
setattr(self, attribute, attributes.get(attribute, default_value))
def get_attributes(self):
return {k: getattr(self, k, None) for k in ExternalEvent.default_attributes if getattr(self, k, None) is not None}
def save(self):
if hasattr(self, "id") and self.id:
raise NotImplementedError("The ExternalEvent object doesn't support updates.")
else:
new_obj = create(self.get_attributes(), self.options)
self.set_attributes(new_obj.get_attributes())
# Parameters:
# cursor - string - Used for pagination. When a list request has more records available, cursors are provided in the response headers `X-Files-Cursor-Next` and `X-Files-Cursor-Prev`. Send one of those cursor value here to resume an existing list from the next available record. Note: many of our SDKs have iterator methods that will automatically handle cursor-based pagination.
# per_page - int64 - Number of records to show per page. (Max: 10,000, 1,000 or less is recommended).
# sort_by - object - If set, sort records by the specified field in either `asc` or `desc` direction (e.g. `sort_by[remote_server_type]=desc`). Valid fields are `remote_server_type`, `site_id`, `folder_behavior_id`, `event_type`, `created_at` or `status`.
# filter - object - If set, return records where the specified field is equal to the supplied value. Valid fields are `created_at`, `event_type`, `remote_server_type`, `status` or `folder_behavior_id`. Valid field combinations are `[ event_type, status, created_at ]`, `[ event_type, created_at ]` or `[ status, created_at ]`.
# filter_gt - object - If set, return records where the specified field is greater than the supplied value. Valid fields are `created_at`.
# filter_gteq - object - If set, return records where the specified field is greater than or equal the supplied value. Valid fields are `created_at`.
# filter_prefix - object - If set, return records where the specified field is prefixed by the supplied value. Valid fields are `remote_server_type`.
# filter_lt - object - If set, return records where the specified field is less than the supplied value. Valid fields are `created_at`.
# filter_lteq - object - If set, return records where the specified field is less than or equal the supplied value. Valid fields are `created_at`.
def list(params = None, options = None):
if not isinstance(params, dict):
params = {}
if not isinstance(options, dict):
options = {}
if "cursor" in params and not isinstance(params["cursor"], str):
raise InvalidParameterError("Bad parameter: cursor must be an str")
if "per_page" in params and not isinstance(params["per_page"], int):
raise InvalidParameterError("Bad parameter: per_page must be an int")
if "sort_by" in params and not isinstance(params["sort_by"], dict):
raise InvalidParameterError("Bad parameter: sort_by must be an dict")
if "filter" in params and not isinstance(params["filter"], dict):
raise InvalidParameterError("Bad parameter: filter must be an dict")
if "filter_gt" in params and not isinstance(params["filter_gt"], dict):
raise InvalidParameterError("Bad parameter: filter_gt must be an dict")
if "filter_gteq" in params and not isinstance(params["filter_gteq"], dict):
raise InvalidParameterError("Bad parameter: filter_gteq must be an dict")
if "filter_prefix" in params and not isinstance(params["filter_prefix"], dict):
raise InvalidParameterError("Bad parameter: filter_prefix must be an dict")
if "filter_lt" in params and not isinstance(params["filter_lt"], dict):
raise InvalidParameterError("Bad parameter: filter_lt must be an dict")
if "filter_lteq" in params and not isinstance(params["filter_lteq"], dict):
raise InvalidParameterError("Bad parameter: filter_lteq must be an dict")
return ListObj(ExternalEvent,"GET", "/external_events", params, options)
def all(params = None, options = None):
    return list(params, options)
# Parameters:
# id (required) - int64 - External Event ID.
def find(id, params = None, options = None):
if not isinstance(params, dict):
params = {}
if not isinstance(options, dict):
options = {}
params["id"] = id
if "id" in params and not isinstance(params["id"], int):
raise InvalidParameterError("Bad parameter: id must be an int")
if "id" not in params:
raise MissingParameterError("Parameter missing: id")
response, options = Api.send_request("GET", "/external_events/{id}".format(id=params['id']), params, options)
return ExternalEvent(response.data, options)
def get(id, params = None, options = None):
    return find(id, params, options)
# Parameters:
# status (required) - string - Status of event.
# body (required) - string - Event body
def create(params = None, options = None):
if not isinstance(params, dict):
params = {}
if not isinstance(options, dict):
options = {}
if "status" in params and not isinstance(params["status"], str):
raise InvalidParameterError("Bad parameter: status must be an str")
if "body" in params and not isinstance(params["body"], str):
raise InvalidParameterError("Bad parameter: body must be an str")
if "status" not in params:
raise MissingParameterError("Parameter missing: status")
if "body" not in params:
raise MissingParameterError("Parameter missing: body")
response, options = Api.send_request("POST", "/external_events", params, options)
return ExternalEvent(response.data, options)
def new(*args, **kwargs):
    return ExternalEvent(*args, **kwargs)
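# Example (sketch): fetching a single event with the module-level find() defined above,
# assuming the SDK's API key has been configured elsewhere; the id is a placeholder.
#
#     event = find(1234)
#     print(event.event_type, event.status, event.created_at)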
/Django-Data-Import-1.0.2.tar.gz/Django-Data-Import-1.0.2/django_data_import/mixins.py
from __future__ import absolute_import
from __future__ import with_statement
import csv
import django
from django.utils.translation import ugettext_lazy as _
from django.conf.urls import patterns, url
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.db import IntegrityError
from django.http import HttpResponseRedirect
from django.template.response import TemplateResponse
from .forms import ImportForm, ConfirmImportForm, get_model_form, get_model_formset
class ImportDataMixin(object):
"""
Data import mixin.
"""
change_list_template = 'admin/django_data_import/change_list_import.html'
import_template_name = 'admin/django_data_import/import.html'
def get_urls(self):
urls = super(ImportDataMixin, self).get_urls()
my_urls = patterns(
'',
url(
r'^import/$',
self.admin_site.admin_view(self.import_action),
name='%s_%s_import' % self._get_model_info()
)
)
return my_urls + urls
def import_action(self, request, *args, **kwargs):
"""
        Custom endpoint for importing a CSV file.
        Shows a form for selecting the file, previews the parsed rows, and saves the data to the database.
"""
context = {}
save_data = request.POST.get('save_data', False)
form = ImportForm(request.POST or None, request.FILES or None)
model_fields = self._get_field_names()
if save_data:
import_form = get_model_form(self.model, fields=model_fields)
import_formset = get_model_formset(import_form, request.POST)
created_counter = 0
for import_form in import_formset:
try:
if import_form.is_valid():
import_form.save()
created_counter += 1
except (IntegrityError, TypeError):
pass
success_message = _('Imported {} rows'.format(created_counter))
messages.success(request, success_message)
url = reverse(
'admin:%s_%s_changelist' % self._get_model_info(),
current_app=self.admin_site.name
)
return HttpResponseRedirect(url)
elif request.method == 'POST' and form.is_valid():
import_file = form.cleaned_data['import_file']
delimiter = str(form.cleaned_data['delimiter'])
csv_data = self._read_csv_file(import_file, delimiter=delimiter)
import_form = get_model_form(self.model, fields=model_fields)
context['import_formset'] = get_model_formset(import_form, initial=csv_data)
context['confirm_form'] = ConfirmImportForm(initial={
'delimiter': form.cleaned_data['delimiter'],
})
if django.VERSION >= (1, 8, 0):
context.update(self.admin_site.each_context(request))
elif django.VERSION >= (1, 7, 0):
context.update(self.admin_site.each_context())
context['form'] = form
context['opts'] = self.model._meta
context['fields'] = model_fields
return TemplateResponse(
request,
self.import_template_name,
context,
current_app=self.admin_site.name
)
def _get_field_names(self):
return [f.name for f in self.model._meta.fields if f.name != 'id']
def _get_model_info(self):
# module_name is renamed to model_name in Django 1.8
app_label = self.model._meta.app_label
try:
return (app_label, self.model._meta.model_name)
except AttributeError:
return (app_label, self.model._meta.module_name)
def _read_csv_file(self, filename, delimiter=','):
"""
Return list of dicts from given CSV file.
"""
reader = csv.DictReader(filename, delimiter=delimiter, fieldnames=self._get_field_names())
        return list(reader)
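# Example (sketch): wiring the mixin into a Django admin; "Book" is a hypothetical model.
#
#     from django.contrib import admin
#     from django_data_import.mixins import ImportDataMixin
#     from myapp.models import Book
#
#     class BookAdmin(ImportDataMixin, admin.ModelAdmin):
#         pass
#
#     admin.site.register(Book, BookAdmin)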
/KratosChimeraApplication-9.4-cp38-cp38-win_amd64.whl/KratosMultiphysics/ChimeraApplication/rotate_region_process.py
import KratosMultiphysics
import KratosMultiphysics.ChimeraApplication as KratosChimera
def Factory(settings, Model):
if ( not isinstance(settings, KratosMultiphysics.Parameters) ):
raise Exception("expected input shall be a Parameters object, encapsulating a json string")
return ApplyRotateRegionProcess(Model, settings["Parameters"])
class ApplyRotateRegionProcess(KratosMultiphysics.Process):
"""This process applies a rotation to a given modelpart or a submodelpart
Public member variables:
Model -- the container of the different model parts.
settings -- Kratos parameters containing solver settings.
"""
def __init__(self, Model, settings):
""" The default constructor of the class
Keyword arguments:
self -- It signifies an instance of a class.
Model -- the container of the different model parts.
settings -- Kratos parameters containing solver settings.
"""
KratosMultiphysics.Process.__init__(self)
        # default settings for the rotate region process
default_settings = KratosMultiphysics.Parameters("""
{
"model_part_name":"",
"center_of_rotation":[0.0,0.0,0.0],
"calculate_torque":false,
"torque_model_part_name":"",
"moment_of_inertia":0.0,
"rotational_damping":0.0,
"angular_velocity_radians":0.0,
"axis_of_rotation":[0.0,0.0,0.0],
"is_ale" : false,
"interval": [0.0, 1e30]
}
""")
# Assign this here since it will change the "interval" prior to validation
self.interval = KratosMultiphysics.IntervalUtility(settings)
# compare against the appropriate default settings
settings.ValidateAndAssignDefaults(default_settings)
# checking for empty model part name
if (settings["model_part_name"].GetString() == ""):
raise Exception("ApplyRotateRegionProcess: A value (string) for the entry 'model_part_name' must be given in the parameters of the process.")
# Get the modelpart to rotate
self.model_part = Model[settings["model_part_name"].GetString()]
if ( settings["axis_of_rotation"].IsVector() ):
axis_of_rotation = settings["axis_of_rotation"].GetVector()
if ( axis_of_rotation[0] == 0.0 and axis_of_rotation[1] == 0.0 and axis_of_rotation[2] == 0.0):
raise Exception("The values (vector) of the entry 'axis_of_rotation' are all zero. This is not admissible.")
if (settings["calculate_torque"].GetBool() and settings["angular_velocity_radians"].GetDouble()!=0.0):
raise Exception("'calculate_torque' is set to true and 'angular_velocity_radians' is not zero. This is not admissible.")
if(settings["calculate_torque"].GetBool() and settings["moment_of_inertia"].GetDouble() == 0.0):
KratosMultiphysics.Logger.PrintWarning("RotateRegionProcess", " 'moment_of_inertia' is zero !!")
if(settings["calculate_torque"].GetBool() and settings["rotational_damping"].GetDouble() == 0.0):
KratosMultiphysics.Logger.PrintWarning("RotateRegionProcess", " 'rotational_damping' is zero !!")
# If no torque_model_part_name is specified remove it to avoid later problems
if (settings["torque_model_part_name"].GetString() == ""):
settings.RemoveValue("torque_model_part_name")
settings.RemoveValue("interval")
# Making the actual process
self.rotate_region_process = KratosChimera.RotateRegionProcess(self.model_part, settings)
def ExecuteInitializeSolutionStep(self):
""" This method is executed in order to initialize the current step
Keyword arguments:
self -- It signifies an instance of a class.
"""
current_time = self.model_part.ProcessInfo[KratosMultiphysics.TIME]
if self.interval.IsInInterval(current_time):
self.rotate_region_process.ExecuteInitializeSolutionStep()
def ExecuteFinalizeSolutionStep(self):
""" This method is executed in order to finalize the current step
Keyword arguments:
self -- It signifies an instance of a class.
"""
current_time = self.model_part.ProcessInfo[KratosMultiphysics.TIME]
if self.interval.IsInInterval(current_time):
            self.rotate_region_process.ExecuteFinalizeSolutionStep()
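# Example (sketch) of how this process is typically referenced from a Kratos
# ProjectParameters.json processes list; the model part name and numeric values are
# placeholders, and the surrounding keys follow common Kratos conventions:
#
#     {
#         "python_module": "rotate_region_process",
#         "kratos_module": "KratosMultiphysics.ChimeraApplication",
#         "Parameters": {
#             "model_part_name": "FluidModelPart.RotatingRegion",
#             "center_of_rotation": [0.0, 0.0, 0.0],
#             "angular_velocity_radians": 6.28,
#             "axis_of_rotation": [0.0, 0.0, 1.0],
#             "is_ale": true
#         }
#     }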
/Copreus-0.4.0.tar.gz/Copreus-0.4.0/copreus/drivers/rgbled.py
import RPi.GPIO as GPIO
from copreus.baseclasses.adriver import ADriver
from copreus.schema.rgbled import get_schema
import json
import jsonschema
from enum import Enum
from asyncscheduler import AsyncScheduler
from pelops.ui.tools import parse
message_rgb_schema = {
"type": "object",
"description": "rgb boolean",
"properties": {
"r": {
"description": "red",
"type": "boolean"
},
"g": {
"description": "green",
"type": "boolean"
},
"b": {
"description": "blue",
"type": "boolean"
}
},
"required": ["r", "g", "b"],
"additionalItems": False
}
message_color_schema = {
"type": "object",
"description": "rgb color name",
"properties": {
"color": {
"description": "color",
"type": "string",
"enum": ["BLACK", "WHITE", "RED", "GREEN", "BLUE", "YELLOW", "AQUA", "MAGENTA"]
}
},
"required": ["color"],
"additionalItems": False
}
message_blink_symmetric_schema = {
"type": "object",
"description": "alternate between two color names with equal delays",
"properties": {
"color_a": {
"description": "color a",
"type": "string",
"enum": ["BLACK", "WHITE", "RED", "GREEN", "BLUE", "YELLOW", "AQUA", "MAGENTA"]
},
"color_b": {
"description": "color b",
"type": "string",
"enum": ["BLACK", "WHITE", "RED", "GREEN", "BLUE", "YELLOW", "AQUA", "MAGENTA"]
},
"delay": {
"description": "delay after activating each color in seconds",
"type": "number"
}
},
"required": ["color_a", "color_b", "delay"],
"additionalItems": False
}
message_blink_asymmetric_schema = {
"type": "object",
"description": "alternate between two color names with two different delays",
"properties": {
"color_a": {
"description": "color a",
"type": "string",
"enum": ["BLACK", "WHITE", "RED", "GREEN", "BLUE", "YELLOW", "AQUA", "MAGENTA"]
},
"color_b": {
"description": "color b",
"type": "string",
"enum": ["BLACK", "WHITE", "RED", "GREEN", "BLUE", "YELLOW", "AQUA", "MAGENTA"]
},
"delay_a": {
"description": "delay after activating color a in seconds",
"type": "number"
},
"delay_b": {
"description": "delay after activating color b in seconds",
"type": "number"
}
},
"required": ["color_a", "color_b", "delay_a", "delay_b"],
"additionalItems": False
}
message_schema = {
"definitions": {},
"$schema": "http://json-schema.org/draft-06/schema#",
"oneOf": [
message_rgb_schema,
message_color_schema,
message_blink_symmetric_schema,
message_blink_asymmetric_schema
],
"additionalItems": False
}
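# Example (sketch) payloads that validate against message_schema above, one per accepted sub-schema:
#
#     {"r": true, "g": false, "b": true}
#     {"color": "MAGENTA"}
#     {"color_a": "RED", "color_b": "BLACK", "delay": 0.5}
#     {"color_a": "RED", "color_b": "BLACK", "delay_a": 0.2, "delay_b": 1.0}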
class Color(Enum):
BLACK = {"r": False, "g": False, "b": False}
WHITE = {"r": True, "g": True, "b": True}
RED = {"r": True, "g": False, "b": False}
GREEN = {"r": False, "g": True, "b": False}
BLUE = {"r": False, "g": False, "b": True}
YELLOW = {"r": True, "g": True, "b": False}
AQUA = {"r": False, "g": True, "b": True}
MAGENTA = {"r": True, "g": False, "b": True}
class ALEDDriver:
_logger = None
_set_pins = None
_scheduler = None
def __init__(self, set_pins, logger, scheduler, struct):
self._logger = logger
self._scheduler = scheduler
self._set_pins = set_pins
self._process_struct(struct)
def _process_struct(self, struct):
raise NotImplementedError()
def display(self):
self._scheduler.clear_scheduler()
try:
self._scheduler.stop()
except AttributeError:
# expected exception - AsyncScheduler expects to be running when stop is being called.
# TODO - update AsyncScheduler implementation
pass
self._display()
def _display(self):
raise NotImplementedError()
class StaticColor(ALEDDriver):
_color = None
def _process_struct(self, struct):
self._color = Color[struct["color"]]
def _display(self):
self._logger.info("StaticColor.setting color '{}'".format(self._color))
self._set_pins(self._color.value["r"], self._color.value["g"], self._color.value["b"])
class StaticRGB(ALEDDriver):
_rgb = None
def _process_struct(self, struct):
self._rgb = struct
def _display(self):
self._logger.info("StaticRGB.setting rgb '{}'".format(self._rgb))
self._set_pins(self._rgb["r"], self._rgb["g"], self._rgb["b"])
class BlinkColorsAsymmetric(ALEDDriver):
_color_a = None
_color_b = None
_delay_a = None
_delay_b = None
def _process_struct(self, data):
self._color_a = Color[data["color_a"]]
self._color_b = Color[data["color_b"]]
self._delay_a = data["delay_a"]
self._delay_b = data["delay_b"]
def _display_color(self, color):
self._logger.info("BlinkColors._display_color color '{}'".format(color))
self._set_pins(color.value["r"], color.value["g"], color.value["b"])
def _add_repeat_color(self, delay, color):
self._logger.info("BlinkColors._add_repeat_color - add repeat {} for color {} to scheduler.".format(delay, color))
self._scheduler.repeat(delay, 1, self._display_color, (color,))
def _display(self):
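        # One full blink cycle lasts delay_a + delay_b seconds: color_a's repeating task
        # is registered at t=0 and color_b's at t=delay_a, each with the full cycle as its
        # period, while color_b is shown immediately as the starting state.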
delay = self._delay_a + self._delay_b
self._scheduler.enter(0, 1, self._add_repeat_color, (delay, self._color_a))
self._scheduler.enter(self._delay_a, 1, self._add_repeat_color, (delay, self._color_b))
self._display_color(self._color_b)
self._scheduler.start()
class BlinkColorsSymmetric(BlinkColorsAsymmetric):
def _process_struct(self, data):
self._color_a = Color[data["color_a"]]
self._color_b = Color[data["color_b"]]
self._delay_a = data["delay"]
self._delay_b = data["delay"]
class LEDDriverFactory:
@staticmethod
def create(set_pins, logger, scheduler, struct):
if "delay" in struct:
driver = BlinkColorsSymmetric(set_pins, logger, scheduler, struct)
elif "delay_a" in struct:
driver = BlinkColorsAsymmetric(set_pins, logger, scheduler, struct)
elif "color" in struct:
driver = StaticColor(set_pins, logger, scheduler, struct)
elif "r" in struct:
driver = StaticRGB(set_pins, logger, scheduler, struct)
else:
raise ValueError("LEDDriverFactory.create - dont know how to handle struct '{}'".format(struct))
return driver
class RGBLed(ADriver):
"""Generic driver that sets the given output pin.
The driver entry in the yaml file consists of:
* ADriver entries
    * topics-sub: command - accepts JSON messages matching one of the four schemata listed below
* Output entries
* pin-red/pin-green/pin-blue: gpio pin
    * physical-closed-red/physical-closed-green/physical-closed-blue: high/low - mapping between logical states (closed/open) and physical output
    levels (low/high)
* initial-color: ["BLACK", "WHITE", "RED", "GREEN", "BLUE", "YELLOW", "AQUA", "MAGENTA"]
The accepted message schemata are:
* rgb: {"r": True, "b": False, "g": True}
* color: {"color": "MAGENTA"}
* blink_symmetric: {"color_a": "MAGENTA", "color_b": "AQUA", "delay": 1}
* blink_asymmetric: {"color_a": "MAGENTA", "color_b": "AQUA", "delay_a": 1, "delay_b": 2}
Example:
driver:
type: rgbled
pin-red: 21
pin-green: 22
pin-blue: 23
initial-color: GREEN # ["BLACK", "WHITE", "RED", "GREEN", "BLUE", "YELLOW", "AQUA", "MAGENTA"]
        physical-closed-red: high
        physical-closed-green: high
        physical-closed-blue: high
topics-sub:
command: /test/rgbled # four different message types are accepted: rgb, color, blink_symmetric, blink_asymmetric
"""
_pin_red = -1 # gpio pin id
_pin_green = -1 # gpio pin id
_pin_blue = -1 # gpio pin id
_gpio_closed_red = -1 # value to write to gpio pin for closing output (0/1)
_gpio_opened_red = -1 # value to write to gpio pin for opening output (0/1)
_gpio_closed_green = -1 # value to write to gpio pin for closing output (0/1)
_gpio_opened_green = -1 # value to write to gpio pin for opening output (0/1)
_gpio_closed_blue = -1 # value to write to gpio pin for closing output (0/1)
_gpio_opened_blue = -1 # value to write to gpio pin for opening output (0/1)
_initial_color = None # should the output be opened or closed after start
_scheduler = None
_active_driver = None
def __init__(self, config, mqtt_client=None, logger=None, stdout_log_level=None, no_gui=None,
manage_monitoring_agent=True):
ADriver.__init__(self, config, mqtt_client, logger, logger_name=self.__class__.__name__,
stdout_log_level=stdout_log_level, no_gui=no_gui,
manage_monitoring_agent=manage_monitoring_agent)
self._pin_red = int(self._config["pin-red"])
self._pin_green = int(self._config["pin-green"])
self._pin_blue = int(self._config["pin-blue"])
self._initial_color = Color[self._config["initial-color"]]
self._gpio_closed_red = self._read_physical_closed("physical-closed-red")
self._gpio_opened_red = (self._gpio_closed_red + 1) % 2
self._gpio_closed_green = self._read_physical_closed("physical-closed-green")
self._gpio_opened_green = (self._gpio_closed_green + 1) % 2
self._gpio_closed_blue = self._read_physical_closed("physical-closed-blue")
self._gpio_opened_blue = (self._gpio_closed_blue + 1) % 2
self._scheduler = AsyncScheduler()
self._ui_commands["gpio_color"] = self._cmd_gpio_color
self._ui_commands["gpio_rgb"] = self._cmd_gpio_rgb
self._ui_commands["gpio_blink"] = self._cmd_gpio_blink
self._ui_commands["gpio_state"] = self._cmd_gpio_state
def _read_physical_closed(self, config_entry_name):
if str(self._config[config_entry_name].lower()) == "low":
gpio_closed = 0
elif str(self._config[config_entry_name].lower()) == "high":
gpio_closed = 1
else:
self._logger.error("'{}' - expected 'low'/'high' but received '{}'.".
format(config_entry_name, self._config[config_entry_name].lower()))
raise ValueError("'{}' - expected 'low'/'high' but received '{}'.".
format(config_entry_name, self._config[config_entry_name].lower()))
return gpio_closed
def _cmd_gpio_color(self, args):
"""gpio_color - sets the rgb-led to the named color: GPIO_COLOR [BLACK|WHITE|RED|GREEN|BLUE|YELLOW|AQUA|MAGENTA]"""
args = parse(args)
print("{} {} '{}'\n".format(args, len(args), args[0].upper()))
if len(args) != 1:
print("Wrong arguments: {}. expected 'GPIO_COLOR [BLACK|WHITE|RED|GREEN|BLUE|YELLOW|AQUA|MAGENTA]'.\n".format(args))
elif args[0].upper() not in Color.__members__:
print("Wrong color: {}. expected 'GPIO_COLOR [BLACK|WHITE|RED|GREEN|BLUE|YELLOW|AQUA|MAGENTA]'.\n".format(args[0].upper()))
else:
color = args[0].upper()
print("Setting color to: {}.".format(color))
self._active_driver = StaticColor(self._set_pins, self._logger, self._scheduler, {"color": color})
self._active_driver.display()
def _cmd_gpio_rgb(self, args):
"""gpio_rgb - sets the rgb-led to the boolean values: GPIO_RGB Red Green Blue"""
def _check_is_bool(s):
s = s.lower()
return s in ["true", "false"]
args = parse(args)
if len(args) != 3:
print("Wrong arguments: {}. expected 'GPIO_RGB Red Green Blue'.\n".format(args))
elif not (_check_is_bool(args[0]) and _check_is_bool(args[1]) and _check_is_bool(args[2])):
print("All three parameters must be either 'True' of 'False. got: '{}'.\n".format(args))
else:
struct = {
"r": args[0].lower() == "true",
"g": args[1].lower() == "true",
"b": args[2].lower() == "true"
}
print("Setting rgb to: {}.".format(struct))
self._active_driver = StaticRGB(self._set_pins, self._logger, self._scheduler, struct)
self._active_driver.display()
def _cmd_gpio_blink(self, args):
"""gpio_blink - sets two colors and the delays for alternating between them: GPIO_BLINK [BLACK|WHITE|RED|GREEN|BLUE|YELLOW|AQUA|MAGENTA] [BLACK|WHITE|RED|GREEN|BLUE|YELLOW|AQUA|MAGENTA] delay (delay)"""
expected = "GPIO_BLINK [BLACK|WHITE|RED|GREEN|BLUE|YELLOW|AQUA|MAGENTA] [BLACK|WHITE|RED|GREEN|BLUE|YELLOW|AQUA|MAGENTA] delay (delay)"
args = parse(args)
if len(args) < 3 or len(args) > 4:
print("Wrong arguments: {}. expected '{}'.\n".format(args, expected))
elif args[0].upper() not in Color.__members__:
print("Wrong color A: {}. expected '{}'.\n".format(args[0].upper(), expected))
elif args[1].upper() not in Color.__members__:
print("Wrong color B: {}. expected '{}'.\n".format(args[1].upper(), expected))
elif len(args) == 3:
try:
delay = float(args[2])
except ValueError:
print("Wrong delay value: {}. expected a float value.\n".format(args[2]))
return
struct = {
"delay": delay,
"color_a": args[0].upper(),
"color_b": args[1].upper()
}
print("Setting symmetric blink to {}".format(struct))
self._active_driver = BlinkColorsSymmetric(self._set_pins, self._logger, self._scheduler, struct)
self._active_driver.display()
else:
try:
delay_a = float(args[2])
except ValueError:
print("Wrong delay value: {}. expected a float value.\n".format(args[2]))
return
try:
delay_b = float(args[3])
except ValueError:
print("Wrong delay value: {}. expected a float value.\n".format(args[3]))
return
struct = {
"delay_a": delay_a,
"delay_b": delay_b,
"color_a": args[0].upper(),
"color_b": args[1].upper()
}
print("Setting asymmetric blink to {}".format(struct))
self._active_driver = BlinkColorsAsymmetric(self._set_pins, self._logger, self._scheduler, struct)
self._active_driver.display()
def _cmd_gpio_state(self, args):
"""gpio_state - reads the state of the gpio: GPIO_STATE"""
if GPIO.input(self._pin_red) == self._gpio_closed_red:
state_red = "closed"
else:
state_red = "open"
if GPIO.input(self._pin_green) == self._gpio_closed_green:
state_green = "closed"
else:
state_green = "open"
if GPIO.input(self._pin_blue) == self._gpio_closed_blue:
state_blue = "closed"
else:
state_blue = "open"
print("[{}] gpios: red {} is {}, green {} is {}, blue {} is {}.".format(self._name, self._pin_red, state_red, self._pin_green, state_green, self._pin_blue, state_blue))
def _message_handler(self, msg):
"""on_message handler for topic sub 'command'."""
self._logger.info("received message '{}' on topic '{}'.".format(msg, self._topics_sub["command"]))
temp = msg.decode("UTF-8")
struct = json.loads(temp)
try:
jsonschema.validate(struct, message_schema)
except jsonschema.exceptions.ValidationError:
raise ValueError("RGBLed.'{}'.payload received unexpected message format: '{}'.".format(msg.topic, temp))
except jsonschema.exceptions.SchemaError:
raise RuntimeError("RGBLed._message_handler - schema error!")
self._active_driver = LEDDriverFactory.create(self._set_pins, self._logger, self._scheduler, struct)
self._active_driver.display()
def _set_pins(self, red, green, blue):
def _output(pin, closed, open, value):
if value:
GPIO.output(pin, closed)
else:
GPIO.output(pin, open)
_output(self._pin_red, self._gpio_closed_red, self._gpio_opened_red, red)
_output(self._pin_green, self._gpio_closed_green, self._gpio_opened_green, green)
_output(self._pin_blue, self._gpio_closed_blue, self._gpio_opened_blue, blue)
def _driver_start(self):
"""ADriver._driver_start"""
GPIO.setmode(GPIO.BCM)
GPIO.setup(self._pin_red, GPIO.OUT)
GPIO.setup(self._pin_blue, GPIO.OUT)
GPIO.setup(self._pin_green, GPIO.OUT)
self._active_driver = StaticColor(self._set_pins, self._logger, self._scheduler, {"color": self._initial_color.name})
self._active_driver.display()
self._mqtt_client.subscribe(self._topics_sub["command"], self._message_handler)
def _driver_stop(self):
"""ADriver._driver_stop"""
self._mqtt_client.unsubscribe(self._topics_sub["command"], self._message_handler)
GPIO.cleanup(self._pin_red)
GPIO.cleanup(self._pin_blue)
GPIO.cleanup(self._pin_green)
@classmethod
def _get_schema(cls):
return get_schema()
def _runtime_information(self):
return {}
def _config_information(self):
return {}
def standalone():
"""Calls the static method Output.standalone()."""
RGBLed.standalone()
if __name__ == "__main__":
    RGBLed.standalone()
/FuXi-1.4.production.tar.gz/FuXi-1.4.production/lib/Rete/TopDown.py
import itertools, copy, pickle
try:
from hashlib import md5 as createDigest
except:
from md5 import new as createDigest
from FuXi.Rete.AlphaNode import ReteToken, AlphaNode
from FuXi.Horn.HornRules import Clause, Ruleset, Rule, HornFromN3
from FuXi.Rete.RuleStore import *
from FuXi.Rete.Magic import AdornLiteral
from FuXi.Horn.PositiveConditions import *
from FuXi.Rete.Proof import *
from FuXi.Rete.Util import selective_memoize, lazyGeneratorPeek
from rdflib.Graph import ReadOnlyGraphAggregate
from rdflib import URIRef, RDF, Namespace, Variable
from rdflib.util import first
from rdflib.syntax.xml_names import split_uri
from FuXi.Rete.SidewaysInformationPassing import *
from FuXi.SPARQL import EDBQuery, normalizeBindingsAndQuery
def makeMD5Digest(value):
return createDigest(
isinstance(value, unicode) and value.encode('utf-8')
or value).hexdigest()
def PrepareSipCollection(adornedRuleset):
"""
Takes adorned ruleset and returns an RDF dataset
formed from the sips associated with each adorned
rule as named graphs. Also returns a mapping from
the head predicates of each rule to the rules that match
it - for efficient retrieval later
"""
headToRule = {}
graphs = []
secondOrderRules = set()
for rule in adornedRuleset:
ruleHead = GetOp(rule.formula.head)
if isinstance(ruleHead,Variable):
#We store second order rules (i.e., rules whose head is a
#predicate occurrence whose predicate symbol is a variable) aside
secondOrderRules.add(rule)
headToRule.setdefault(ruleHead,set()).add(rule)
if hasattr(rule,'sip'):
graphs.append(rule.sip)
#Second order rules are mapped from a None key (in order to indicate they are wildcards)
headToRule[None]=secondOrderRules
if not graphs:
return
graph = ReadOnlyGraphAggregate(graphs)
graph.headToRule = headToRule
return graph
def getBindingsFromLiteral(groundTuple,ungroundLiteral):
"""
Takes a ground fact and a query literal and returns
the mappings from variables in the query literal
to terms in the ground fact
"""
ungroundTuple = ungroundLiteral.toRDFTuple()
return ImmutableDict([(term,groundTuple[idx])
for idx,term in enumerate(ungroundTuple)
if isinstance(term,Variable) and
not isinstance(groundTuple[idx],Variable)])
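# Example (sketch): for the ground triple (ex:a, rdf:type, ex:Person) and the query
# literal ?X rdf:type ex:Person, the returned mapping is {?X: ex:a}.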
def tripleToTriplePattern(graph,term):
if isinstance(term,N3Builtin):
template = graph.templateMap[term.uri]
return "FILTER(%s)"%(template%(term.argument.n3(),
term.result.n3()))
else:
return "%s %s %s"%tuple([renderTerm(graph,term)
for term in term.toRDFTuple()])
@selective_memoize([0])
def normalizeUri(rdfTerm,revNsMap):
"""
Takes an RDF Term and 'normalizes' it into a QName (using the registered prefix)
or (unlike compute_qname) the Notation 3 form for URIs: <...URI...>
"""
try:
namespace, name = split_uri(rdfTerm)
namespace = URIRef(namespace)
except:
if isinstance(rdfTerm,Variable):
return "?%s"%rdfTerm
else:
return "<%s>"%rdfTerm
prefix = revNsMap.get(namespace)
if prefix is None and isinstance(rdfTerm,Variable):
return "?%s"%rdfTerm
elif prefix is None:
return "<%s>"%rdfTerm
else:
qNameParts = compute_qname(rdfTerm,revNsMap)
return ':'.join([qNameParts[0],qNameParts[-1]])
@selective_memoize([0])
def compute_qname(uri,revNsMap):
namespace, name = split_uri(uri)
namespace = URIRef(namespace)
prefix = revNsMap.get(namespace)
if prefix is None:
prefix = "_%s" % len(revNsMap)
revNsMap[namespace]=prefix
return (prefix, namespace, name)
def renderTerm(graph,term):
if term == RDF.type:
return ' a '
elif isinstance(term,URIRef):
qname = normalizeUri(term,hasattr(graph,'revNsMap') and graph.revNsMap or \
dict([(u,p) for p,u in graph.namespaces()]))
return u"<%s>"%term if qname[0] in ['_',':'] else qname
else:
try:
return isinstance(term,BNode) and term.n3() or graph.qname(term)
except:
return term.n3()
def RDFTuplesToSPARQL(conjunct,
edb,
isGround=False,
vars=[],
symmAtomicInclusion=False):
"""
Takes a conjunction of Horn literals and returns the
corresponding SPARQL query
"""
queryType = isGround and "ASK" or "SELECT %s"%(' '.join([v.n3()
for v in vars]))
queryShell = len(conjunct)>1 and "%s {\n%s\n}" or "%s { %s }"
if symmAtomicInclusion:
if vars:
var = vars.pop()
prefix = "%s a ?KIND"%var.n3()
else:
prefix = "%s a ?KIND"%first([lit.arg[0].n3() for lit in conjunct])
subquery = queryShell%(queryType,
"%s\nFILTER(%s)"%(
prefix,
' ||\n'.join([
'?KIND = %s'%edb.qname(GetOp(lit))
for lit in conjunct])))
else:
subquery = queryShell%(queryType,' .\n'.join(['\t'+tripleToTriplePattern(
edb,
lit)
for lit in conjunct ]))
return subquery
def lazyCollapseBooleanProofs(left,right):
"""
Function for reduce that (lazily) performs
boolean conjunction operator on a list
of 2-tuples, a boolean value and some object
. The boolean conjunction is applied on the
first item in each 2-tuple
"""
(leftBool,leftNode) = left
(rightBool,rightNode) = right
if not leftBool:
return False, None
else:
return (leftBool and rightBool) and (True,rightNode) or (False,None)
def literalIsGround(literal):
"""
Whether or not the given literal has
any variables for terms
"""
return not [term for term in GetArgs(literal,
secondOrder=True)
if isinstance(term,Variable) ]
def mergeMappings1To2(mapping1,mapping2,makeImmutable=False):
"""
Mapping merge. A 'safe' update (i.e., if the key
exists and the value is different, raise an exception)
An immutable mapping can be returned if requested
"""
newMap = {}
for k,v in mapping1.items():
val2 = mapping2.get(k)
if val2:
assert v == val2,"Failure merging %s to %s"%(mapping1,mapping2)
continue
else:
newMap[k] = mapping1[k]
newMap.update(mapping2)
return makeImmutable and MakeImmutableDict(newMap) or newMap
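# Example (sketch): mergeMappings1To2({?x: ex:a}, {?y: ex:b}) yields {?x: ex:a, ?y: ex:b};
# if both mappings bind the same key to different values, the assertion above fails.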
class RuleFailure(Exception):
def __init__(self, msg):
self.msg = msg
def __repr__(self):
return "RuleFailure: %"%self.msg
class parameterizedPredicate:
def __init__(self, externalVar):
self.externalVar = externalVar
def __call__(self, f):
def _func(item):
return f(item,self.externalVar)
return _func
def invokeRule(priorAnswers,
bodyLiteralIterator,
sip,
otherargs,
priorBooleanGoalSuccess=False,
step = None,
debug = False,
buildProof = False):
"""
    Continue invocation of the rule using (given) prior answers and the list of remaining
body literals (& rule sip). If prior answers is a list, computation is split
disjunctively
[..] By combining the answers to all these subqueries, we generate
answers for the original query involving the rule head
    Can also take a PML step and update it as it navigates the top-down proof
tree (passing it on and updating it where necessary)
"""
assert not buildProof or step is not None
proofLevel, memoizeMemory, sipCollection, factGraph, derivedPreds, processedRules = otherargs
remainingBodyList = [i for i in bodyLiteralIterator]
lazyGenerator = lazyGeneratorPeek(priorAnswers,2)
if lazyGenerator.successful:
#There are multiple answers in this step, we need to call invokeRule
#recursively for each answer, returning the first positive attempt
success = False
rt = None
_step = None
ansNo = 0
for priorAns in lazyGenerator:
ansNo += 1
try:
if buildProof:
newStep = InferenceStep(step.parent,
step.rule,
source=step.source)
newStep.antecedents = [ant for ant in step.antecedents]
else:
newStep = None
for rt,_step in\
invokeRule([priorAns],
iter([i for i in remainingBodyList]),
sip,
otherargs,
priorBooleanGoalSuccess,
newStep,
debug = debug,
buildProof = buildProof):
if rt:
yield rt,_step
except RuleFailure, e: pass
if not success:
#None of prior answers were successful
#indicate termination of rule processing
raise RuleFailure(
"Unable to solve either of %s against remainder of rule: %s"%(
ansNo,remainingBodyList))
# yield False,_InferenceStep(step.parent,step.rule,source=step.source)
else:
lazyGenerator = lazyGeneratorPeek(lazyGenerator)
projectedBindings = lazyGenerator.successful and first(lazyGenerator) or {}
#First we check if we can combine a large group of subsequent body literals
#into a single query
#if we have a template map then we use it to further
#distinguish which builtins can be solved via
        #cumulative SPARQL query - else we solve
#builtins one at a time
def sparqlResolvable(literal):
if isinstance(literal,Uniterm):
return not literal.naf and GetOp(literal) not in derivedPreds
else:
return isinstance(literal,N3Builtin) and \
literal.uri in factGraph.templateMap
def sparqlResolvableNoTemplates(literal):
if isinstance(literal,Uniterm):
return not literal.naf and GetOp(literal) not in derivedPreds
else:
return False
conjGroundLiterals = list(itertools.takewhile(
hasattr(factGraph,'templateMap') and sparqlResolvable or \
sparqlResolvableNoTemplates,
remainingBodyList))
bodyLiteralIterator = iter(remainingBodyList)
if len(conjGroundLiterals)>1:
#If there are literals to combine *and* a mapping from rule
#builtins to SPARQL FILTER templates ..
basePredicateVars = set(
reduce(lambda x,y:x+y,
map(lambda arg:list(GetVariables(arg,secondOrder=True)),
conjGroundLiterals)))
if projectedBindings:
openVars = basePredicateVars.intersection(projectedBindings)
else:
#We don't have any given bindings, so we need to treat
#the body as an open query
openVars = basePredicateVars
queryConj = EDBQuery([copy.deepcopy(lit) for lit in conjGroundLiterals],
factGraph,
openVars,
projectedBindings)
query,answers = queryConj.evaluate(debug)
if isinstance(answers,bool):
combinedAnswers = {}
rtCheck = answers
else:
if projectedBindings:
combinedAnswers = (mergeMappings1To2(ans,
projectedBindings,
makeImmutable=True) for ans in answers )
else:
combinedAnswers = ( MakeImmutableDict(ans) for ans in answers )
combinedAnsLazyGenerator = lazyGeneratorPeek(combinedAnswers)
rtCheck = combinedAnsLazyGenerator.successful
if not rtCheck:
raise RuleFailure("No answers for combined SPARQL query: %s"%query)
else:
#We have solved the previous N body literals with a single
#conjunctive query, now we need to make each of the literals
#an antecedent to a 'query' step.
if buildProof:
queryStep = InferenceStep(None,source='some RDF graph')
queryStep.groundQuery = subquery
queryStep.bindings = {}#combinedAnswers[-1]
queryHash = URIRef("tag:[email protected]:Queries#"+makeMD5Digest(subquery))
queryStep.identifier = queryHash
for subGoal in conjGroundLiterals:
subNs=NodeSet(subGoal.toRDFTuple(),
identifier=BNode())
subNs.steps.append(queryStep)
step.antecedents.append(subNs)
queryStep.parent = subNs
for rt,_step in invokeRule(
isinstance(answers,bool) and [projectedBindings] or combinedAnsLazyGenerator,
iter(remainingBodyList[len(conjGroundLiterals):]),
sip,
otherargs,
isinstance(answers,bool),
step,
debug = debug,
buildProof = buildProof):
yield rt,_step
else:
#Continue processing rule body condition
#one literal at a time
try:
bodyLiteral = bodyLiteralIterator.next()
                #if an N3 builtin, execute it using given bindings for boolean answer
#builtins are moved to end of rule when evaluating rules via sip
if isinstance(bodyLiteral,N3Builtin):
lhs = bodyLiteral.argument
rhs = bodyLiteral.result
lhs = isinstance(lhs,Variable) and projectedBindings[lhs] or lhs
rhs = isinstance(rhs,Variable) and projectedBindings[rhs] or rhs
assert lhs is not None and rhs is not None
if bodyLiteral.func(lhs,rhs):
if debug:
print >> sys.stderr, "Invoked %s(%s,%s) -> True"%(
bodyLiteral.uri,
lhs,
rhs)
#positive answer means we can continue processing the rule body
if buildProof:
ns=NodeSet(bodyLiteral.toRDFTuple(),
identifier=BNode())
step.antecedents.append(ns)
for rt,_step in invokeRule(
[projectedBindings],
bodyLiteralIterator,
sip,
otherargs,
                            priorBooleanGoalSuccess,
                            step,
debug = debug,
buildProof = buildProof):
yield rt,_step
else:
if debug:
print >> sys.stderr, "Successfully invoked %s(%s,%s) -> False"%(
bodyLiteral.uri,
lhs,
rhs)
raise RuleFailure("Failed builtin invokation %s(%s,%s)"%
(bodyLiteral.uri,
lhs,
rhs))
else:
#For every body literal, subqueries are generated according to the sip
sipArcPred = URIRef(GetOp(bodyLiteral)+'_'+'_'.join(GetArgs(bodyLiteral)))
assert len(list(IncomingSIPArcs(sip,sipArcPred)))<2
subquery = copy.deepcopy(bodyLiteral)
subquery.ground(projectedBindings)
for N,x in IncomingSIPArcs(sip,sipArcPred):
#That is, each subquery contains values for the bound arguments
#that are passed through the sip arcs entering the node
#corresponding to that literal
#Create query out of body literal and apply sip-provided bindings
subquery = copy.deepcopy(bodyLiteral)
subquery.ground(projectedBindings)
if literalIsGround(subquery):
#subquery is ground, so there will only be boolean answers
#we return the conjunction of the answers for the current
#subquery
answer = False
ns = None
answers = first(
itertools.dropwhile(
lambda item:not item[0],
SipStrategy(
subquery.toRDFTuple(),
sipCollection,
factGraph,
derivedPreds,
MakeImmutableDict(projectedBindings),
processedRules,
network = step is not None and \
step.parent.network or None,
debug = debug,
buildProof = buildProof,
memoizeMemory = memoizeMemory,
proofLevel = proofLevel)))
if answers:
answer,ns = answers
if not answer and not bodyLiteral.naf or \
(answer and bodyLiteral.naf):
                                #negative answer means the invocation of the rule fails
#either because we have a positive literal and there
#is no answer for the subgoal or the literal is
#negative and there is an answer for the subgoal
raise RuleFailure("No solutions solving ground query %s"%subquery)
else:
if buildProof:
if not answer and bodyLiteral.naf:
ns.naf = True
step.antecedents.append(ns)
#positive answer means we can continue processing the rule body
#either because we have a positive literal and answers
                                #for the subgoal or a negative literal and no answers for
                                #the goal
for rt,_step in invokeRule(
[projectedBindings],
bodyLiteralIterator,
sip,
otherargs,
True,
step,
debug = debug):
yield rt,_step
else:
_answers = \
SipStrategy(subquery.toRDFTuple(),
sipCollection,
factGraph,
derivedPreds,
MakeImmutableDict(projectedBindings),
processedRules,
network = step is not None and \
step.parent.network or None,
debug = debug,
buildProof = buildProof,
memoizeMemory = memoizeMemory,
proofLevel = proofLevel)
#solve (non-ground) subgoal
def collectAnswers(_ans):
for ans,ns in _ans:
if isinstance(ans,dict):
try:
map = mergeMappings1To2(ans,
projectedBindings,
makeImmutable=True)
yield map
except: pass
combinedAnswers = collectAnswers(_answers)
answers = lazyGeneratorPeek(combinedAnswers)
if not answers.successful and not bodyLiteral.naf or\
(bodyLiteral.naf and answers.successful):
raise RuleFailure("No solutions solving ground query %s"%subquery)
else:
#either we have a positive subgoal and answers
#or a negative subgoal and no answers
if buildProof:
if answers.successful:
goals = set([g for a,g in answers])
assert len(goals)==1
step.antecedents.append(goals.pop())
else:
newNs = NodeSet(
bodyLiteral.toRDFTuple(),
network=step.parent.network,
identifier=BNode(),
naf = True)
step.antecedents.append(newNs)
for rt,_step in invokeRule(
answers,
bodyLiteralIterator,
sip,
otherargs,
priorBooleanGoalSuccess,
step,
debug = debug,
buildProof = buildProof):
yield rt,_step
except StopIteration:
#Finished processing rule
if priorBooleanGoalSuccess:
yield projectedBindings and projectedBindings or True, step
elif projectedBindings:
#Return the most recent (cumulative) answers and the given step
yield projectedBindings, step
else:
raise RuleFailure("Finished processing rule unsuccessfully")
def refactorMapping(keyMapping,origMapping):
"""
Takes a mapping from one mapping domain (D1)
to another mapping domain (D2) as well as a mapping
    whose keys are in D1, and returns a new mapping whose keys are the
    corresponding D2 terms and whose values are taken from the original mapping
"""
if keyMapping:
refactoredMapping = {}
for inKey,outKey in keyMapping.items():
if inKey in origMapping:
refactoredMapping[outKey]=origMapping[inKey]
return refactoredMapping
else:
return origMapping
def prepMemiozedAns(ans):
return isinstance(ans,dict) and MakeImmutableDict(ans) or ans
def SipStrategy(query,
sipCollection,
factGraph,
derivedPreds,
bindings={},
processedRules = None,
network = None,
debug = False,
buildProof = False,
memoizeMemory = None,
proofLevel = 1):
"""
Accordingly, we define a sip-strategy for computing the answers to a query
expressed using a set of Datalog rules, and a set of sips, one for each
adornment of a rule head, as follows...
Each evaluation uses memoization (via Python decorators) but also relies on well-formed
    rewrites for using the semi-naive bottom-up method over large SPARQL data.
"""
memoizeMemory = memoizeMemory and memoizeMemory or {}
queryLiteral = BuildUnitermFromTuple(query)
processedRules = processedRules and processedRules or set()
if bindings:
#There are bindings. Apply them to the terms in the query
queryLiteral.ground(bindings)
if debug:
print >> sys.stderr, "%sSolving"%('\t'*proofLevel), queryLiteral, bindings
#Only consider ground triple pattern isomorphism with matching bindings
goalRDFStatement = queryLiteral.toRDFTuple()
if queryLiteral in memoizeMemory:
if debug:
print >> sys.stderr, "%sReturning previously calculated results for "%\
('\t'*proofLevel), queryLiteral
for answers in memoizeMemory[queryLiteral]:
yield answers
elif AlphaNode(goalRDFStatement).alphaNetworkHash(
True,
skolemTerms=bindings.values()) in\
[AlphaNode(r.toRDFTuple()).alphaNetworkHash(True,
skolemTerms=bindings.values())
for r in processedRules
if AdornLiteral(goalRDFStatement).adornment == \
r.adornment]:
if debug:
print >> sys.stderr, "%s Goal already processed..."%\
('\t'*proofLevel)
else:
isGround = literalIsGround(queryLiteral)
if buildProof:
ns=NodeSet(goalRDFStatement,network=network,identifier=BNode())
else:
ns = None
adornedProgram = factGraph.adornedProgram
queryPred = GetOp(queryLiteral)
if sipCollection is None:
rules = []
else:
#For every rule head matching the query, we invoke the rule,
#thus determining an adornment, and selecting a sip to follow
rules = sipCollection.headToRule.get(queryPred,set())
if None in sipCollection.headToRule:
#If there are second order rules, we add them
#since they are a 'wildcard'
rules.update(sipCollection.headToRule[None])
#maintained list of rules that haven't been processed before and
#match the query
validRules = []
#each subquery contains values for the bound arguments that are passed
#through the sip arcs entering the node corresponding to that literal. For
#each subquery generated, there is a set of answers.
answers = []
variableMapping = {}
#Some TBox queries can be 'joined' together into SPARQL queries against
#'base' predicates via an RDF dataset
#These atomic concept inclusion axioms can be evaluated together
#using a disjunctive operator at the body of a horn clause
#where each item is a query of the form uniPredicate(?X):
#Or( uniPredicate1(?X1), uniPredicate2(?X), uniPredicate3(?X),..)
#In this way massive, conjunctive joins can be 'mediated'
#between the stated facts and the top-down solver
@parameterizedPredicate([i for i in derivedPreds])
def IsAtomicInclusionAxiomRHS(rule,dPreds):
"""
This is an atomic inclusion axiom with
a variable (or bound) RHS: uniPred(?ENTITY)
"""
bodyList = list(iterCondition(rule.formula.body))
body = first(bodyList)
return GetOp(body) not in dPreds and \
len(bodyList) == 1 and \
body.op == RDF.type
atomicInclusionAxioms = list(ifilter(IsAtomicInclusionAxiomRHS,rules))
if atomicInclusionAxioms and len(atomicInclusionAxioms) > 1:
if debug:
print >> sys.stderr, "\tCombining atomic inclusion axioms: "
pprint(atomicInclusionAxioms,sys.stderr)
if buildProof:
factStep = InferenceStep(ns,source='some RDF graph')
ns.steps.append(factStep)
axioms = [rule.formula.body
for rule in atomicInclusionAxioms]
            #attempt to exhaustively apply any available substitutions
            #and determine if the query is fully ground
vars = [v for v in GetArgs(queryLiteral,
secondOrder=True)
if isinstance(v,Variable)]
openVars,axioms,_bindings = \
normalizeBindingsAndQuery(vars,
bindings,
axioms)
if openVars:
mappings = {}
#See if we need to do any variable mappings from the query literals
#to the literals in the applicable rules
query,rt = EDBQuery(axioms,
factGraph,
openVars,
_bindings).evaluate(debug,
symmAtomicInclusion=True)
if buildProof:
                    factStep.groundQuery = query
for ans in rt:
if buildProof:
factStep.bindings.update(ans)
memoizeMemory.setdefault(queryLiteral,set()).add(
(prepMemiozedAns(ans),ns))
yield ans, ns
else:
#All the relevant derivations have been explored and the result
#is a ground query we can directly execute against the facts
if buildProof:
factStep.bindings.update(bindings)
query,rt = EDBQuery(axioms,
factGraph,
_bindings).evaluate(debug,
symmAtomicInclusion=True)
if buildProof:
                    factStep.groundQuery = query
memoizeMemory.setdefault(queryLiteral,set()).add(
(prepMemiozedAns(rt),ns))
yield rt,ns
rules = ifilter(lambda i:not IsAtomicInclusionAxiomRHS(i),rules)
for rule in rules:
#An exception is the special predicate ph; it is treated as a base
#predicate and the tuples in it are those supplied for qb by unification.
headBindings = getBindingsFromLiteral(goalRDFStatement,rule.formula.head)
comboBindings = dict([(k,v) for k,v in itertools.chain(
bindings.items(),
headBindings.items())])
varMap = rule.formula.head.getVarMapping(queryLiteral)
if headBindings and\
[term for term in rule.formula.head.getDistinguishedVariables(True)
if varMap.get(term,term) not in headBindings]:
continue
subQueryAnswers = []
dontStop = True
projectedBindings = comboBindings.copy()
if debug:
print >> sys.stderr, "%sProcessing rule"%\
('\t'*proofLevel), rule.formula
if debug and sipCollection:
print >>sys.stderr,"Sideways Information Passing (sip) graph for %s: "%queryLiteral
print >>sys.stdout, sipCollection.serialize(format='n3')
for sip in SIPRepresentation(sipCollection):
print >>sys.stderr,sip
try:
#Invoke the rule
if buildProof:
step = InferenceStep(ns,rule.formula)
else:
step = None
for rt,step in\
invokeRule([headBindings],
iter(iterCondition(rule.formula.body)),
rule.sip,
(proofLevel + 1,
memoizeMemory,
sipCollection,
factGraph,
derivedPreds,
processedRules.union([
AdornLiteral(query)])),
step=step,
debug = debug):
if rt:
if isinstance(rt,dict):
#We received a mapping and must rewrite it via
#correlation between the variables in the rule head
#and the variables in the original query (after applying
#bindings)
varMap = rule.formula.head.getVarMapping(queryLiteral)
if varMap:
rt = MakeImmutableDict(refactorMapping(varMap,rt))
if buildProof:
step.bindings = rt
else:
if buildProof:
step.bindings = headBindings
validRules.append(rule)
if buildProof:
ns.steps.append(step)
if isGround:
yield True,ns
else:
memoizeMemory.setdefault(queryLiteral,set()).add(
(prepMemiozedAns(rt),
ns))
yield rt, ns
except RuleFailure, e:
#Clean up failed antecedents
if buildProof:
if ns in step.antecedents:
step.antecedents.remove(ns)
if not validRules:
#No rules matching, query factGraph for answers
successful = False
if buildProof:
factStep = InferenceStep(ns,source='some RDF graph')
ns.steps.append(factStep)
if not isGround:
subquery,rt = EDBQuery([queryLiteral],
factGraph,
[v for v in GetArgs(queryLiteral,
secondOrder=True)
if isinstance(v,Variable)],
bindings).evaluate(debug)
if buildProof:
factStep.groundQuery = subquery
for ans in rt:
successful =True
if buildProof:
factStep.bindings.update(ans)
memoizeMemory.setdefault(queryLiteral,set()).add(
(prepMemiozedAns(ans),
ns))
yield ans, ns
if not successful and queryPred not in derivedPreds:
                #Open query didn't return any results and the predicate
                #is not a derived predicate, so we have failed
memoizeMemory.setdefault(queryLiteral,set()).add((False,ns))
yield False,ns
else:
#All the relevant derivations have been explored and the result
#is a ground query we can directly execute against the facts
if buildProof:
factStep.bindings.update(bindings)
subquery,rt = EDBQuery([queryLiteral],
factGraph,
bindings).evaluate(debug)
if buildProof:
factStep.groundQuery = subquery
memoizeMemory.setdefault(queryLiteral,set()).add(
(prepMemiozedAns(rt),
ns))
yield rt,ns
def test():
import doctest
doctest.testmod()
if __name__ == '__main__':
test() | PypiClean |
/MuPhyN-0.1.1.post4-py3-none-any.whl/muphyn/packages/interface/properties_pages/moveable_graphical_element_properties_editor.py |
# General Imports
# PyQt6 Imports
from PyQt6.QtCore import QCoreApplication, QPointF, QSizeF, Qt, pyqtSlot
# Project Imports
from muphyn.packages.interface.properties_pages.abstract_properties_editor import AbstractPropertiesEditor
from muphyn.packages.interface.properties_pages.rotation_slider import RotationSlider
from muphyn.packages.interface.models.graphical_models.abstract_moveable_graphical_element import AbstractMoveableGraphicalElement
from muphyn.packages.interface.models.dbl_spin_box import DblSpinBox
from muphyn.packages.interface import PropertyLabel, TitlePropertyLabel
#-----------------------------------
# Class
#-----------------------------------
class MoveableGraphicalElementPropertiesEditor (AbstractPropertiesEditor) :
"""Est la classe permettant de modifier les paramètres d'un élément mobile dans la scène."""
# -------------
# Constructors
# -------------
def __init__ (self, moveable_element : AbstractMoveableGraphicalElement) :
AbstractPropertiesEditor.__init__(self, moveable_element)
self._size_semaphore = True
self._pos_semaphore = True
self._rot_semaphore = True
self._moveable_element : AbstractMoveableGraphicalElement = None
self.moveable_element = moveable_element
self._size_semaphore = False
self._pos_semaphore = False
self._rot_semaphore = False
# -------------
# Properties
# -------------
@property
def moveable_element (self) -> AbstractMoveableGraphicalElement :
"""Permet de récuperer l'élément graphique mobile."""
return self._moveable_element
@moveable_element.setter
def moveable_element (self, moveable_element_ : AbstractMoveableGraphicalElement) -> None :
"""Permet de modifier l'élément graphique mobile."""
if not(self._moveable_element is None) :
self._moveable_element.size_changed.disconnect(self.element_size_changed)
self._moveable_element.position_changed.disconnect(self.element_position_changed)
self._moveable_element.rotation_changed.disconnect(self.element_rotation_changed)
self._moveable_element = moveable_element_
self._model = moveable_element_
if not(self._moveable_element is None) :
self._spn_x.setValue(self._moveable_element.x())
self._spn_y.setValue(self._moveable_element.y())
self._spn_width.setValue(self.moveable_element.size.width())
self._spn_height.setValue(self.moveable_element.size.height())
self._sldr_rotation.setValue(int(self._moveable_element.rotation()))
self._lbl_rotation_value.setText(str(self._moveable_element.rotation()))
self._moveable_element.size_changed.connect(self.element_size_changed)
self._moveable_element.position_changed.connect(self.element_position_changed)
self._moveable_element.rotation_changed.connect(self.element_rotation_changed)
# -------------
# Methods
# -------------
@pyqtSlot()
def spn_pos_value_changed (self) -> None :
"""Est la méthode appelée lorsque l'utilisateur change la position via une des deux spin box."""
if self._pos_semaphore :
return
self._pos_semaphore = True
self._moveable_element.position = QPointF(self._spn_x.value(), self._spn_y.value())
self._pos_semaphore = False
def element_size_changed (self) :
"""Est la méthode appelée lorsque la box change de taille."""
if self._size_semaphore :
return
self._size_semaphore = True
self._spn_width.setValue(self._moveable_element.size.width())
self._spn_height.setValue(self._moveable_element.size.height())
self._size_semaphore = False
@pyqtSlot()
def spn_size_value_changed (self) :
"""Est la méthode appelée lorsque l'utilisateur change la taille via une des deux spin box."""
if self._pos_semaphore :
return
self._pos_semaphore = True
# Get size
new_size = QSizeF(self._spn_width.value(), self._spn_height.value())
if new_size != self._moveable_element.size:
# Set size
self._moveable_element.size = QSizeF(self._spn_width.value(), self._spn_height.value())
# Get size from moveable item
self._spn_width.setValue(self._moveable_element.size.width())
self._spn_height.setValue(self._moveable_element.size.height())
self._pos_semaphore = False
def element_position_changed (self) :
"""Est la méthode appelée lorsque la box change de taille."""
if self._pos_semaphore :
return
self._pos_semaphore = True
self._spn_x.setValue(self._moveable_element.position.x())
self._spn_y.setValue(self._moveable_element.position.y())
self._pos_semaphore = False
def element_rotation_changed (self) :
"""Est la méthode appelée lorsque la box change de taille."""
if self._rot_semaphore :
return
self._rot_semaphore = True
        self._sldr_rotation.setValue(int(self._moveable_element.rotation()))
self._lbl_rotation_value.setText(str(self._sldr_rotation.value()))
self._rot_semaphore = False
@pyqtSlot()
def sldr_rotation_value_changed (self) -> None :
"""Est la méthode appelée lorsque l'utilisateur change la rotation de l'élément via le slider."""
if self._rot_semaphore :
return
self._rot_semaphore = True
self._lbl_rotation_value.setText(str(self._sldr_rotation.value()))
self._moveable_element.setRotation(self._sldr_rotation.value())
self._rot_semaphore = False
def init_ui (self) :
if not self.objectName() :
self.setObjectName(u"pnl_moveable_graphical_element_properties_editor")
self._lbl_geometry : TitlePropertyLabel = TitlePropertyLabel()
self._lbl_geometry.setObjectName(u"_lbl_geometry")
# Add Row
row = self.layout().rowCount()
self.layout().addWidget(self._lbl_geometry, row, 0)
self._lbl_x : PropertyLabel = PropertyLabel()
self._lbl_x.setObjectName(u"_lbl_x")
self._spn_x : DblSpinBox = DblSpinBox()
self._spn_x.setObjectName(u"_spn_x")
self._spn_x.setDecimals(0)
self._spn_x.valueChanged.connect(self.spn_pos_value_changed)
# Add Row
row = self.layout().rowCount()
self.layout().addWidget(self._lbl_x, row, 0)
self.layout().addWidget(self._spn_x, row, 1)
self._lbl_y : PropertyLabel = PropertyLabel()
self._lbl_y.setObjectName(u"_lbl_y")
self._spn_y : DblSpinBox = DblSpinBox()
self._spn_y.setObjectName(u"_spn_y")
self._spn_y.setDecimals(0)
self._spn_y.valueChanged.connect(self.spn_pos_value_changed)
# Add Row
row = self.layout().rowCount()
self.layout().addWidget(self._lbl_y, row, 0)
self.layout().addWidget(self._spn_y, row, 1)
self._lbl_width : PropertyLabel = PropertyLabel()
self._lbl_width.setObjectName(u"_lbl_width")
self._spn_width : DblSpinBox = DblSpinBox()
self._spn_width.setObjectName(u"_spn_width")
self._spn_width.setDecimals(0)
self._spn_width.setMinimum(0)
self._spn_width.valueChanged.connect(self.spn_size_value_changed)
# Add Row
row = self.layout().rowCount()
self.layout().addWidget(self._lbl_width, row, 0)
self.layout().addWidget(self._spn_width, row, 1)
self._lbl_height : PropertyLabel = PropertyLabel()
self._lbl_height.setObjectName(u"_lbl_y")
self._spn_height : DblSpinBox = DblSpinBox()
self._spn_height.setObjectName(u"_spn_height")
self._spn_height.setDecimals(0)
self._spn_height.valueChanged.connect(self.spn_size_value_changed)
# Add Row
row = self.layout().rowCount()
self.layout().addWidget(self._lbl_height, row, 0)
self.layout().addWidget(self._spn_height, row, 1)
self._lbl_rotation : PropertyLabel = PropertyLabel()
self._lbl_rotation.setObjectName(u"_lbl_rotation")
self._sldr_rotation : RotationSlider = RotationSlider()
self._sldr_rotation.setOrientation(Qt.Orientation.Horizontal)
self._sldr_rotation.setObjectName(u"_sldr_rotation")
self._sldr_rotation.valueChanged.connect(self.sldr_rotation_value_changed)
# Add Row
row = self.layout().rowCount()
self.layout().addWidget(self._lbl_rotation, row, 0)
self.layout().addWidget(self._sldr_rotation, row, 1)
self._lbl_rotation_value : PropertyLabel = PropertyLabel()
self._lbl_rotation_value.setObjectName(u"_lbl_rotation_value")
# Add Row
row = self.layout().rowCount()
self.layout().addWidget(self._lbl_rotation_value, row, 1)
def translate_ui (self) -> None :
self._lbl_geometry.setText(QCoreApplication.translate(self.objectName(), u"Geometry : ", None))
self._lbl_x.setText(QCoreApplication.translate(self.objectName(), u"X : ", None))
self._lbl_width.setText(QCoreApplication.translate(self.objectName(), u"Width : ", None))
self._lbl_y.setText(QCoreApplication.translate(self.objectName(), u"Y : ", None))
self._lbl_height.setText(QCoreApplication.translate(self.objectName(), u"Height : ", None))
self._lbl_rotation.setText(QCoreApplication.translate(self.objectName(), u"Rotate angle : ", None))
def unload(self) -> None:
pass | PypiClean |
/BGT_Client-1.0.2-py3-none-any.whl/dgt_sdk/protobuf/client_heads_pb2.py |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='dgt_sdk/protobuf/client_heads.proto',
package='',
syntax='proto3',
serialized_options=_b('\n\025sawtooth.sdk.protobufP\001Z\013client_head'),
serialized_pb=_b('\n#dgt_sdk/protobuf/client_heads.proto\"(\n\x15\x43lientHeadsGetRequest\x12\x0f\n\x07head_id\x18\x01 \x01(\t\"\x86\x01\n\x16\x43lientHeadsGetResponse\x12.\n\x06status\x18\x01 \x01(\x0e\x32\x1e.ClientHeadsGetResponse.Status\x12\r\n\x05heads\x18\x02 \x03(\t\"-\n\x06Status\x12\x10\n\x0cSTATUS_UNSET\x10\x00\x12\x06\n\x02OK\x10\x01\x12\t\n\x05\x45RROR\x10\x02\"$\n\x12\x44\x61gGraphGetRequest\x12\x0e\n\x06\x66ormat\x18\x01 \x01(\t\"\x80\x01\n\x13\x44\x61gGraphGetResponse\x12+\n\x06status\x18\x01 \x01(\x0e\x32\x1b.DagGraphGetResponse.Status\x12\r\n\x05graph\x18\x02 \x01(\t\"-\n\x06Status\x12\x10\n\x0cSTATUS_UNSET\x10\x00\x12\x06\n\x02OK\x10\x01\x12\t\n\x05\x45RROR\x10\x02\x42&\n\x15sawtooth.sdk.protobufP\x01Z\x0b\x63lient_headb\x06proto3')
)
_CLIENTHEADSGETRESPONSE_STATUS = _descriptor.EnumDescriptor(
name='Status',
full_name='ClientHeadsGetResponse.Status',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STATUS_UNSET', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OK', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=171,
serialized_end=216,
)
_sym_db.RegisterEnumDescriptor(_CLIENTHEADSGETRESPONSE_STATUS)
_DAGGRAPHGETRESPONSE_STATUS = _descriptor.EnumDescriptor(
name='Status',
full_name='DagGraphGetResponse.Status',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='STATUS_UNSET', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OK', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR', index=2, number=2,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=171,
serialized_end=216,
)
_sym_db.RegisterEnumDescriptor(_DAGGRAPHGETRESPONSE_STATUS)
_CLIENTHEADSGETREQUEST = _descriptor.Descriptor(
name='ClientHeadsGetRequest',
full_name='ClientHeadsGetRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='head_id', full_name='ClientHeadsGetRequest.head_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=39,
serialized_end=79,
)
_CLIENTHEADSGETRESPONSE = _descriptor.Descriptor(
name='ClientHeadsGetResponse',
full_name='ClientHeadsGetResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='ClientHeadsGetResponse.status', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='heads', full_name='ClientHeadsGetResponse.heads', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_CLIENTHEADSGETRESPONSE_STATUS,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=82,
serialized_end=216,
)
_DAGGRAPHGETREQUEST = _descriptor.Descriptor(
name='DagGraphGetRequest',
full_name='DagGraphGetRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='format', full_name='DagGraphGetRequest.format', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=218,
serialized_end=254,
)
_DAGGRAPHGETRESPONSE = _descriptor.Descriptor(
name='DagGraphGetResponse',
full_name='DagGraphGetResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='status', full_name='DagGraphGetResponse.status', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='graph', full_name='DagGraphGetResponse.graph', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_DAGGRAPHGETRESPONSE_STATUS,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=257,
serialized_end=385,
)
_CLIENTHEADSGETRESPONSE.fields_by_name['status'].enum_type = _CLIENTHEADSGETRESPONSE_STATUS
_CLIENTHEADSGETRESPONSE_STATUS.containing_type = _CLIENTHEADSGETRESPONSE
_DAGGRAPHGETRESPONSE.fields_by_name['status'].enum_type = _DAGGRAPHGETRESPONSE_STATUS
_DAGGRAPHGETRESPONSE_STATUS.containing_type = _DAGGRAPHGETRESPONSE
DESCRIPTOR.message_types_by_name['ClientHeadsGetRequest'] = _CLIENTHEADSGETREQUEST
DESCRIPTOR.message_types_by_name['ClientHeadsGetResponse'] = _CLIENTHEADSGETRESPONSE
DESCRIPTOR.message_types_by_name['DagGraphGetRequest'] = _DAGGRAPHGETREQUEST
DESCRIPTOR.message_types_by_name['DagGraphGetResponse'] = _DAGGRAPHGETRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ClientHeadsGetRequest = _reflection.GeneratedProtocolMessageType('ClientHeadsGetRequest', (_message.Message,), dict(
DESCRIPTOR = _CLIENTHEADSGETREQUEST,
__module__ = 'dgt_sdk.protobuf.client_heads_pb2'
# @@protoc_insertion_point(class_scope:ClientHeadsGetRequest)
))
_sym_db.RegisterMessage(ClientHeadsGetRequest)
ClientHeadsGetResponse = _reflection.GeneratedProtocolMessageType('ClientHeadsGetResponse', (_message.Message,), dict(
DESCRIPTOR = _CLIENTHEADSGETRESPONSE,
__module__ = 'dgt_sdk.protobuf.client_heads_pb2'
# @@protoc_insertion_point(class_scope:ClientHeadsGetResponse)
))
_sym_db.RegisterMessage(ClientHeadsGetResponse)
DagGraphGetRequest = _reflection.GeneratedProtocolMessageType('DagGraphGetRequest', (_message.Message,), dict(
DESCRIPTOR = _DAGGRAPHGETREQUEST,
__module__ = 'dgt_sdk.protobuf.client_heads_pb2'
# @@protoc_insertion_point(class_scope:DagGraphGetRequest)
))
_sym_db.RegisterMessage(DagGraphGetRequest)
DagGraphGetResponse = _reflection.GeneratedProtocolMessageType('DagGraphGetResponse', (_message.Message,), dict(
DESCRIPTOR = _DAGGRAPHGETRESPONSE,
__module__ = 'dgt_sdk.protobuf.client_heads_pb2'
# @@protoc_insertion_point(class_scope:DagGraphGetResponse)
))
_sym_db.RegisterMessage(DagGraphGetResponse)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope) | PypiClean |
/GailBot-0.2a0-py3-none-any.whl/gailbot/core/pipeline/pipeline.py | from typing import List, Dict, Any
from dataclasses import dataclass
from .component import Component, ComponentState, ComponentResult
from gailbot.core.utils.threads import ThreadPool
from gailbot.core.utils.logger import makelogger
import networkx as nx
Failure = ComponentResult(ComponentState.FAILED, None, 0)
logger = makelogger("pipeline")
@dataclass
class DataStream:
data: Any = None
class Pipeline:
"""
Defines a class for the pipeline that runs the dependency map.
"""
def __init__(
self,
dependency_map: Dict[str, List[str]],
components: Dict[str, Component],
num_threads: int,
):
"""
Dependency map describes the execution order.
"""
self.dependency_map = dependency_map
self.components = components
self.threadpool = ThreadPool(num_threads)
self._generate_dependency_graph(dependency_map, components)
def __repr__(self) -> str:
"""
Accesses the pipeline's dependency graph.
Args:
self
Returns:
            String representation of the pipeline's dependency graph.
"""
return str(self.get_dependency_graph())
def __call__(
self,
base_input: Any,
additional_component_kwargs: Dict = dict()
# NOTE: base_input is passed only to the first component.
) -> Dict[str, ComponentState]:
"""
Execute the pipeline by running all components in order of the dependency
        graph. This wraps data as DataStream before passing it between components.
Additionally, each component receives the output of its dependencies.
Args:
base_input:
a list of input arguments that will be passed to the first
component of the graph
Additional_component_kwargs:
passed as a dereferenced dictionary to each component.
Returns:
Dictionary containing keys mapping to the component states
corresponding to the result of each task.
Note:
each component is contained in a Component class
"""
successors = self.get_dependency_graph()
logger.info(successors)
logger.info(self.dependency_graph)
logger.info(self.components)
name_to_results: Dict[
str, ComponentResult
] = dict() # map component name to result
while True:
# executables is a list of Component who currently has no dependent node
executables: List[Component] = [
c for c, d in self.dependency_graph.in_degree if d == 0
]
# NOTE: bug fix from July 3rd 2023
threadkey_to_exe: Dict[
str, Component
] = dict() # map thread key to executable
# exit the loop if no nodes left.
if len(executables) == 0:
break
for executable in executables:
exe_name: str = self.component_to_name[executable]
dependency_resolved = True
# check the result output of exe_name's dependency component
if len(successors[exe_name]) > 0:
dep_outputs: Dict[str, ComponentResult] = {
k: name_to_results[k] for k in successors[exe_name]
}
else:
dep_outputs = {
"base": ComponentResult(
state=ComponentState.SUCCESS, result=base_input, runtime=0
)
}
for res in dep_outputs.values():
if res.state == ComponentState.FAILED:
name_to_results[exe_name] = Failure
if self.dependency_graph.has_node(executable):
self.dependency_graph.remove_node(executable)
dependency_resolved = False
args = [dep_outputs]
if dependency_resolved:
key = self.threadpool.add_task(
executable, args=args, kwargs=additional_component_kwargs
)
logger.info(f" the component {executable} get the thread key {key}")
threadkey_to_exe[key] = executable
# wait until all tasks finishes before next iteration
self.threadpool.wait_for_all_completion(error_fun=lambda: None)
for key, exe in threadkey_to_exe.items():
# get the task result from the thread pool
exe_res = self.threadpool.get_task_result(key)
self.dependency_graph.remove_node(exe)
name = self.component_to_name[exe]
if exe_res and exe_res.state == ComponentState.SUCCESS:
# add to result if success
name_to_results[name] = exe_res
else:
# add the failed result on failure
name_to_results[name] = Failure
# Regenerate graph
self._generate_dependency_graph(self.dependency_map, self.components)
return {k: v.state for k, v in name_to_results.items()}
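    # Illustrative usage sketch, kept as comments: the component names
    # "read" and "transcribe" and the base_input value below are
    # hypothetical placeholders, not part of this module.
    #
    #   pipeline = Pipeline(
    #       dependency_map={"read": [], "transcribe": ["read"]},
    #       components={"read": read_component, "transcribe": transcribe_component},
    #       num_threads=2,
    #   )
    #   states = pipeline(base_input="path/to/input")
    #   # 'states' maps each component name to its final ComponentState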
def component_names(self) -> List[str]:
"""
Gets names of all components in the dependency map.
Args:
self
Returns:
List of strings containing components.
"""
return list(self.name_to_component.keys())
def is_component(self, name: str) -> bool:
return name in self.component_names()
def component_parents(self, name: str) -> List[str]:
"""
Get the component(s) that the given component is dependent on.
Args:
name: string containing the name of the child component.
Returns:
List of strings of the names of the given component's parent
components.
Raises exception if the given name doesn't correspond to an
existing component.
"""
if not self.is_component(name):
raise Exception(f"No component named {name}")
edges = list(self.dependency_graph.in_edges(self.name_to_component[name]))
return [self.component_to_name[e[0]] for e in edges]
def component_children(self, name: str) -> List[str]:
"""
Gets component(s) that are dependent on the given component.
Args:
name: string containing the name of the child component.
Returns:
List of strings of the names of components that are dependent on the
given component.
Raises exception if the given name doesn't correspond to an
existing component.
"""
if not self.is_component(name):
raise Exception(f"No component named {name}")
edges = list(self.dependency_graph.out_edges(self.name_to_component[name]))
return [self.component_to_name[e[1]] for e in edges]
def get_dependency_graph(self) -> Dict:
"""
Returns a map from each component to the components it is dependent on.
Args:
self
Returns:
Dictionary mapping the given component to the components it is dependent upon.
"""
view = dict()
for name in self.name_to_component:
view[name] = self.component_parents(name)
return view
#####
# PRIVATE METHODS
####
def _does_cycle_exist(self, graph: nx.Graph) -> bool:
"""
Determines if there are existing cycles in the given graph.
Args:
graph: graph in which to determine if there are cycles.
Returns:
True if there are any cycles in the given graph, false if not.
"""
try:
nx.find_cycle(graph, orientation="original")
return True
except:
return False
def _generate_dependency_graph(
self, dependency_map: Dict[str, List[str]], components: Dict[str, Component]
) -> None:
"""
Generates a dependency graph containing components from a given dictionary.
Assumes that the dependency_map keys are in order i.e, a component will
be seen as a key before it is seen as a dependency.
Args:
dependency_map: dictionary containing lists of strings to map between.
components: dictionary containing components to insert into the newly
created dependency graph.
Returns:
A graph of components mapping from the component's name to its dependencies.
Raises exception if an element of the dependency graph does not correspond
to a valid element.
"""
self.dependency_graph = nx.DiGraph()
self.name_to_component = dict()
# Verify that the same keys exist in both dicts
assert (
dependency_map.keys() == components.keys()
), f"Component and dependency maps should have similar keys"
# # Mapping from component name to dependencies
for name, dependencies in dependency_map.items():
# This node must exist as a Component
if not name in components:
raise Exception(f"Node does not exist in the component map: {name}")
# Component cannot be added twice
if self.is_component(name):
raise Exception(f"Repeated component {name}")
# Create a component and add to main graph
self.dependency_graph.add_node(components[name])
# We want to add directed edges from all the dependencies to the
# current node. This implies that the dependencies should already
# exist as nodes.
for dep in dependencies:
if not dep in components:
raise Exception(f"Unseen component added as dependency")
self.dependency_graph.add_edge(components[dep], components[name])
# NOTE: Cycles are not supported
if self._does_cycle_exist(self.dependency_graph):
raise Exception(f"Cycle found in execution logic")
self.name_to_component[name] = components[name]
        self.component_to_name: Dict[Component, str] = {
v: k for k, v in self.name_to_component.items()
} | PypiClean |
/NlpToolkit-PropBank-1.0.21.tar.gz/NlpToolkit-PropBank-1.0.21/README.md | Turkish PropBank (TRopBank)
============
Turkish PropBank (TRopBank) is a corpus of over 17.000 Turkish verbs, each annotated with their syntactic arguments and thematic roles. Arguments are bits of essential information attached to a verb (such as subject or object), and thematic roles are semantic classifications associated with these arguments (such as agent or patient). This resource allows matching between the syntax layer and the semantics layer for the processing of Turkish data.
In the field of SRL, PropBank is one of the studies widely recognized by the computational linguistics community. PropBank is a bank of propositions in which the predicate-argument information of the corpora is annotated, and the semantic roles or arguments that each verb can take are posited.
Each verb has a frame file, which contains arguments applicable to that verb. Frame files may include more than one roleset with respect to the senses of the given verb. In the roleset of a verb sense, argument labels Arg0 to Arg5 are described according to the meaning of the verb. For the example below, the predicate is “announce” from PropBank, Arg0 is “announcer”, Arg1 is “entity announced”, and ArgM-TMP is “time attribute”.
[<sub>ARG0</sub> Türk Hava Yolları] [<sub>ARG1</sub> indirimli satışlarını] [<sub>ARGM-TMP</sub> bu Pazartesi] [<sub>PREDICATE</sub> açıkladı].
[<sub>ARG0</sub> Turkish Airlines] [<sub>PREDICATE</sub> announced] [<sub>ARG1</sub> its discounted fares] [<sub>ARGM-TMP</sub> this Monday].
The following Table shows typical semantic role types. Only Arg0 and Arg1 indicate the same thematic roles across different verbs: Arg0 stands for the Agent or Causer and Arg1 is the Patient or Theme. The rest of the thematic roles can vary across different verbs. They can stand for Instrument, Start point, End point, Beneficiary, or Attribute. Moreover, PropBank uses ArgM’s as modifier labels indicating time, location, temporal, goal, cause etc., where the role is not specific to a single verb group; it generalizes over the entire corpus instead.
|Tag|Meaning|
|---|---|
|Arg0|Agent or Causer|
|ArgM-EXT|Extent|
|Arg1|Patient or Theme|
|ArgM-LOC|Locatives|
|Arg2|Instrument, start point, end point, beneficiary, or attribute|
|ArgM-CAU|Cause|
|ArgM-MNR|Manner|
|ArgM-DIS|Discourse|
|ArgM-ADV|Adverbials|
|ArgM-DIR|Directionals|
|ArgM-PNC|Purpose|
|ArgM-TMP|Temporals|
+ Directional modifiers give information regarding the path of motion in the sentence. Directional modifiers may be mistakenly tagged as locatives.
+ Locatives are used for the place where the action takes place.
+ Manners define how the action is performed.
+ Extent markers represent the amount of change that occurs in the action.
+ Temporal modifiers keep the time of the action.
+ Reciprocals are reflexives that refer to other arguments, like “himself,” “itself,” “together,” “each other,” and “both.”
+ Secondary predication markers are used for adjuncts of the predicate, which holds predicate structure.
+ Purpose clauses show the motivation for the action. Cause clauses simply show the reason for an action.
+ Discourse markers connect the sentence to the previous sentence, such as “also,” “however,” “as well,” and “but.”
+ Adverbials are used for syntactic elements that modify the sentence and are not labeled with one of the modifier tags stated above.
+ “Will,” “may,” “can,” “must,” “shall,” “might,” “should,” “could,” “would,” and also “going (to),” “have (to),” and “used (to)” are modality adjuncts of the predicate and tagged as modal in PropBank.
+ Negation is used to tag negative markers of the sentences.
## Data Format
The structure of a sample frameset is as follows:
<FRAMESET id="TR10-0006410">
<ARG name="ARG0">Engeli kaldıran kişi</ARG>
<ARG name="ARG1">Engelini kaldırdığı şey</ARG>
</FRAMESET>
Each entry in the frame file is enclosed by <FRAMESET> and </FRAMESET> tags. id shows the unique identifier given to the frameset, which is the same ID in the synset file of the corresponding verb sense. <ARG> tags denote the semantic roles of the corresponding frame.
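As a quick illustration, a frame file in this format can be read with Python's standard `xml.etree.ElementTree`. The sketch below is a minimal example that assumes the file holds a single <FRAMESET> element, as in the sample above; the file name used here is only a placeholder.
    import xml.etree.ElementTree as ET
    # Parse one frame file (the file name is a placeholder)
    frameset = ET.parse("TR10-0006410.xml").getroot()
    # The id attribute matches the synset id of the corresponding verb sense
    print(frameset.attrib["id"])
    # Each ARG element carries the argument label in its "name" attribute
    # and the role description as its text
    for arg in frameset.findall("ARG"):
        print(arg.attrib["name"], "-", arg.text)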
Video Lectures
============
[<img src="https://github.com/StarlangSoftware/TurkishPropBank/blob/master/video.jpg" width="50%">](https://youtu.be/TeVnGaYuORQ)
For Developers
============
You can also see [Cython](https://github.com/starlangsoftware/TurkishPropBank-Cy), [Java](https://github.com/starlangsoftware/TurkishPropBank), [C++](https://github.com/starlangsoftware/TurkishPropBank-CPP), [Swift](https://github.com/starlangsoftware/TurkishPropBank-Swift), [Js](https://github.com/starlangsoftware/TurkishPropBank-Js), or [C#](https://github.com/starlangsoftware/TurkishPropBank-CS) repository.
## Requirements
* [Python 3.7 or higher](#python)
* [Git](#git)
### Python
To check if you have a compatible version of Python installed, use the following command:
python -V
You can find the latest version of Python [here](https://www.python.org/downloads/).
### Git
Install the [latest version of Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git).
## Pip Install
pip3 install NlpToolkit-Propbank
## Download Code
In order to work on code, create a fork from GitHub page.
Use Git to clone the code to your local machine, or run the line below on Ubuntu:
git clone <your-fork-git-link>
A directory called PropBank will be created. Alternatively, you can use the link below to explore the code:
git clone https://github.com/starlangsoftware/TurkishPropBank-Py.git
## Open project with Pycharm IDE
Steps for opening the cloned project:
* Start IDE
* Select **File | Open** from the main menu
* Choose the `TurkishPropBank-PY` file
* Select the open as project option
* After a couple of seconds, the dependencies will be downloaded.
Detailed Description
============
+ [FramesetList](#framesetlist)
+ [Frameset](#frameset)
## FramesetList
To read the frame list and keep all framesets in memory:
a = FramesetList()
To iterate over the framesets one by one:
for i in range(a.size()):
frameset = a.getFrameset(i)
To find the frameset that belongs to a verb:
frameset = a.getFrameSet("TUR10-1234560")
## Frameset
To get all arguments of a frameset:
getFramesetArguments(self) -> list
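Putting the snippets above together, a minimal sketch that loads the whole resource and prints how many arguments each frameset defines could look like this. The import path is assumed from the package layout and may differ; only the calls documented above are used.
    from PropBank.FramesetList import FramesetList
    # Load all framesets into memory (assumed import path)
    framesetList = FramesetList()
    # Walk over every frameset and count its argument definitions
    for i in range(framesetList.size()):
        frameset = framesetList.getFrameset(i)
        arguments = frameset.getFramesetArguments()
        print(len(arguments))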
# Cite
@inproceedings{kara-etal-2020-tropbank,
title = "{TR}op{B}ank: {T}urkish {P}rop{B}ank V2.0",
author = {Kara, Neslihan and
Aslan, Deniz Baran and
Mar{\c{s}}an, B{\"u}{\c{s}}ra and
Bakay, {\"O}zge and
Ak, Koray and
Y{\i}ld{\i}z, Olcay Taner},
booktitle = "Proceedings of the 12th Language Resources and Evaluation Conference",
month = may,
year = "2020",
address = "Marseille, France",
publisher = "European Language Resources Association",
url = "https://www.aclweb.org/anthology/2020.lrec-1.336",
pages = "2763--2772",
ISBN = "979-10-95546-34-4",
}
| PypiClean |
/FlaskBB-2.0.2.tar.gz/FlaskBB-2.0.2/flaskbb/cli/main.py | import binascii
import logging
import os
import sys
import time
import traceback
from datetime import datetime
import click
import click_log
import requests
from celery.bin.celery import CeleryCommand
from flask import current_app
from flask.cli import FlaskGroup, ScriptInfo, with_appcontext
from flask_alembic import alembic_click
from jinja2 import Environment, FileSystemLoader
from sqlalchemy_utils.functions import database_exists
from werkzeug.utils import import_string
from flaskbb import create_app
from flaskbb.cli.utils import (EmailType, FlaskBBCLIError, get_version,
prompt_config_path, prompt_save_user,
write_config)
from flaskbb.extensions import alembic, celery, db, whooshee
from flaskbb.utils.populate import (create_default_groups,
create_default_settings, create_latest_db,
create_test_data, create_welcome_forum,
insert_bulk_data, run_plugin_migrations,
update_settings_from_fixture)
from flaskbb.utils.translations import compile_translations
logger = logging.getLogger(__name__)
click_log.basic_config(logger)
class FlaskBBGroup(FlaskGroup):
def __init__(self, *args, **kwargs):
super(FlaskBBGroup, self).__init__(*args, **kwargs)
self._loaded_flaskbb_plugins = False
def _load_flaskbb_plugins(self, ctx):
if self._loaded_flaskbb_plugins:
return
try:
app = ctx.ensure_object(ScriptInfo).load_app()
app.pluggy.hook.flaskbb_cli(cli=self, app=app)
self._loaded_flaskbb_plugins = True
except Exception as exc:
logger.error(
"Error while loading CLI Plugins",
exc_info=traceback.format_exc()
)
else:
shell_context_processors = app.pluggy.hook.flaskbb_shell_context()
for p in shell_context_processors:
app.shell_context_processor(p)
def get_command(self, ctx, name):
self._load_flaskbb_plugins(ctx)
return super(FlaskBBGroup, self).get_command(ctx, name)
def list_commands(self, ctx):
self._load_flaskbb_plugins(ctx)
return super(FlaskBBGroup, self).list_commands(ctx)
def make_app(script_info):
config_file = getattr(script_info, "config_file", None)
instance_path = getattr(script_info, "instance_path", None)
return create_app(config_file, instance_path)
def set_config(ctx, param, value):
"""This will pass the config file to the create_app function."""
ctx.ensure_object(ScriptInfo).config_file = value
def set_instance(ctx, param, value):
"""This will pass the instance path on the script info which can then
be used in 'make_app'."""
ctx.ensure_object(ScriptInfo).instance_path = value
@click.group(cls=FlaskBBGroup, create_app=make_app, add_version_option=False,
invoke_without_command=True)
@click.option("--config", expose_value=False, callback=set_config,
required=False, is_flag=False, is_eager=True, metavar="CONFIG",
help="Specify the config to use either in dotted module "
"notation e.g. 'flaskbb.configs.default.DefaultConfig' "
"or by using a path like '/path/to/flaskbb.cfg'")
@click.option("--instance", expose_value=False, callback=set_instance,
required=False, is_flag=False, is_eager=True, metavar="PATH",
help="Specify the instance path to use. By default the folder "
"'instance' next to the package or module is assumed to "
"be the instance path.")
@click.option("--version", expose_value=False, callback=get_version,
is_flag=True, is_eager=True, help="Show the FlaskBB version.")
@click.pass_context
@click_log.simple_verbosity_option(logger)
def flaskbb(ctx):
"""This is the commandline interface for flaskbb."""
if ctx.invoked_subcommand is None:
# show the help text instead of an error
# when just '--config' option has been provided
click.echo(ctx.get_help())
flaskbb.add_command(alembic_click, "db")
@flaskbb.command()
@click.option("--welcome", "-w", default=True, is_flag=True,
help="Disable the welcome forum.")
@click.option("--force", "-f", default=False, is_flag=True,
help="Doesn't ask for confirmation.")
@click.option("--username", "-u", help="The username of the user.")
@click.option("--email", "-e", type=EmailType(),
help="The email address of the user.")
@click.option("--password", "-p", help="The password of the user.")
@click.option("--no-plugins", "-n", default=False, is_flag=True,
help="Don't run the migrations for the default plugins.")
@with_appcontext
def install(welcome, force, username, email, password, no_plugins):
"""Installs flaskbb. If no arguments are used, an interactive setup
will be run.
"""
click.secho("[+] Installing FlaskBB...", fg="cyan")
if database_exists(db.engine.url):
if force or click.confirm(click.style(
"Existing database found. Do you want to delete the old one and "
"create a new one?", fg="magenta")
):
db.drop_all()
else:
sys.exit(0)
# creating database from scratch and 'stamping it'
create_latest_db()
click.secho("[+] Creating default settings...", fg="cyan")
create_default_groups()
create_default_settings()
click.secho("[+] Creating admin user...", fg="cyan")
prompt_save_user(username, email, password, "admin")
if welcome:
click.secho("[+] Creating welcome forum...", fg="cyan")
create_welcome_forum()
if not no_plugins:
click.secho("[+] Installing default plugins...", fg="cyan")
run_plugin_migrations()
click.secho("[+] Compiling translations...", fg="cyan")
compile_translations()
click.secho("[+] FlaskBB has been successfully installed!",
fg="green", bold=True)
@flaskbb.command()
@click.option("--test-data", "-t", default=False, is_flag=True,
help="Adds some test data.")
@click.option("--bulk-data", "-b", default=False, is_flag=True,
help="Adds a lot of data.")
@click.option("--posts", default=100,
help="Number of posts to create in each topic (default: 100).")
@click.option("--topics", default=100,
help="Number of topics to create (default: 100).")
@click.option("--force", "-f", is_flag=True,
help="Will delete the database before populating it.")
@click.option("--initdb", "-i", is_flag=True,
help="Initializes the database before populating it.")
def populate(bulk_data, test_data, posts, topics, force, initdb):
"""Creates the necessary tables and groups for FlaskBB."""
if force:
click.secho("[+] Recreating database...", fg="cyan")
db.drop_all()
# do not initialize the db if -i is passed
if not initdb:
create_latest_db()
if initdb:
click.secho("[+] Initializing database...", fg="cyan")
create_latest_db()
run_plugin_migrations()
if test_data:
click.secho("[+] Adding some test data...", fg="cyan")
create_test_data()
if bulk_data:
click.secho("[+] Adding a lot of test data...", fg="cyan")
timer = time.time()
rv = insert_bulk_data(int(topics), int(posts))
if not rv and not test_data:
create_test_data()
rv = insert_bulk_data(int(topics), int(posts))
elapsed = time.time() - timer
click.secho("[+] It took {:.2f} seconds to create {} topics and {} "
"posts.".format(elapsed, rv[0], rv[1]), fg="cyan")
# this just makes the most sense for the command name; use -i to
# init the db as well
if not test_data and not bulk_data:
click.secho("[+] Populating the database with some defaults...",
fg="cyan")
create_default_groups()
create_default_settings()
@flaskbb.command()
def reindex():
"""Reindexes the search index."""
click.secho("[+] Reindexing search index...", fg="cyan")
whooshee.reindex()
@flaskbb.command()
@click.option("all_latest", "--all", "-a", default=False, is_flag=True,
help="Upgrades migrations AND fixtures to the latest version.")
@click.option("--fixture/", "-f", default=None,
help="The fixture which should be upgraded or installed.")
@click.option("--force", default=False, is_flag=True,
help="Forcefully upgrades the fixtures.")
def upgrade(all_latest, fixture, force):
"""Updates the migrations and fixtures."""
if all_latest:
click.secho("[+] Upgrading migrations to the latest version...",
fg="cyan")
alembic.upgrade()
if fixture or all_latest:
try:
settings = import_string(
"flaskbb.fixtures.{}".format(fixture)
)
settings = settings.fixture
except ImportError:
raise FlaskBBCLIError("{} fixture is not available"
.format(fixture), fg="red")
click.secho("[+] Updating fixtures...", fg="cyan")
count = update_settings_from_fixture(
fixture=settings, overwrite_group=force, overwrite_setting=force
)
click.secho("[+] {settings} settings in {groups} setting groups "
"updated.".format(groups=len(count), settings=sum(
len(settings) for settings in count.values())
), fg="green")
@flaskbb.command("celery", add_help_option=False,
context_settings={"ignore_unknown_options": True,
"allow_extra_args": True})
@click.pass_context
@with_appcontext
def start_celery(ctx):
"""Preconfigured wrapper around the 'celery' command."""
CeleryCommand(celery).execute_from_commandline(
["flaskbb celery"] + ctx.args
)
@flaskbb.command("shell", short_help="Runs a shell in the app context.")
@with_appcontext
def shell_command():
"""Runs an interactive Python shell in the context of a given
Flask application. The application will populate the default
namespace of this shell according to it"s configuration.
This is useful for executing small snippets of management code
without having to manually configuring the application.
This code snippet is taken from Flask"s cli module and modified to
run IPython and falls back to the normal shell if IPython is not
available.
"""
import code
banner = "Python %s on %s\nInstance Path: %s" % (
sys.version,
sys.platform,
current_app.instance_path,
)
ctx = {"db": db}
# Support the regular Python interpreter startup script if someone
# is using it.
startup = os.environ.get("PYTHONSTARTUP")
if startup and os.path.isfile(startup):
with open(startup, "r") as f:
eval(compile(f.read(), startup, "exec"), ctx)
ctx.update(current_app.make_shell_context())
try:
import IPython
IPython.embed(banner1=banner, user_ns=ctx)
except ImportError:
code.interact(banner=banner, local=ctx)
@flaskbb.command("urls", short_help="Show routes for the app.")
@click.option("--route", "-r", "order_by", flag_value="rule", default=True,
help="Order by route")
@click.option("--endpoint", "-e", "order_by", flag_value="endpoint",
help="Order by endpoint")
@click.option("--methods", "-m", "order_by", flag_value="methods",
help="Order by methods")
@with_appcontext
def list_urls(order_by):
"""Lists all available routes."""
from flask import current_app
rules = sorted(
current_app.url_map.iter_rules(),
key=lambda rule: getattr(rule, order_by)
)
max_rule_len = max(len(rule.rule) for rule in rules)
max_rule_len = max(max_rule_len, len("Route"))
max_endpoint_len = max(len(rule.endpoint) for rule in rules)
max_endpoint_len = max(max_endpoint_len, len("Endpoint"))
max_method_len = max(len(", ".join(rule.methods)) for rule in rules)
max_method_len = max(max_method_len, len("Methods"))
column_header_len = max_rule_len + max_endpoint_len + max_method_len + 4
column_template = "{:<%s} {:<%s} {:<%s}" % (
max_rule_len, max_endpoint_len, max_method_len
)
click.secho(column_template.format("Route", "Endpoint", "Methods"),
fg="blue", bold=True)
click.secho("=" * column_header_len, bold=True)
for rule in rules:
methods = ", ".join(rule.methods)
click.echo(column_template.format(rule.rule, rule.endpoint, methods))
@flaskbb.command("makeconfig")
@click.option("--development", "-d", default=False, is_flag=True,
help="Creates a development config with DEBUG set to True.")
@click.option("--output", "-o", required=False,
help="The path where the config file will be saved at. "
"Defaults to the flaskbb's root folder.")
@click.option("--force", "-f", default=False, is_flag=True,
help="Overwrite any existing config file if one exists.")
def generate_config(development, output, force):
"""Generates a FlaskBB configuration file."""
config_env = Environment(
loader=FileSystemLoader(os.path.join(current_app.root_path, "configs"))
)
config_template = config_env.get_template('config.cfg.template')
if output:
config_path = os.path.abspath(output)
else:
config_path = os.path.dirname(current_app.root_path)
if os.path.exists(config_path) and not os.path.isfile(config_path):
config_path = os.path.join(config_path, "flaskbb.cfg")
# An override to handle database location paths on Windows environments
database_path = "sqlite:///" + os.path.join(
os.path.dirname(current_app.instance_path), "flaskbb.sqlite")
if os.name == 'nt':
database_path = database_path.replace("\\", r"\\")
default_conf = {
"is_debug": False,
"server_name": "example.org",
"use_https": True,
"database_uri": database_path,
"redis_enabled": False,
"redis_uri": "redis://localhost:6379",
"mail_server": "localhost",
"mail_port": 25,
"mail_use_tls": False,
"mail_use_ssl": False,
"mail_username": "",
"mail_password": "",
"mail_sender_name": "FlaskBB Mailer",
"mail_sender_address": "noreply@yourdomain",
"mail_admin_address": "admin@yourdomain",
"secret_key": binascii.hexlify(os.urandom(24)).decode(),
"csrf_secret_key": binascii.hexlify(os.urandom(24)).decode(),
"timestamp": datetime.utcnow().strftime("%A, %d. %B %Y at %H:%M"),
"log_config_path": "",
}
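    # Ask for the config path interactively unless --force was given; warn
    # before overwriting an existing file.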
if not force:
config_path = prompt_config_path(config_path)
if force and os.path.exists(config_path):
click.secho("Overwriting existing config file: {}".format(config_path),
fg="yellow")
if development:
default_conf["is_debug"] = True
default_conf["use_https"] = False
default_conf["server_name"] = "localhost:5000"
write_config(default_conf, config_template, config_path)
sys.exit(0)
# SERVER_NAME
    click.secho("The name and port number of the exposed server.\n"
                "If FlaskBB is accessible on port 80 you can just omit the "
                "port.\nFor example, if FlaskBB is accessible via "
                "example.org:8080 then that is also what you would set here.",
                fg="cyan")
default_conf["server_name"] = click.prompt(
click.style("Server Name", fg="magenta"), type=str,
default=default_conf.get("server_name"))
# HTTPS or HTTP
    click.secho("Should HTTPS (recommended) or HTTP be used to serve FlaskBB?",
fg="cyan")
default_conf["use_https"] = click.confirm(
click.style("Use HTTPS?", fg="magenta"),
default=default_conf.get("use_https"))
# SQLALCHEMY_DATABASE_URI
click.secho("For Postgres use:\n"
" postgresql://flaskbb@localhost:5432/flaskbb\n"
"For more options see the SQLAlchemy docs:\n"
" http://docs.sqlalchemy.org/en/latest/core/engines.html",
fg="cyan")
default_conf["database_uri"] = click.prompt(
click.style("Database URI", fg="magenta"),
default=default_conf.get("database_uri"))
# REDIS_ENABLED
click.secho("Redis will be used for things such as the task queue, "
"caching and rate limiting.", fg="cyan")
default_conf["redis_enabled"] = click.confirm(
click.style("Would you like to use redis?", fg="magenta"),
default=True) # default_conf.get("redis_enabled") is False
# REDIS_URI
if default_conf.get("redis_enabled", False):
default_conf["redis_uri"] = click.prompt(
click.style("Redis URI", fg="magenta"),
default=default_conf.get("redis_uri"))
else:
default_conf["redis_uri"] = ""
# MAIL_SERVER
    click.secho("To use 'localhost' make sure that you have sendmail or\n"
                "something similar installed. Gmail is also supported.",
fg="cyan")
default_conf["mail_server"] = click.prompt(
click.style("Mail Server", fg="magenta"),
default=default_conf.get("mail_server"))
# MAIL_PORT
    click.secho("The port on which the SMTP server is listening.",
fg="cyan")
default_conf["mail_port"] = click.prompt(
click.style("Mail Server SMTP Port", fg="magenta"),
default=default_conf.get("mail_port"))
# MAIL_USE_TLS
click.secho("If you are using a local SMTP server like sendmail this is "
"not needed. For external servers it is required.",
fg="cyan")
default_conf["mail_use_tls"] = click.confirm(
click.style("Use TLS for sending mails?", fg="magenta"),
default=default_conf.get("mail_use_tls"))
# MAIL_USE_SSL
click.secho("Same as above. TLS is the successor to SSL.", fg="cyan")
default_conf["mail_use_ssl"] = click.confirm(
click.style("Use SSL for sending mails?", fg="magenta"),
default=default_conf.get("mail_use_ssl"))
# MAIL_USERNAME
click.secho("Not needed if you are using a local smtp server.\nFor gmail "
"you have to put in your email address here.", fg="cyan")
default_conf["mail_username"] = click.prompt(
click.style("Mail Username", fg="magenta"),
default=default_conf.get("mail_username"))
# MAIL_PASSWORD
click.secho("Not needed if you are using a local smtp server.\nFor gmail "
"you have to put in your gmail password here.", fg="cyan")
default_conf["mail_password"] = click.prompt(
click.style("Mail Password", fg="magenta"),
default=default_conf.get("mail_password"))
# MAIL_DEFAULT_SENDER
click.secho("The name of the sender. You probably want to change it to "
"something like '<your_community> Mailer'.", fg="cyan")
default_conf["mail_sender_name"] = click.prompt(
click.style("Mail Sender Name", fg="magenta"),
default=default_conf.get("mail_sender_name"))
click.secho("On localhost you want to use a noreply address here. "
"Use your email address for gmail here.", fg="cyan")
default_conf["mail_sender_address"] = click.prompt(
click.style("Mail Sender Address", fg="magenta"),
default=default_conf.get("mail_sender_address"))
# ADMINS
click.secho("Logs and important system messages are sent to this address. "
"Use your email address for gmail here.", fg="cyan")
default_conf["mail_admin_address"] = click.prompt(
click.style("Mail Admin Email", fg="magenta"),
default=default_conf.get("mail_admin_address"))
click.secho("Optional filepath to load a logging configuration file from. "
"See the Python logging documentation for more detail.\n"
"\thttps://docs.python.org/library/logging.config.html#logging-config-fileformat",
fg="cyan")
default_conf["log_config_path"] = click.prompt(
click.style("Logging Config Path", fg="magenta"),
default=default_conf.get("log_config_path"))
write_config(default_conf, config_template, config_path)
# Finished
click.secho("The configuration file has been saved to:\n{cfg}\n"
"Feel free to adjust it as needed."
.format(cfg=config_path), fg="blue", bold=True)
click.secho("Usage: \nflaskbb --config {cfg} run"
.format(cfg=config_path), fg="green") | PypiClean |
/ClusterShell-1.9.1.tar.gz/ClusterShell-1.9.1/doc/sphinx/config.rst | Configuration
=============
.. highlight:: ini
clush
-----
.. _clush-config:
clush.conf
^^^^^^^^^^
The following configuration file defines system-wide default values for
several ``clush`` tool parameters::
/etc/clustershell/clush.conf
``clush`` settings might then be overridden (globally, or per user) if one of
the following files is found, in priority order::
$XDG_CONFIG_HOME/clustershell/clush.conf
$HOME/.config/clustershell/clush.conf (only if $XDG_CONFIG_HOME is not defined)
{sys.prefix}/etc/clustershell/clush.conf
$HOME/.local/etc/clustershell/clush.conf
$HOME/.clush.conf (deprecated, for 1.6 compatibility only)
.. note:: The path using `sys.prefix`_ was added in version 1.9.1 and is
useful for Python virtual environments.
In addition, if the environment variable ``$CLUSTERSHELL_CFGDIR`` is defined and
valid, it will be used instead. In that case, the following configuration file
will be tried first for ``clush``::
$CLUSTERSHELL_CFGDIR/clush.conf
The following table describes available ``clush`` config file settings.
+-----------------+----------------------------------------------------+
| Key | Value |
+=================+====================================================+
| fanout | Size of the sliding window of connectors (eg. max |
| | number of *ssh(1)* allowed to run at the same |
| | time). |
+-----------------+----------------------------------------------------+
| confdir | Optional list of directory paths where ``clush`` |
| | should look for **.conf** files which define |
| | :ref:`run modes <clushmode-config>` that can then |
| | be activated with `--mode`. All other ``clush`` |
| | config file settings defined in this table might |
| | be overridden in a run mode. Each mode section |
| | should have a name prefixed by "mode:" to clearly |
| | identify a section defining a mode. Duplicate |
| | modes are not allowed in those files. |
| | Configuration files that are not readable by the |
| | current user are ignored. The variable `$CFGDIR` |
| | is replaced by the path of the highest priority |
| | configuration directory found (where *clush.conf* |
| | resides). The default *confdir* value enables both |
| | system-wide and any installed user configuration |
| | (thanks to `$CFGDIR`). Duplicate directory paths |
| | are ignored. |
+-----------------+----------------------------------------------------+
| connect_timeout | Timeout in seconds to allow a connection to |
| | establish. This parameter is passed to *ssh(1)*. |
| | If set to 0, no timeout occurs. |
+-----------------+----------------------------------------------------+
| command_prefix | Command prefix. Generally used for specific |
| | :ref:`run modes <clush-modes>`, for example to |
| | implement *sudo(8)* support. |
+-----------------+----------------------------------------------------+
| command_timeout | Timeout in seconds to allow a command to complete |
| | since the connection has been established. This |
| | parameter is passed to *ssh(1)*. In addition, the |
| | ClusterShell library ensures that any commands |
| | complete in less than (connect_timeout \+ |
| | command_timeout). If set to 0, no timeout occurs. |
+-----------------+----------------------------------------------------+
| color | Whether to use ANSI colors to surround node |
| | or nodeset prefix/header with escape sequences to |
| | display them in color on the terminal. Valid |
| | arguments are *never*, *always* or *auto* (which |
| | use color if standard output/error refer to a |
| | terminal). |
| | Colors are set to ``[34m`` (blue foreground text) |
| | for stdout and ``[31m`` (red foreground text) for |
| | stderr, and cannot be modified. |
+-----------------+----------------------------------------------------+
| fd_max | Maximum number of open file descriptors |
| | permitted per ``clush`` process (soft resource |
| | limit for open files). This limit can never exceed |
| | the system (hard) limit. The *fd_max* (soft) and |
| | system (hard) limits should be high enough to |
| | run ``clush``, although their values depend on |
| | your fanout value. |
+-----------------+----------------------------------------------------+
| history_size | Set the maximum number of history entries saved in |
| | the GNU readline history list. Negative values |
| | imply unlimited history file size. |
+-----------------+----------------------------------------------------+
| node_count | Should ``clush`` display additional (node count) |
| | information in buffer header? (yes/no) |
+-----------------+----------------------------------------------------+
| maxrc | Should ``clush`` return the largest of command |
| | return codes? (yes/no) |
| | If set to no (the default), ``clush`` exit status |
| | gives no information about command return codes, |
| | but rather reports on ``clush`` execution itself |
| | (zero indicating a successful run). |
+-----------------+----------------------------------------------------+
| password_prompt | Enable password prompt and password forwarding to |
| | stdin? (yes/no) |
| | Generally used for specific |
| | :ref:`run modes <clush-modes>`, for example to |
| | implement interactive *sudo(8)* support. |
+-----------------+----------------------------------------------------+
| verbosity | Set the verbosity level: 0 (quiet), 1 (default), |
| | 2 (verbose) or more (debug). |
+-----------------+----------------------------------------------------+
| ssh_user | Set the *ssh(1)* user to use for remote connection |
| | (default is to not specify). |
+-----------------+----------------------------------------------------+
| ssh_path | Set the *ssh(1)* binary path to use for remote |
| | connection (default is *ssh*). |
+-----------------+----------------------------------------------------+
| ssh_options | Set additional (raw) options to pass to the |
| | underlying *ssh(1)* command. |
+-----------------+----------------------------------------------------+
| scp_path | Set the *scp(1)* binary path to use for remote |
| | copy (default is *scp*). |
+-----------------+----------------------------------------------------+
| scp_options | Set additional options to pass to the underlying |
| | *scp(1)* command. If not specified, *ssh_options* |
| | are used instead. |
+-----------------+----------------------------------------------------+
| rsh_path | Set the *rsh(1)* binary path to use for remote |
| | connection (default is *rsh*). You could easily |
| | use *mrsh* or *krsh* by simply changing this |
| | value. |
+-----------------+----------------------------------------------------+
| rcp_path | Same as *rsh_path* but for rcp command (default is |
| | *rcp*). |
+-----------------+----------------------------------------------------+
| rsh_options | Set additional options to pass to the underlying |
| | rsh/rcp command. |
+-----------------+----------------------------------------------------+
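To summarize, a small ``clush.conf`` that overrides only a few of the settings
above could look like the following sketch (the section header and the values
shown are illustrative only, not recommended defaults)::

    [Main]
    fanout: 64
    connect_timeout: 15
    command_timeout: 0
    color: auto
    node_count: yes
    maxrc: no
    verbosity: 1
    ssh_options: -oStrictHostKeyChecking=no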
.. _clushmode-config:
Run modes
^^^^^^^^^
Since version 1.9, ``clush`` has support for run modes, which are special
:ref:`clush-config` settings with a given name. Two run modes are provided in
example configuration files that can be copied and modified. They implement
password-based authentication with *sshpass(1)* and support of interactive
*sudo(8)* with password.
To use a run mode with ``clush --mode``, install a configuration file in one
of :ref:`clush-config`'s ``confdir`` (usually ``clush.conf.d``). Only
configuration files ending in **.conf** are scanned. If the user running
``clush`` doesn't have read access to a configuration file, it is ignored.
When ``--mode`` is specified, you can display all available run modes for
the current user by enabling debug mode (``-d``).
Example of a run mode configuration file (eg.
``/etc/clustershell/clush.conf.d/sudo.conf``) to add support for interactive
sudo::
[mode:sudo]
password_prompt: yes
command_prefix: /usr/bin/sudo -S -p "''"
System administrators or users can easily create additional run modes by
adding configuration files to :ref:`clush-config`'s ``confdir``.
More details about using run modes can be found :ref:`here <clush-modes>`.
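For instance, a hypothetical run mode that only overrides a few generic
``clush.conf`` settings (no command prefix or password handling) could be
declared as follows (the mode name and values are made up for illustration)::

    [mode:slowlinks]
    fanout: 16
    connect_timeout: 60
    command_timeout: 120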
.. _groups-config:
Node groups
-----------
ClusterShell defines a *node group* syntax to represent a collection of nodes.
This is a convenient way to manipulate node sets, especially in HPC (High
Performance Computing) or with large server farms. This section explains how
to configure node group **sources**. Please see also :ref:`nodeset node groups
<nodeset-groups>` for specific usage examples.
.. _groups_config_conf:
groups.conf
^^^^^^^^^^^
ClusterShell loads *groups.conf* configuration files that define how to
obtain node group configuration, i.e. the way the library should access
file-based or external node group **sources**.
The following configuration file defines system-wide default values for
*groups.conf*::
/etc/clustershell/groups.conf
*groups.conf* settings might then be overridden (globally, or per user) if one
of the following files is found, in priority order::
$XDG_CONFIG_HOME/clustershell/groups.conf
$HOME/.config/clustershell/groups.conf (only if $XDG_CONFIG_HOME is not defined)
{sys.prefix}/etc/clustershell/groups.conf
$HOME/.local/etc/clustershell/groups.conf
.. note:: The path using `sys.prefix`_ was added in version 1.9.1 and is
useful for Python virtual environments.
In addition, if the environment variable ``$CLUSTERSHELL_CFGDIR`` is defined and
valid, it will be used instead. In that case, the following configuration file
will be tried first for *groups.conf*::
$CLUSTERSHELL_CFGDIR/groups.conf
This makes it possible for a user to have their own *node groups* configuration.
If no readable configuration file is found, group support will be disabled but
other node set operations will still work.
*groups.conf* defines configuration sub-directories, but may also define
source definitions by itself. These **sources** provide external calls that
are detailed in :ref:`group-external-sources`.
The following example shows the content of a *groups.conf* file where node
groups are bound to the source named *genders* by default::
[Main]
default: genders
confdir: /etc/clustershell/groups.conf.d $CFGDIR/groups.conf.d
autodir: /etc/clustershell/groups.d $CFGDIR/groups.d
[genders]
map: nodeattr -n $GROUP
all: nodeattr -n ALL
list: nodeattr -l
[slurm]
map: sinfo -h -o "%N" -p $GROUP
all: sinfo -h -o "%N"
list: sinfo -h -o "%P"
reverse: sinfo -h -N -o "%P" -n $NODE
The *groups.conf* files are parsed with Python's `ConfigParser`_:
* The first section whose name is *Main* accepts the following keywords:
* *default* defines a **default node group source** (eg. by referencing a
valid section header)
* *confdir* defines an optional list of directory paths where the
ClusterShell library should look for **.conf** files which define group
sources to use. Each file in these directories with the .conf suffix
should contain one or more node group source sections as documented below.
These will be merged with the group sources defined in the main
*groups.conf* to form the complete set of group sources to use. Duplicate
group source sections are not allowed in those files. Configuration files
that are not readable by the current user are ignored (except the one that
defines the default group source). The variable `$CFGDIR` is replaced by
the path of the highest priority configuration directory found (where
*groups.conf* resides). The default *confdir* value enables both
system-wide and any installed user configuration (thanks to `$CFGDIR`).
Duplicate directory paths are ignored.
* *autodir* defines an optional list of directories where the ClusterShell
library should look for **.yaml** files that define in-file group
dictionaries. No need to call external commands for these files, they are
parsed by the ClusterShell library itself. Multiple group source
     definitions in the same file are supported. The variable `$CFGDIR` is
replaced by the path of the highest priority configuration directory found
     (where *groups.conf* resides). The default *autodir* value enables both
system-wide and any installed user configuration (thanks to `$CFGDIR`).
Duplicate directory paths are ignored.
* Each following section (`genders`, `slurm`) defines a group source. The
map, all, list and reverse upcalls are explained below in
:ref:`group-sources-upcalls`.
.. _group-file-based:
File-based group sources
^^^^^^^^^^^^^^^^^^^^^^^^
Version 1.7 introduces support for native handling of flat files with
different group sources to avoid the use of external upcalls for such static
configuration. This can be achieved through the *autodir* feature and YAML
files described below.
YAML group files
""""""""""""""""
Cluster node groups can be defined in straightforward YAML files. In such a
file, each YAML dictionary defines a group-to-nodes mapping. **Different
dictionaries** are handled as **different group sources**.
For compatibility reasons with previous versions of ClusterShell, this is not
the default way to define node groups yet. So here are the steps needed to try
this out:
Rename the following file::
/etc/clustershell/groups.d/cluster.yaml.example
to a file having the **.yaml** extension, for example::
/etc/clustershell/groups.d/cluster.yaml
Ensure that *autodir* is set in :ref:`groups_config_conf`::
autodir: /etc/clustershell/groups.d $CFGDIR/groups.d
In the following example, we also changed the default group source
to **roles** in :ref:`groups_config_conf` (the first dictionary defined in
the example), so that *@roles:groupname* can simply be shortened to *@groupname*.
.. highlight:: yaml
Here is an example of **/etc/clustershell/groups.d/cluster.yaml**::
roles:
adm: 'mgmt[1-2]' # define groups @roles:adm and @adm
login: 'login[1-2]'
compute: 'node[0001-0288]'
gpu: 'node[0001-0008]'
servers: # example of yaml list syntax for nodes
- 'server001' # in a group
- 'server002,server101'
- 'server[003-006]'
cpu_only: '@compute!@gpu' # example of inline set operation
# define group @cpu_only with node[0009-0288]
storage: '@lustre:mds,@lustre:oss' # example of external source reference
all: '@login,@compute,@storage' # special group used for clush/nodeset -a
# only needed if not including all groups
lustre:
mds: 'mds[1-4]'
oss: 'oss[0-15]'
rbh: 'rbh[1-2]'
.. highlight:: console
Testing the syntax of your group file can be quickly performed through the
``-L`` or ``--list-all`` command of :ref:`nodeset-tool`::
$ nodeset -LL
@adm mgmt[1-2]
@all login[1-2],mds[1-4],node[0001-0288],oss[0-15],rbh[1-2]
@compute node[0001-0288]
@cpu_only node[0009-0288]
@gpu node[0001-0008]
@login login[1-2]
@storage mds[1-4],oss[0-15],rbh[1-2]
@sysgrp sysgrp[1-4]
@lustre:mds mds[1-4]
@lustre:oss oss[0-15]
@lustre:rbh rbh[1-2]
.. _group-external-sources:
External group sources
^^^^^^^^^^^^^^^^^^^^^^
.. _group-sources-upcalls:
Group source upcalls
""""""""""""""""""""
Each node group source is defined by a section name (*source* name) and up to
four upcalls:
* **map**: External shell command used to resolve a group name into a node
set, list of nodes or list of node sets (separated by space characters or by
carriage returns). The variable *$GROUP* is replaced before executing the command.
* **all**: Optional external shell command that should return a node set, list
of nodes or list of node sets of all nodes for this group source. If not
specified, the library will try to resolve all nodes by using the **list**
external command in the same group source followed by **map** for each
available group. The notion of *all nodes* is used by ``clush -a`` and also
by the special group name ``@*`` (or ``@source:*``).
* **list**: Optional external shell command that should return the list of all
groups for this group source (separated by space characters or by carriage
returns). If this upcall is not specified, ClusterShell won't be able to
list any available groups (eg. with ``nodeset -l``), so it is highly
recommended to set it.
* **reverse**: Optional external shell command used to find the group(s) of a
  single node. The variable *$NODE* is replaced before execution. If this external
call is not specified, the reverse operation is computed in memory by the
library from the **list** and **map** external calls, if available. Also, if
the number of nodes to reverse is greater than the number of available
groups, the reverse external command is avoided automatically to reduce
resolution time.
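.. highlight:: ini

As an illustration, a hypothetical group source backed by a flat file with one
``groupname: nodeset`` entry per line could implement the **map** and **list**
upcalls with plain *sed(1)* commands (the file path and layout below are
assumptions made for this sketch, not files shipped by ClusterShell)::

    [flatfile]
    map: sed -n 's/^$GROUP:\(.*\)/\1/p' /etc/clustershell/flatfile
    list: sed -n 's/^\([0-9A-Za-z_-]*\):.*/\1/p' /etc/clustershell/flatfile

The optional **all** and **reverse** upcalls are omitted here; the library then
falls back to combining **list** and **map** as described above.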
In addition to context-dependent *$GROUP* and *$NODE* variables described
above, the two following variables are always available and also replaced
before executing shell commands:
* *$CFGDIR* is replaced by *groups.conf* base directory path
* *$SOURCE* is replaced by the current source name (see a usage example just
below)
.. _group-external-caching:
Caching considerations
""""""""""""""""""""""
External command results are cached in memory, for a limited amount of time,
to avoid multiple similar calls.
The optional parameter **cache_time**, when specified within a group source
section, defines the number of seconds each upcall result is kept in cache,
in memory only. Please note that caching is actually only useful for
long-running programs (like daemons) that are using node groups, not for
one-shot commands like :ref:`clush <clush-tool>` or
:ref:`cluset <cluset-tool>`/:ref:`nodeset <nodeset-tool>`.
The default value of **cache_time** is 3600 seconds.
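For example, reusing the *genders* upcalls shown earlier, a source whose
results should only stay cached for five minutes could be declared as follows
(the 300-second value is arbitrary)::

    [genders]
    map: nodeattr -n $GROUP
    all: nodeattr -n ALL
    list: nodeattr -l
    cache_time: 300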
Multiple sources section
""""""""""""""""""""""""
.. highlight:: ini
Use a comma-separated list of source names in the section header if you want
to define multiple group sources with similar upcall commands. The special
variable `$SOURCE` is always replaced by the source name before command
execution (here `cluster`, `racks` and `cpu`), for example::
[cluster,racks,cpu]
map: get_nodes_from_source.sh $SOURCE $GROUP
all: get_all_nodes_from_source.sh $SOURCE
list: list_nodes_from_source.sh $SOURCE
is equivalent to::
[cluster]
map: get_nodes_from_source.sh cluster $GROUP
all: get_all_nodes_from_source.sh cluster
list: list_nodes_from_source.sh cluster
[racks]
map: get_nodes_from_source.sh racks $GROUP
all: get_all_nodes_from_source.sh racks
list: list_nodes_from_source.sh racks
[cpu]
map: get_nodes_from_source.sh cpu $GROUP
all: get_all_nodes_from_source.sh cpu
list: list_nodes_from_source.sh cpu
Return code of external calls
"""""""""""""""""""""""""""""
Each external command may return a non-zero return code when the operation is
not possible. However, if a call returns zero even for a non-existing group,
the user will not receive any error when trying to resolve that unknown
group. The desired behavior is left to the system administrator.
.. _group-slurm-bindings:
Slurm group bindings
""""""""""""""""""""
Enable Slurm node group bindings by renaming the example configuration file
usually installed as ``/etc/clustershell/groups.conf.d/slurm.conf.example`` to
``slurm.conf``. Four group sources are defined in this file and are detailed
below. Each section has both a long and a short name (for convenience), but
each pair defines a single group source.
While examples below are based on the :ref:`nodeset-tool` tool, all Python
tools using ClusterShell and the :class:`.NodeSet` class will automatically
benefit from these additional node groups.
.. highlight:: ini
The first section **slurmpart,sp** defines a group source based on Slurm
partitions. Each group is named after the partition name and contains the
partition's nodes::
[slurmpart,sp]
map: sinfo -h -o "%N" -p $GROUP
all: sinfo -h -o "%N"
list: sinfo -h -o "%R"
reverse: sinfo -h -N -o "%R" -n $NODE
.. highlight:: console
Example of use with :ref:`nodeset <nodeset-tool>` on a cluster having two Slurm
partitions named *kepler* and *pascal*::
$ nodeset -s sp -ll
@sp:kepler cluster-[0001-0065]
@sp:pascal cluster-[0066-0068]
.. highlight:: ini
The second section **slurmstate,st** defines a group source based on Slurm
node states. Each group is based on a different state name and contains the
nodes currently in that state::
[slurmstate,st]
map: sinfo -h -o "%N" -t $GROUP
all: sinfo -h -o "%N"
list: sinfo -h -o "%T" | tr -d '*~#$@+'
reverse: sinfo -h -N -o "%T" -n $NODE | tr -d '*~#$@+'
cache_time: 60
Here, :ref:`cache_time <group-external-caching>` is set to 60 seconds instead
of the default (3600s) to avoid caching results in memory for too long, in
case of state change (this is only useful for long-running processes, not
one-shot commands).
.. highlight:: console
Example of use with :ref:`nodeset <nodeset-tool>` to get the current nodes that
are in the Slurm state *drained*::
$ nodeset -f @st:drained
cluster-[0058,0067]
.. highlight:: ini
The third section **slurmjob,sj** defines a group source based on Slurm jobs.
Each group is based on a running job ID and contains the nodes currently
allocated for this job::
[slurmjob,sj]
map: squeue -h -j $GROUP -o "%N"
list: squeue -h -o "%i" -t R
reverse: squeue -h -w $NODE -o "%i"
cache_time: 60
The fourth section **slurmuser,su** defines a group source based on Slurm users.
Each group is based on a username and contains the nodes currently
allocated for jobs belonging to the username::
[slurmuser,su]
map: squeue -h -u $GROUP -o "%N" -t R
list: squeue -h -o "%u" -t R
reverse: squeue -h -w $NODE -o "%i"
cache_time: 60
Example of use with :ref:`clush <clush-tool>` to execute a command on all nodes
that are running jobs owned by the user *username*::
$ clush -bw@su:username 'df -Ph /scratch'
$ clush -bw@su:username 'du -s /scratch/username'
:ref:`cache_time <group-external-caching>` is also set to 60 seconds instead
of the default (3600s) to avoid caching results in memory for too long, because
this group source is likely very dynamic (this is only useful for long-running
processes, not one-shot commands).
.. highlight:: console
You can then easily find nodes associated with a Slurm job ID::
$ nodeset -f @sj:686518
cluster-[0003,0005,0010,0012,0015,0017,0021,0055]
.. _group-xcat-bindings:
xCAT group bindings
"""""""""""""""""""
Enable xCAT node group bindings by renaming the example configuration file
usually installed as ``/etc/clustershell/groups.conf.d/xcat.conf.example`` to
``xcat.conf``. A single group source is defined in this file and is detailed
below.
.. warning:: xCAT installs its own `nodeset`_ command which
usually takes precedence over ClusterShell's :ref:`nodeset-tool` command.
In that case, simply use :ref:`cluset <cluset-tool>` instead.
While examples below are based on the :ref:`cluset-tool` tool, all Python
tools using ClusterShell and the :class:`.NodeSet` class will automatically
benefit from these additional node groups.
.. highlight:: ini
The section **xcat** defines a group source based on xCAT static node groups::
[xcat]
# list the nodes in the specified node group
map: lsdef -s -t node $GROUP | cut -d' ' -f1
# list all the nodes defined in the xCAT tables
all: lsdef -s -t node | cut -d' ' -f1
# list all groups
list: lsdef -t group | cut -d' ' -f1
.. highlight:: console
Example of use with :ref:`cluset-tool`::
$ lsdef -s -t node dtn
sh-dtn01 (node)
sh-dtn02 (node)
$ cluset -s xcat -f @dtn
sh-dtn[01-02]
.. highlight:: text
.. _defaults-config:
Library Defaults
----------------
.. warning:: Modifying library defaults is for advanced users only as that
could change the behavior of tools using ClusterShell. Moreover, tools are
free to enforce their own defaults, so changing library defaults may not
change a global behavior as expected.
Since version 1.7, most defaults of the ClusterShell library may be overridden
in *defaults.conf*.
The following configuration file defines ClusterShell system-wide defaults::
/etc/clustershell/defaults.conf
*defaults.conf* settings might then be overridden (globally, or per user) if
one of the following files is found, in priority order::
$XDG_CONFIG_HOME/clustershell/defaults.conf
$HOME/.config/clustershell/defaults.conf (only if $XDG_CONFIG_HOME is not defined)
{sys.prefix}/etc/clustershell/defaults.conf
$HOME/.local/etc/clustershell/defaults.conf
In addition, if the environment variable ``$CLUSTERSHELL_CFGDIR`` is defined and
valid, it will be used instead. In that case, the following configuration file
will be tried first for ClusterShell defaults::
$CLUSTERSHELL_CFGDIR/defaults.conf
Use case: rsh
^^^^^^^^^^^^^^
If your cluster uses an rsh variant like ``mrsh`` or ``krsh``, you may want to
change it in the library defaults.
An example file is usually available in
``/usr/share/doc/clustershell-*/examples/defaults.conf-rsh`` and could be
copied to ``/etc/clustershell/defaults.conf`` or to an alternate path
described above. Basically, the change consists of defining an alternate
distant worker, referenced by Python module name, as follows::
[task.default]
distant_workername: Rsh
.. _defaults-config-slurm:
Use case: Slurm
^^^^^^^^^^^^^^^
If your cluster naming scheme has multiple dimensions, as in ``node-93-02``, we
recommend restricting nD folding when using Slurm, which is currently unable to
parse some multidimensional node indexes when they are not explicitly enclosed
in square brackets.
To do so, set ``fold_axis`` to -1 in the :ref:`defaults-config` so that nD
folding is only computed on the last axis (which seems to work best with
Slurm)::
[nodeset]
fold_axis: -1
That way, node sets computed by ClusterShell tools can be passed to Slurm
without error.
.. _ConfigParser: http://docs.python.org/library/configparser.html
.. _nodeset: https://xcat-docs.readthedocs.io/en/stable/guides/admin-guides/references/man8/nodeset.8.html
.. _sys.prefix: https://docs.python.org/3/library/sys.html#sys.prefix
| PypiClean |
/MakkaPakka-1.0.4.tar.gz/MakkaPakka-1.0.4/src/makka_pakka/directed_graph/directed_graph.py | from typing import List
from makka_pakka.directed_graph.node import Node
from makka_pakka.exceptions.exceptions import ErrorType
from makka_pakka.exceptions.exceptions import MKPKCyclicDependency
from makka_pakka.exceptions.exceptions import MKPKInvalidParameter
class DirectedGraph:
"""
A stripped down implementation of a directed graph. This class is intended
for use as a dependency tree strcuture; therefore, self loops and
disconnected nodes are not implemented.
"""
def __init__(self, root_name: str) -> None:
"""
Directed graph constructor.
:param root_name: The text label for the root node in the directed graph.
"""
if not isinstance(root_name, str):
raise MKPKInvalidParameter("root_name", "__init__", root_name)
self.root = Node(root_name)
self.nodes: List[Node] = [self.root]
def get_node_with_label(self, label: str) -> Node | None:
"""
Gets the node in the directed graph with the specified label.
:param label: The label name to search for in the directed graph.
        :return: The node with the specified label, or None if the node is
not found.
"""
if not isinstance(label, str):
raise MKPKInvalidParameter("label", "get_node_with_label", label)
for node in self.nodes:
if node.label == label:
return node
return None
def connect_to_node_with_label_create_if_not_exists(
self, node: Node, label: str
) -> bool:
"""
Connects a node to another node using its label for lookup. If a node
doesn't exist with this label, then a new node is created (with the
label), and a connection is made to this one.
:param node: The parent node to point to the child.
:param label: The label to search for a node with, or create a new node with.
        :return: True if a new node is created, False if a node already
exists with the label.
"""
if not isinstance(node, Node) or node not in self.nodes:
raise MKPKInvalidParameter(
"node", "connect_to_node_with_label_create_if_not_exists", node
)
if not isinstance(label, str):
raise MKPKInvalidParameter(
"label",
"connect_to_node_with_label_create_if_not_exists",
label,
)
if label_node := self.get_node_with_label(label):
self.connect_node_to_node(node, label_node)
return False
else:
self.connect_new_node(node, label)
return True
def connect_node_to_node(self, connect_from: Node, connect_to: Node) -> bool:
"""
Creates a directed connection between two nodes.
:param connect_from: The node to create the connection from.
:param connect_to: The node to create the connection to.
:return: False when the connection already exists.
"""
if not isinstance(connect_from, Node):
raise MKPKInvalidParameter(
"connect_from", "connect_node_to_node", connect_from
)
if not isinstance(connect_to, Node):
raise MKPKInvalidParameter("connect_to", "connect_node_to_node", connect_to)
return connect_from.add_connected_node(connect_to)
def connect_new_node(self, parent: Node, new_node_label: str) -> Node:
"""
Creates a new node, and points a parent node to it.
:param parent: The Node which should be connected to the new node.
:param new_node_label: The label for the new node to be created.
:return: The new node that was created.
"""
if not isinstance(parent, Node):
raise MKPKInvalidParameter("parent", "connect_new_node", parent)
if not isinstance(new_node_label, str):
raise MKPKInvalidParameter("new_node_label", "parent", new_node_label)
new_node: Node = Node(new_node_label)
parent.add_connected_node(new_node)
self.nodes.append(new_node)
return new_node
def has_cyclic_dependency(self) -> List[Node]:
"""
        Determines if the graph has a cycle, i.e. nodes that point to each other
in a loop.
:return: A list of Nodes that form a cyclic loop. List is empty if
there is no cycle in the graph.
"""
cyclic_path_lock: bool = False
cyclic_path: List[str] = []
# Stores the backtrace from the root to the visited node.
current_path: List[Node] = [self.root]
def travel_to_connected(node: Node):
nonlocal cyclic_path, cyclic_path_lock, current_path
for adj_node in node.get_children():
# If we try to visit a node that is already in the backtrace,
# then there must be a cyclic dependency.
if adj_node in current_path:
current_path.append(adj_node)
# Check if a cyclic path has already been set, if not
# then this is the shortest cyclic path.
if not cyclic_path_lock:
cyclic_path = [n.label for n in current_path]
cyclic_path_lock = True
else:
current_path.append(adj_node)
travel_to_connected(adj_node)
# All connected nodes have been visited, therefore pop the
# current node from the backtrace.
current_path.pop()
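        # Depth-first traversal from the root: meeting a node that is already
        # on the current path means the graph contains a cycle.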
travel_to_connected(self.root)
return cyclic_path
def create_and_assert_no_cycle(self, parent: Node, label: str) -> None:
"""
        Creates a new node in the graph, if one doesn't already exist, and
        raises an error if connecting it introduces a cycle.
        :param parent: The parent node of the new node to create, if not exists.
        :param label: The label of the new node, if it doesn't already exist.
        :raises:
            *MKPKCyclicDependency* - When creating the new node causes a loop
            in the DirectedGraph.
"""
if not isinstance(parent, Node):
raise MKPKInvalidParameter("parent", "create_and_assert_no_cycle", parent)
if not isinstance(label, str):
raise MKPKInvalidParameter("label", "create_and_assert_no_cycle", label)
self.connect_to_node_with_label_create_if_not_exists(parent, label)
if cyclic_loop := self.has_cyclic_dependency():
raise MKPKCyclicDependency(
"Cyclic function call detected.",
f"A cyclic function call was detected while processing,\
the following call loop was found:\n\
{cyclic_loop}",
ErrorType.FATAL,
)
@staticmethod
def get_cyclic_dependency_str(path: List[str]) -> str:
"""
Gets a printable representation of the cyclic dependency returned
from has_cyclic_dependency.
:param path: A list of string labels - the return result of
has_cyclic_dependency.
:return: A string representation of the cyclic dependency.
"""
if not isinstance(path, list) or not all([isinstance(n, str) for n in path]):
raise MKPKInvalidParameter("path", "get_cyclic_dependency_str", path)
# Early breakout if there is no cyclic dependency.
if len(path) == 0:
return ""
format_message = path[0]
for label in path[1:]:
format_message += f" --> {label}"
return format_message | PypiClean |
/EpiTator-1.3.5.tar.gz/EpiTator-1.3.5/epitator/importers/import_geonames.py | from __future__ import absolute_import
from __future__ import print_function
import six
import csv
import unicodecsv
import re
import sys
from six import BytesIO
from zipfile import ZipFile
from six.moves.urllib import request
from six.moves.urllib.error import URLError
from ..get_database_connection import get_database_connection
from ..utils import parse_number, batched, normalize_text
GEONAMES_ZIP_URL = "http://download.geonames.org/export/dump/allCountries.zip"
geonames_field_mappings = [
('geonameid', 'text primary key'),
('name', 'text'),
('asciiname', 'text'),
('alternatenames', None),
('latitude', 'real'),
('longitude', 'real'),
('feature_class', 'text'),
('feature_code', 'text'),
('country_code', 'text'),
('cc2', 'text'),
('admin1_code', 'text'),
('admin2_code', 'text'),
('admin3_code', 'text'),
('admin4_code', 'text'),
('population', 'integer'),
('elevation', None),
('dem', None),
('timezone', None),
('modification_date', None)
]
def read_geonames_csv():
print("Downloading geoname data from: " + GEONAMES_ZIP_URL)
try:
url = request.urlopen(GEONAMES_ZIP_URL)
except URLError:
print("If you are operating behind a firewall, try setting the HTTP_PROXY/HTTPS_PROXY environment variables.")
raise
zipfile = ZipFile(BytesIO(url.read()))
print("Download complete")
# Loading geonames data may cause errors without setting csv.field_size_limit:
if sys.platform == "win32":
max_c_long_on_windows = (2**32 / 2) - 1
csv.field_size_limit(max_c_long_on_windows)
else:
csv.field_size_limit(sys.maxint if six.PY2 else six.MAXSIZE)
with zipfile.open('allCountries.txt') as f:
reader = unicodecsv.DictReader(f,
fieldnames=[
k for k, v in geonames_field_mappings],
encoding='utf-8',
delimiter='\t',
quoting=csv.QUOTE_NONE)
for d in reader:
d['population'] = parse_number(d['population'], 0)
d['latitude'] = parse_number(d['latitude'], 0)
d['longitude'] = parse_number(d['longitude'], 0)
if len(d['alternatenames']) > 0:
d['alternatenames'] = d['alternatenames'].split(',')
else:
d['alternatenames'] = []
yield d
def import_geonames(drop_previous=False):
connection = get_database_connection(create_database=True)
cur = connection.cursor()
if drop_previous:
print("Dropping geonames data...")
cur.execute("""DROP TABLE IF EXISTS 'geonames'""")
cur.execute("""DROP TABLE IF EXISTS 'alternatenames'""")
cur.execute("""DROP TABLE IF EXISTS 'alternatename_counts'""")
cur.execute("""DROP INDEX IF EXISTS 'alternatename_index'""")
cur.execute("""DROP TABLE IF EXISTS 'adminnames'""")
table_exists = len(list(cur.execute("""SELECT name FROM sqlite_master
WHERE type='table' AND name='geonames'"""))) > 0
if table_exists:
print("The geonames table already exists. "
"Run this again with --drop-previous to recreate it.")
return
# Create table
cur.execute("CREATE TABLE geonames (" + ",".join([
'"' + k + '" ' + sqltype
for k, sqltype in geonames_field_mappings if sqltype]) + ")")
cur.execute('''CREATE TABLE alternatenames
(geonameid text, alternatename text, alternatename_lemmatized text)''')
cur.execute('''CREATE TABLE adminnames
(name text,
country_code text, admin1_code text, admin2_code text, admin3_code text,
PRIMARY KEY (country_code, admin1_code, admin2_code, admin3_code))''')
i = 0
geonames_insert_command = 'INSERT INTO geonames VALUES (' + ','.join([
'?' for x, sqltype in geonames_field_mappings if sqltype]) + ')'
alternatenames_insert_command = 'INSERT INTO alternatenames VALUES (?, ?, ?)'
adminnames_insert_command = 'INSERT OR IGNORE INTO adminnames VALUES (?, ?, ?, ?, ?)'
for batch in batched(read_geonames_csv()):
geoname_tuples = []
alternatename_tuples = []
adminname_tuples = []
for geoname in batch:
i += 1
total_row_estimate = 11000000
if i % (total_row_estimate / 40) == 0:
print(i, '/', total_row_estimate, '+ geonames imported')
connection.commit()
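            # Administrative divisions (ADM1-3) and country/political entities
            # (PCLI/PCLH) also populate the adminnames lookup table.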
if re.match(r"ADM[1-3]$", geoname['feature_code']) or re.match(r"PCL[IH]$", geoname['feature_code']):
adminname_tuples.append((
geoname['name'],
geoname['country_code'],
geoname['admin1_code'],
geoname['admin2_code'],
geoname['admin3_code'],))
geoname_tuples.append(
tuple(geoname[field]
for field, sqltype in geonames_field_mappings
if sqltype))
for possible_name in set([geoname['name'], geoname['asciiname']] + geoname['alternatenames']):
normalized_name = normalize_text(possible_name)
# require at least 2 word characters.
if re.match(r"(.*\w){2,}", normalized_name):
alternatename_tuples.append((
geoname['geonameid'],
possible_name,
normalized_name.lower()))
cur.executemany(geonames_insert_command, geoname_tuples)
cur.executemany(alternatenames_insert_command, alternatename_tuples)
cur.executemany(adminnames_insert_command, adminname_tuples)
print("Creating indexes...")
cur.execute('''
CREATE INDEX alternatename_index
ON alternatenames (alternatename_lemmatized);
''')
connection.commit()
cur.execute('''CREATE TABLE alternatename_counts
(geonameid text primary key, count integer)''')
cur.execute('''
INSERT INTO alternatename_counts
SELECT geonameid, count(alternatename)
FROM geonames INNER JOIN alternatenames USING ( geonameid )
GROUP BY geonameid
''')
connection.commit()
connection.close()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--drop-previous", dest='drop_previous', action='store_true')
parser.set_defaults(drop_previous=False)
args = parser.parse_args()
import_geonames(args.drop_previous) | PypiClean |
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/xmpp/RosterService.js | define(["dijit","dojo","dojox"],function(_1,_2,_3){
_2.provide("dojox.xmpp.RosterService");
_3.xmpp.roster={ADDED:101,CHANGED:102,REMOVED:103};
_2.declare("dojox.xmpp.RosterService",null,{constructor:function(_4){
this.session=_4;
},addRosterItem:function(_5,_6,_7){
if(!_5){
throw new Error("Roster::addRosterItem() - User ID is null");
}
var _8=this.session.getNextIqId();
var _9={id:_8,from:this.session.jid+"/"+this.session.resource,type:"set"};
var _a=new _3.string.Builder(_3.xmpp.util.createElement("iq",_9,false));
_a.append(_3.xmpp.util.createElement("query",{xmlns:"jabber:iq:roster"},false));
_5=_3.xmpp.util.encodeJid(_5);
if(_5.indexOf("@")==-1){
_5=_5+"@"+this.session.domain;
}
_a.append(_3.xmpp.util.createElement("item",{jid:_5,name:_3.xmpp.util.xmlEncode(_6)},false));
if(_7){
for(var i=0;i<_7.length;i++){
_a.append("<group>");
_a.append(_7[i]);
_a.append("</group>");
}
}
_a.append("</item></query></iq>");
var _b=this.session.dispatchPacket(_a.toString(),"iq",_9.id);
_b.addCallback(this,"verifyRoster");
return _b;
},updateRosterItem:function(_c,_d,_e){
if(_c.indexOf("@")==-1){
_c+=_c+"@"+this.session.domain;
}
var _f={id:this.session.getNextIqId(),from:this.session.jid+"/"+this.session.resource,type:"set"};
var _10=new _3.string.Builder(_3.xmpp.util.createElement("iq",_f,false));
_10.append(_3.xmpp.util.createElement("query",{xmlns:"jabber:iq:roster"},false));
var i=this.session.getRosterIndex(_c);
if(i==-1){
return;
}
var _11={jid:_c};
if(_d){
_11.name=_d;
}else{
if(this.session.roster[i].name){
_11.name=this.session.roster[i].name;
}
}
if(_11.name){
_11.name=_3.xmpp.util.xmlEncode(_11.name);
}
_10.append(_3.xmpp.util.createElement("item",_11,false));
var _12=_e?_e:this.session.roster[i].groups;
if(_12){
for(var x=0;x<_12.length;x++){
_10.append("<group>");
_10.append(_12[x]);
_10.append("</group>");
}
}
_10.append("</item></query></iq>");
var def=this.session.dispatchPacket(_10.toString(),"iq",_f.id);
def.addCallback(this,"verifyRoster");
return def;
},verifyRoster:function(res){
if(res.getAttribute("type")=="result"){
}else{
var err=this.session.processXmppError(res);
this.onAddRosterItemFailed(err);
}
return res;
},addRosterItemToGroup:function(jid,_13){
if(!jid){
throw new Error("Roster::addRosterItemToGroup() JID is null or undefined");
}
if(!_13){
throw new Error("Roster::addRosterItemToGroup() group is null or undefined");
}
var _14=this.session.getRosterIndex(jid);
if(_14==-1){
return;
}
var _15=this.session.roster[_14];
var _16=[];
var _17=false;
for(var i=0;((i<_15.groups.length)&&(!_17));i++){
if(_15.groups[i]!=_13){
continue;
}
_17=true;
}
if(!_17){
return this.updateRosterItem(jid,_15.name,_15.groups.concat(_13),_14);
}
return _3.xmpp.xmpp.INVALID_ID;
},removeRosterGroup:function(_18){
var _19=this.session.roster;
for(var i=0;i<_19.length;i++){
var _1a=_19[i];
if(_1a.groups.length>0){
for(var j=0;j<_1a.groups.length;j++){
if(_1a.groups[j]==_18){
_1a.groups.splice(j,1);
this.updateRosterItem(_1a.jid,_1a.name,_1a.groups);
}
}
}
}
},renameRosterGroup:function(_1b,_1c){
var _1d=this.session.roster;
for(var i=0;i<_1d.length;i++){
var _1e=_1d[i];
if(_1e.groups.length>0){
for(var j=0;j<_1e.groups.length;j++){
if(_1e.groups[j]==_1b){
_1e.groups[j]=_1c;
this.updateRosterItem(_1e.jid,_1e.name,_1e.groups);
}
}
}
}
},removeRosterItemFromGroup:function(jid,_1f){
if(!jid){
throw new Error("Roster::addRosterItemToGroup() JID is null or undefined");
}
if(!_1f){
throw new Error("Roster::addRosterItemToGroup() group is null or undefined");
}
var _20=this.session.getRosterIndex(jid);
if(_20==-1){
return;
}
var _21=this.session.roster[_20];
var _22=false;
for(var i=0;((i<_21.groups.length)&&(!_22));i++){
if(_21.groups[i]!=_1f){
continue;
}
_22=true;
_20=i;
}
if(_22==true){
_21.groups.splice(_20,1);
return this.updateRosterItem(jid,_21.name,_21.groups);
}
return _3.xmpp.xmpp.INVALID_ID;
},rosterItemRenameGroup:function(jid,_23,_24){
if(!jid){
throw new Error("Roster::rosterItemRenameGroup() JID is null or undefined");
}
if(!_24){
throw new Error("Roster::rosterItemRenameGroup() group is null or undefined");
}
var _25=this.session.getRosterIndex(jid);
if(_25==-1){
return;
}
var _26=this.session.roster[_25];
var _27=false;
for(var i=0;((i<_26.groups.length)&&(!_27));i++){
if(_26.groups[i]==_23){
_26.groups[i]=_24;
_27=true;
}
}
if(_27==true){
return this.updateRosterItem(jid,_26.name,_26.groups);
}
return _3.xmpp.xmpp.INVALID_ID;
},renameRosterItem:function(jid,_28){
if(!jid){
throw new Error("Roster::addRosterItemToGroup() JID is null or undefined");
}
if(!_28){
throw new Error("Roster::addRosterItemToGroup() New Name is null or undefined");
}
var _29=this.session.getRosterIndex(jid);
if(_29==-1){
return;
}
return this.updateRosterItem(jid,_28,this.session.roster.groups,_29);
},removeRosterItem:function(jid){
if(!jid){
throw new Error("Roster::addRosterItemToGroup() JID is null or undefined");
}
var req={id:this.session.getNextIqId(),from:this.session.jid+"/"+this.session.resource,type:"set"};
var _2a=new _3.string.Builder(_3.xmpp.util.createElement("iq",req,false));
_2a.append(_3.xmpp.util.createElement("query",{xmlns:"jabber:iq:roster"},false));
if(jid.indexOf("@")==-1){
jid+=jid+"@"+this.session.domain;
}
_2a.append(_3.xmpp.util.createElement("item",{jid:jid,subscription:"remove"},true));
_2a.append("</query></iq>");
var def=this.session.dispatchPacket(_2a.toString(),"iq",req.id);
def.addCallback(this,"verifyRoster");
return def;
},getAvatar:function(jid){
},publishAvatar:function(_2b,_2c){
},onVerifyRoster:function(id){
},onVerifyRosterFailed:function(err){
}});
}); | PypiClean |
/Flask-MDEditor-0.1.4.tar.gz/Flask-MDEditor-0.1.4/flask_mdeditor/static/mdeditor/js/lib/codemirror/mode/htmlmixed/htmlmixed.js |
(function(mod) {
if (typeof exports == "object" && typeof module == "object") // CommonJS
mod(require("../../lib/codemirror"), require("../xml/xml"), require("../javascript/javascript"), require("../css/css"));
else if (typeof define == "function" && define.amd) // AMD
define(["../../lib/codemirror", "../xml/xml", "../javascript/javascript", "../css/css"], mod);
else // Plain browser env
mod(CodeMirror);
})(function(CodeMirror) {
"use strict";
CodeMirror.defineMode("htmlmixed", function(config, parserConfig) {
var htmlMode = CodeMirror.getMode(config, {name: "xml",
htmlMode: true,
multilineTagIndentFactor: parserConfig.multilineTagIndentFactor,
multilineTagIndentPastTag: parserConfig.multilineTagIndentPastTag});
var cssMode = CodeMirror.getMode(config, "css");
var scriptTypes = [], scriptTypesConf = parserConfig && parserConfig.scriptTypes;
scriptTypes.push({matches: /^(?:text|application)\/(?:x-)?(?:java|ecma)script$|^$/i,
mode: CodeMirror.getMode(config, "javascript")});
if (scriptTypesConf) for (var i = 0; i < scriptTypesConf.length; ++i) {
var conf = scriptTypesConf[i];
scriptTypes.push({matches: conf.matches, mode: conf.mode && CodeMirror.getMode(config, conf.mode)});
}
scriptTypes.push({matches: /./,
mode: CodeMirror.getMode(config, "text/plain")});
function html(stream, state) {
var tagName = state.htmlState.tagName;
if (tagName) tagName = tagName.toLowerCase();
var style = htmlMode.token(stream, state.htmlState);
if (tagName == "script" && /\btag\b/.test(style) && stream.current() == ">") {
// Script block: mode to change to depends on type attribute
var scriptType = stream.string.slice(Math.max(0, stream.pos - 100), stream.pos).match(/\btype\s*=\s*("[^"]+"|'[^']+'|\S+)[^<]*$/i);
scriptType = scriptType ? scriptType[1] : "";
if (scriptType && /[\"\']/.test(scriptType.charAt(0))) scriptType = scriptType.slice(1, scriptType.length - 1);
for (var i = 0; i < scriptTypes.length; ++i) {
var tp = scriptTypes[i];
if (typeof tp.matches == "string" ? scriptType == tp.matches : tp.matches.test(scriptType)) {
if (tp.mode) {
state.token = script;
state.localMode = tp.mode;
state.localState = tp.mode.startState && tp.mode.startState(htmlMode.indent(state.htmlState, ""));
}
break;
}
}
} else if (tagName == "style" && /\btag\b/.test(style) && stream.current() == ">") {
state.token = css;
state.localMode = cssMode;
state.localState = cssMode.startState(htmlMode.indent(state.htmlState, ""));
}
return style;
}
function maybeBackup(stream, pat, style) {
var cur = stream.current();
var close = cur.search(pat), m;
if (close > -1) stream.backUp(cur.length - close);
else if (m = cur.match(/<\/?$/)) {
stream.backUp(cur.length);
if (!stream.match(pat, false)) stream.match(cur);
}
return style;
}
function script(stream, state) {
if (stream.match(/^<\/\s*script\s*>/i, false)) {
state.token = html;
state.localState = state.localMode = null;
return null;
}
return maybeBackup(stream, /<\/\s*script\s*>/,
state.localMode.token(stream, state.localState));
}
function css(stream, state) {
if (stream.match(/^<\/\s*style\s*>/i, false)) {
state.token = html;
state.localState = state.localMode = null;
return null;
}
return maybeBackup(stream, /<\/\s*style\s*>/,
cssMode.token(stream, state.localState));
}
return {
startState: function() {
var state = htmlMode.startState();
return {token: html, localMode: null, localState: null, htmlState: state};
},
copyState: function(state) {
if (state.localState)
var local = CodeMirror.copyState(state.localMode, state.localState);
return {token: state.token, localMode: state.localMode, localState: local,
htmlState: CodeMirror.copyState(htmlMode, state.htmlState)};
},
token: function(stream, state) {
return state.token(stream, state);
},
indent: function(state, textAfter) {
if (!state.localMode || /^\s*<\//.test(textAfter))
return htmlMode.indent(state.htmlState, textAfter);
else if (state.localMode.indent)
return state.localMode.indent(state.localState, textAfter);
else
return CodeMirror.Pass;
},
innerMode: function(state) {
return {state: state.localState || state.htmlState, mode: state.localMode || htmlMode};
}
};
}, "xml", "javascript", "css");
CodeMirror.defineMIME("text/html", "htmlmixed");
}); | PypiClean |
/DjangoDjangoAppCenter-0.0.11-py3-none-any.whl/DjangoAppCenter/simpleui/static/admin/simpleui-x/elementui/umd/locale/en.js | (function (global, factory) {
if (typeof define === "function" && define.amd) {
define('element/locale/en', ['module', 'exports'], factory);
} else if (typeof exports !== "undefined") {
factory(module, exports);
} else {
var mod = {
exports: {}
};
factory(mod, mod.exports);
global.ELEMENT.lang = global.ELEMENT.lang || {};
global.ELEMENT.lang.en = mod.exports;
}
})(this, function (module, exports) {
'use strict';
exports.__esModule = true;
exports.default = {
el: {
colorpicker: {
confirm: 'OK',
clear: 'Clear'
},
datepicker: {
now: 'Now',
today: 'Today',
cancel: 'Cancel',
clear: 'Clear',
confirm: 'OK',
selectDate: 'Select date',
selectTime: 'Select time',
startDate: 'Start Date',
startTime: 'Start Time',
endDate: 'End Date',
endTime: 'End Time',
prevYear: 'Previous Year',
nextYear: 'Next Year',
prevMonth: 'Previous Month',
nextMonth: 'Next Month',
year: '',
month1: 'January',
month2: 'February',
month3: 'March',
month4: 'April',
month5: 'May',
month6: 'June',
month7: 'July',
month8: 'August',
month9: 'September',
month10: 'October',
month11: 'November',
month12: 'December',
week: 'week',
weeks: {
sun: 'Sun',
mon: 'Mon',
tue: 'Tue',
wed: 'Wed',
thu: 'Thu',
fri: 'Fri',
sat: 'Sat'
},
months: {
jan: 'Jan',
feb: 'Feb',
mar: 'Mar',
apr: 'Apr',
may: 'May',
jun: 'Jun',
jul: 'Jul',
aug: 'Aug',
sep: 'Sep',
oct: 'Oct',
nov: 'Nov',
dec: 'Dec'
}
},
select: {
loading: 'Loading',
noMatch: 'No matching data',
noData: 'No data',
placeholder: 'Select'
},
cascader: {
noMatch: 'No matching data',
loading: 'Loading',
placeholder: 'Select',
noData: 'No data'
},
pagination: {
goto: 'Go to',
pagesize: '/page',
total: 'Total {total}',
pageClassifier: ''
},
messagebox: {
title: 'Message',
confirm: 'OK',
cancel: 'Cancel',
error: 'Illegal input'
},
upload: {
deleteTip: 'press delete to remove',
delete: 'Delete',
preview: 'Preview',
continue: 'Continue'
},
table: {
emptyText: 'No Data',
confirmFilter: 'Confirm',
resetFilter: 'Reset',
clearFilter: 'All',
sumText: 'Sum'
},
tree: {
emptyText: 'No Data'
},
transfer: {
noMatch: 'No matching data',
noData: 'No data',
titles: ['List 1', 'List 2'], // to be translated
filterPlaceholder: 'Enter keyword', // to be translated
noCheckedFormat: '{total} items', // to be translated
hasCheckedFormat: '{checked}/{total} checked' // to be translated
},
image: {
error: 'FAILED'
},
pageHeader: {
title: 'Back' // to be translated
}
}
};
module.exports = exports['default'];
}); | PypiClean |
/Dhelpers-0.1.5rc1.tar.gz/Dhelpers-0.1.5rc1/Dlib/Dpowers/events/hookpower/__init__.py | from .. import Adaptor, adaptionmethod
from .baseclasses import CallbackHook, KeyhookBase, ButtonhookBase
class HookAdaptor(Adaptor):
@adaptionmethod("Keyhook")
def keys(self, callback = False, timeout = 60, *, capture = False,
reinject_func = None, priority: int = 0, dedicated_thread=
False, press=True, release=True, allow_multipress=False,
write_rls=True, **custom_kwargs):
return self.keys.target_with_args()
@adaptionmethod("Buttonhook")
def buttons(self, callback = False, timeout = 60, *, capture = False,
reinject_func = None, priority: int = 0, dedicated_thread =
False, press=True, release=True, write_rls=True, **custom_kwargs):
return self.buttons.target_with_args()
@adaptionmethod("Cursorhook")
def cursor(self, callback = False, timeout = 60, *, capture = False,
reinject_func = None, priority: int = 0, dedicated_thread =
False, **custom_kwargs):
return self.cursor.target_with_args()
@adaptionmethod("Customhook")
def custom(self, callback = False, timeout=60, *, capture=False,
reinject_func=None, priority: int = 0, dedicated_thread = False,
**custom_kwargs):
return self.custom.target_with_args()
@keys.target_modifier
def _km(self, target):
self.Keyhook_class = target
target.NamedClass = self.NamedKeyClass
target.update_active_dict()
self.key_translation_dicts = target.name_translation_dicts
return target
def add_key_translation(self, dic):
self.key_translation_dicts += [dic]
self.Keyhook_class.update_active_dict()
@buttons.target_modifier
def _bm(self, target):
self.Buttonhook_class = target
target.NamedClass = self.NamedButtonClass
target.update_active_dict()
self.button_translation_dicts = target.name_translation_dicts
return target
def add_button_translation(self, dic):
self.button_translation_dicts += [dic]
self.Buttonhook_class.update_active_dict()
def keysbuttons(self, keybfunc=False, timeout=60, *, mousefunc=False,
allow_multipress = False, **hookkwargs):
# returns a HookContainer instance
if mousefunc is False: mousefunc = keybfunc
return self.keys(keybfunc, timeout, allow_multipress= allow_multipress,
**hookkwargs) + self.buttons(mousefunc, timeout, **hookkwargs)
def keyboard_mouse(self, keybfunc=False, timeout=60, *, cursorfunc=False,
mousefunc=False, **hookkwargs):
if cursorfunc is False: cursorfunc = keybfunc
return self.keysbuttons(keybfunc, timeout, mousefunc=mousefunc,
**hookkwargs) + self.cursor(cursorfunc, timeout)
# from Dhelpers.baseclasses import AdditionContainer
#
# class HookAdaptorContainer(AdditionContainer):
# basic_class = HookAdaptor
#
# @functools.wraps(HookAdaptor.keys)
# def keys(self,*args,**kwargs):
# return sum(m.keys(*args,**kwargs) for m in self.members)
#
# @functools.wraps(HookAdaptor.buttons)
# def buttons(self,*args,**kwargs):
# return sum(m.buttons(*args,**kwargs) for m in self.members)
#
# @functools.wraps(HookAdaptor.cursor)
# def cursor(self,*args,**kwargs):
# return sum(m.cursor(*args,**kwargs) for m in self.members)
#
# @functools.wraps(HookAdaptor.keysbuttons)
# def keysbuttons(self,*args,**kwargs):
# return sum(m.keysbuttons(*args,**kwargs) for m in self.members)
#
# @functools.wraps(HookAdaptor.keyboard_mouse)
# def keyboard_mouse(self,*args,**kwargs):
# return sum(m.keyboard_mouse(*args,**kwargs) for m in self.members) | PypiClean |
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/object_detection/eval_util.py | """Common utility functions for evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import re
import time
import numpy as np
from six.moves import range
import tensorflow.compat.v1 as tf
import tf_slim as slim
from object_detection.core import box_list
from object_detection.core import box_list_ops
from object_detection.core import keypoint_ops
from object_detection.core import standard_fields as fields
from object_detection.metrics import coco_evaluation
from object_detection.metrics import lvis_evaluation
from object_detection.protos import eval_pb2
from object_detection.utils import label_map_util
from object_detection.utils import object_detection_evaluation
from object_detection.utils import ops
from object_detection.utils import shape_utils
from object_detection.utils import visualization_utils as vis_utils
EVAL_KEYPOINT_METRIC = 'coco_keypoint_metrics'
# A dictionary of metric names to classes that implement the metric. The classes
# in the dictionary must implement
# utils.object_detection_evaluation.DetectionEvaluator interface.
EVAL_METRICS_CLASS_DICT = {
'coco_detection_metrics':
coco_evaluation.CocoDetectionEvaluator,
'coco_keypoint_metrics':
coco_evaluation.CocoKeypointEvaluator,
'coco_mask_metrics':
coco_evaluation.CocoMaskEvaluator,
'coco_panoptic_metrics':
coco_evaluation.CocoPanopticSegmentationEvaluator,
'lvis_mask_metrics':
lvis_evaluation.LVISMaskEvaluator,
'oid_challenge_detection_metrics':
object_detection_evaluation.OpenImagesDetectionChallengeEvaluator,
'oid_challenge_segmentation_metrics':
object_detection_evaluation
.OpenImagesInstanceSegmentationChallengeEvaluator,
'pascal_voc_detection_metrics':
object_detection_evaluation.PascalDetectionEvaluator,
'weighted_pascal_voc_detection_metrics':
object_detection_evaluation.WeightedPascalDetectionEvaluator,
'precision_at_recall_detection_metrics':
object_detection_evaluation.PrecisionAtRecallDetectionEvaluator,
'pascal_voc_instance_segmentation_metrics':
object_detection_evaluation.PascalInstanceSegmentationEvaluator,
'weighted_pascal_voc_instance_segmentation_metrics':
object_detection_evaluation.WeightedPascalInstanceSegmentationEvaluator,
'oid_V2_detection_metrics':
object_detection_evaluation.OpenImagesDetectionEvaluator,
}
EVAL_DEFAULT_METRIC = 'coco_detection_metrics'
def write_metrics(metrics, global_step, summary_dir):
"""Write metrics to a summary directory.
Args:
metrics: A dictionary containing metric names and values.
global_step: Global step at which the metrics are computed.
summary_dir: Directory to write tensorflow summaries to.
"""
tf.logging.info('Writing metrics to tf summary.')
summary_writer = tf.summary.FileWriterCache.get(summary_dir)
for key in sorted(metrics):
summary = tf.Summary(value=[
tf.Summary.Value(tag=key, simple_value=metrics[key]),
])
summary_writer.add_summary(summary, global_step)
tf.logging.info('%s: %f', key, metrics[key])
tf.logging.info('Metrics written to tf summary.')
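# Hypothetical usage sketch (not part of the original module): shows how
# write_metrics above might be called after one evaluation pass. The metric
# name, step and directory are made-up placeholder values.
def _example_write_metrics(summary_dir='/tmp/eval_summaries'):
  """Writes a single fake metric so it appears in TensorBoard under summary_dir."""
  example_metrics = {'DetectionBoxes_Precision/mAP': 0.42}
  write_metrics(example_metrics, global_step=1000, summary_dir=summary_dir)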
# TODO(rathodv): Add tests.
def visualize_detection_results(result_dict,
tag,
global_step,
categories,
summary_dir='',
export_dir='',
agnostic_mode=False,
show_groundtruth=False,
groundtruth_box_visualization_color='black',
min_score_thresh=.5,
max_num_predictions=20,
skip_scores=False,
skip_labels=False,
keep_image_id_for_visualization_export=False):
"""Visualizes detection results and writes visualizations to image summaries.
This function visualizes an image with its detected bounding boxes and writes
to image summaries which can be viewed on tensorboard. It optionally also
  writes images to a directory. If an entry is missing from the label map, the
  class name shown in the visualization is "N/A".
Args:
result_dict: a dictionary holding groundtruth and detection
data corresponding to each image being evaluated. The following keys
are required:
'original_image': a numpy array representing the image with shape
[1, height, width, 3] or [1, height, width, 1]
'detection_boxes': a numpy array of shape [N, 4]
'detection_scores': a numpy array of shape [N]
'detection_classes': a numpy array of shape [N]
The following keys are optional:
'groundtruth_boxes': a numpy array of shape [N, 4]
'groundtruth_keypoints': a numpy array of shape [N, num_keypoints, 2]
      Detections are assumed to be provided in decreasing order of score, for
      display purposes, and scores are assumed to be probabilities between 0
      and 1.
tag: tensorboard tag (string) to associate with image.
global_step: global step at which the visualization are generated.
categories: a list of dictionaries representing all possible categories.
Each dict in this list has the following keys:
'id': (required) an integer id uniquely identifying this category
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'
'supercategory': (optional) string representing the supercategory
e.g., 'animal', 'vehicle', 'food', etc
summary_dir: the output directory to which the image summaries are written.
export_dir: the output directory to which images are written. If this is
empty (default), then images are not exported.
agnostic_mode: boolean (default: False) controlling whether to evaluate in
class-agnostic mode or not.
show_groundtruth: boolean (default: False) controlling whether to show
groundtruth boxes in addition to detected boxes
groundtruth_box_visualization_color: box color for visualizing groundtruth
boxes
min_score_thresh: minimum score threshold for a box to be visualized
max_num_predictions: maximum number of detections to visualize
skip_scores: whether to skip score when drawing a single detection
skip_labels: whether to skip label when drawing a single detection
keep_image_id_for_visualization_export: whether to keep image identifier in
filename when exported to export_dir
Raises:
ValueError: if result_dict does not contain the expected keys (i.e.,
'original_image', 'detection_boxes', 'detection_scores',
'detection_classes')
"""
detection_fields = fields.DetectionResultFields
input_fields = fields.InputDataFields
if not set([
input_fields.original_image,
detection_fields.detection_boxes,
detection_fields.detection_scores,
detection_fields.detection_classes,
]).issubset(set(result_dict.keys())):
raise ValueError('result_dict does not contain all expected keys.')
if show_groundtruth and input_fields.groundtruth_boxes not in result_dict:
raise ValueError('If show_groundtruth is enabled, result_dict must contain '
'groundtruth_boxes.')
tf.logging.info('Creating detection visualizations.')
category_index = label_map_util.create_category_index(categories)
image = np.squeeze(result_dict[input_fields.original_image], axis=0)
if image.shape[2] == 1: # If one channel image, repeat in RGB.
image = np.tile(image, [1, 1, 3])
detection_boxes = result_dict[detection_fields.detection_boxes]
detection_scores = result_dict[detection_fields.detection_scores]
detection_classes = np.int32((result_dict[
detection_fields.detection_classes]))
detection_keypoints = result_dict.get(detection_fields.detection_keypoints)
detection_masks = result_dict.get(detection_fields.detection_masks)
detection_boundaries = result_dict.get(detection_fields.detection_boundaries)
# Plot groundtruth underneath detections
if show_groundtruth:
groundtruth_boxes = result_dict[input_fields.groundtruth_boxes]
groundtruth_keypoints = result_dict.get(input_fields.groundtruth_keypoints)
vis_utils.visualize_boxes_and_labels_on_image_array(
image=image,
boxes=groundtruth_boxes,
classes=None,
scores=None,
category_index=category_index,
keypoints=groundtruth_keypoints,
use_normalized_coordinates=False,
max_boxes_to_draw=None,
groundtruth_box_visualization_color=groundtruth_box_visualization_color)
vis_utils.visualize_boxes_and_labels_on_image_array(
image,
detection_boxes,
detection_classes,
detection_scores,
category_index,
instance_masks=detection_masks,
instance_boundaries=detection_boundaries,
keypoints=detection_keypoints,
use_normalized_coordinates=False,
max_boxes_to_draw=max_num_predictions,
min_score_thresh=min_score_thresh,
agnostic_mode=agnostic_mode,
skip_scores=skip_scores,
skip_labels=skip_labels)
if export_dir:
if keep_image_id_for_visualization_export and result_dict[fields.
InputDataFields()
.key]:
export_path = os.path.join(export_dir, 'export-{}-{}.png'.format(
tag, result_dict[fields.InputDataFields().key]))
else:
export_path = os.path.join(export_dir, 'export-{}.png'.format(tag))
vis_utils.save_image_array_as_png(image, export_path)
summary = tf.Summary(value=[
tf.Summary.Value(
tag=tag,
image=tf.Summary.Image(
encoded_image_string=vis_utils.encode_image_array_as_png_str(
image)))
])
summary_writer = tf.summary.FileWriterCache.get(summary_dir)
summary_writer.add_summary(summary, global_step)
tf.logging.info('Detection visualizations written to summary with tag %s.',
tag)
def _run_checkpoint_once(tensor_dict,
evaluators=None,
batch_processor=None,
checkpoint_dirs=None,
variables_to_restore=None,
restore_fn=None,
num_batches=1,
master='',
save_graph=False,
save_graph_dir='',
losses_dict=None,
eval_export_path=None,
process_metrics_fn=None):
"""Evaluates metrics defined in evaluators and returns summaries.
This function loads the latest checkpoint in checkpoint_dirs and evaluates
all metrics defined in evaluators. The metrics are processed in batch by the
batch_processor.
Args:
tensor_dict: a dictionary holding tensors representing a batch of detections
and corresponding groundtruth annotations.
evaluators: a list of object of type DetectionEvaluator to be used for
evaluation. Note that the metric names produced by different evaluators
must be unique.
batch_processor: a function taking four arguments:
1. tensor_dict: the same tensor_dict that is passed in as the first
argument to this function.
2. sess: a tensorflow session
      3. batch_index: an integer representing the index of the batch amongst
        all batches
      4. counters: a dictionary holding 'success' and 'skipped' counts that the
        processor is expected to update
      By default, batch_processor is None, which defaults to running:
return sess.run(tensor_dict)
To skip an image, it suffices to return an empty dictionary in place of
result_dict.
    checkpoint_dirs: list of directories to load into an EnsembleModel. If it
      has only one directory, EnsembleModel will not be used -- a
      DetectionModel will be instantiated directly. Not used if restore_fn is
      set.
variables_to_restore: None, or a dictionary mapping variable names found in
a checkpoint to model variables. The dictionary would normally be
generated by creating a tf.train.ExponentialMovingAverage object and
calling its variables_to_restore() method. Not used if restore_fn is set.
restore_fn: None, or a function that takes a tf.Session object and correctly
restores all necessary variables from the correct checkpoint file. If
None, attempts to restore from the first directory in checkpoint_dirs.
num_batches: the number of batches to use for evaluation.
master: the location of the Tensorflow session.
save_graph: whether or not the Tensorflow graph is stored as a pbtxt file.
save_graph_dir: where to store the Tensorflow graph on disk. If save_graph
is True this must be non-empty.
losses_dict: optional dictionary of scalar detection losses.
eval_export_path: Path for saving a json file that contains the detection
results in json format.
process_metrics_fn: a callback called with evaluation results after each
evaluation is done. It could be used e.g. to back up checkpoints with
best evaluation scores, or to call an external system to update evaluation
results in order to drive best hyper-parameter search. Parameters are:
int checkpoint_number, Dict[str, ObjectDetectionEvalMetrics] metrics,
str checkpoint_file path.
Returns:
global_step: the count of global steps.
all_evaluator_metrics: A dictionary containing metric names and values.
Raises:
ValueError: if restore_fn is None and checkpoint_dirs doesn't have at least
one element.
ValueError: if save_graph is True and save_graph_dir is not defined.
"""
if save_graph and not save_graph_dir:
raise ValueError('`save_graph_dir` must be defined.')
sess = tf.Session(master, graph=tf.get_default_graph())
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
checkpoint_file = None
if restore_fn:
restore_fn(sess)
else:
if not checkpoint_dirs:
raise ValueError('`checkpoint_dirs` must have at least one entry.')
checkpoint_file = tf.train.latest_checkpoint(checkpoint_dirs[0])
saver = tf.train.Saver(variables_to_restore)
saver.restore(sess, checkpoint_file)
if save_graph:
tf.train.write_graph(sess.graph_def, save_graph_dir, 'eval.pbtxt')
counters = {'skipped': 0, 'success': 0}
aggregate_result_losses_dict = collections.defaultdict(list)
with slim.queues.QueueRunners(sess):
try:
for batch in range(int(num_batches)):
if (batch + 1) % 100 == 0:
tf.logging.info('Running eval ops batch %d/%d', batch + 1,
num_batches)
if not batch_processor:
try:
if not losses_dict:
losses_dict = {}
result_dict, result_losses_dict = sess.run([tensor_dict,
losses_dict])
counters['success'] += 1
except tf.errors.InvalidArgumentError:
tf.logging.info('Skipping image')
counters['skipped'] += 1
result_dict = {}
else:
result_dict, result_losses_dict = batch_processor(
tensor_dict, sess, batch, counters, losses_dict=losses_dict)
if not result_dict:
continue
for key, value in iter(result_losses_dict.items()):
aggregate_result_losses_dict[key].append(value)
for evaluator in evaluators:
# TODO(b/65130867): Use image_id tensor once we fix the input data
# decoders to return correct image_id.
# TODO(akuznetsa): result_dict contains batches of images, while
# add_single_ground_truth_image_info expects a single image. Fix
if (isinstance(result_dict, dict) and
fields.InputDataFields.key in result_dict and
result_dict[fields.InputDataFields.key]):
image_id = result_dict[fields.InputDataFields.key]
else:
image_id = batch
evaluator.add_single_ground_truth_image_info(
image_id=image_id, groundtruth_dict=result_dict)
evaluator.add_single_detected_image_info(
image_id=image_id, detections_dict=result_dict)
tf.logging.info('Running eval batches done.')
except tf.errors.OutOfRangeError:
tf.logging.info('Done evaluating -- epoch limit reached')
finally:
# When done, ask the threads to stop.
tf.logging.info('# success: %d', counters['success'])
tf.logging.info('# skipped: %d', counters['skipped'])
all_evaluator_metrics = {}
  if eval_export_path:
for evaluator in evaluators:
if (isinstance(evaluator, coco_evaluation.CocoDetectionEvaluator) or
isinstance(evaluator, coco_evaluation.CocoMaskEvaluator)):
tf.logging.info('Started dumping to json file.')
evaluator.dump_detections_to_json_file(
json_output_path=eval_export_path)
tf.logging.info('Finished dumping to json file.')
for evaluator in evaluators:
metrics = evaluator.evaluate()
evaluator.clear()
if any(key in all_evaluator_metrics for key in metrics):
raise ValueError('Metric names between evaluators must not collide.')
all_evaluator_metrics.update(metrics)
global_step = tf.train.global_step(sess, tf.train.get_global_step())
for key, value in iter(aggregate_result_losses_dict.items()):
all_evaluator_metrics['Losses/' + key] = np.mean(value)
if process_metrics_fn and checkpoint_file:
m = re.search(r'model.ckpt-(\d+)$', checkpoint_file)
if not m:
tf.logging.error('Failed to parse checkpoint number from: %s',
checkpoint_file)
else:
checkpoint_number = int(m.group(1))
process_metrics_fn(checkpoint_number, all_evaluator_metrics,
checkpoint_file)
sess.close()
return (global_step, all_evaluator_metrics)
# TODO(rathodv): Add tests.
def repeated_checkpoint_run(tensor_dict,
summary_dir,
evaluators,
batch_processor=None,
checkpoint_dirs=None,
variables_to_restore=None,
restore_fn=None,
num_batches=1,
eval_interval_secs=120,
max_number_of_evaluations=None,
max_evaluation_global_step=None,
master='',
save_graph=False,
save_graph_dir='',
losses_dict=None,
eval_export_path=None,
process_metrics_fn=None):
"""Periodically evaluates desired tensors using checkpoint_dirs or restore_fn.
This function repeatedly loads a checkpoint and evaluates a desired
set of tensors (provided by tensor_dict) and hands the resulting numpy
arrays to a function result_processor which can be used to further
process/save/visualize the results.
Args:
tensor_dict: a dictionary holding tensors representing a batch of detections
and corresponding groundtruth annotations.
summary_dir: a directory to write metrics summaries.
evaluators: a list of object of type DetectionEvaluator to be used for
evaluation. Note that the metric names produced by different evaluators
must be unique.
    batch_processor: a function taking four arguments:
      1. tensor_dict: the same tensor_dict that is passed in as the first
        argument to this function.
      2. sess: a tensorflow session
      3. batch_index: an integer representing the index of the batch amongst
        all batches
      4. counters: a dictionary holding 'success' and 'skipped' counts that the
        processor is expected to update
      By default, batch_processor is None, which defaults to running:
return sess.run(tensor_dict)
checkpoint_dirs: list of directories to load into a DetectionModel or an
EnsembleModel if restore_fn isn't set. Also used to determine when to run
next evaluation. Must have at least one element.
variables_to_restore: None, or a dictionary mapping variable names found in
a checkpoint to model variables. The dictionary would normally be
generated by creating a tf.train.ExponentialMovingAverage object and
calling its variables_to_restore() method. Not used if restore_fn is set.
restore_fn: a function that takes a tf.Session object and correctly restores
all necessary variables from the correct checkpoint file.
num_batches: the number of batches to use for evaluation.
eval_interval_secs: the number of seconds between each evaluation run.
max_number_of_evaluations: the max number of iterations of the evaluation.
If the value is left as None the evaluation continues indefinitely.
max_evaluation_global_step: global step when evaluation stops.
master: the location of the Tensorflow session.
save_graph: whether or not the Tensorflow graph is saved as a pbtxt file.
save_graph_dir: where to save on disk the Tensorflow graph. If store_graph
is True this must be non-empty.
losses_dict: optional dictionary of scalar detection losses.
eval_export_path: Path for saving a json file that contains the detection
results in json format.
process_metrics_fn: a callback called with evaluation results after each
evaluation is done. It could be used e.g. to back up checkpoints with
best evaluation scores, or to call an external system to update evaluation
results in order to drive best hyper-parameter search. Parameters are:
int checkpoint_number, Dict[str, ObjectDetectionEvalMetrics] metrics,
str checkpoint_file path.
Returns:
metrics: A dictionary containing metric names and values in the latest
evaluation.
Raises:
ValueError: if max_num_of_evaluations is not None or a positive number.
ValueError: if checkpoint_dirs doesn't have at least one element.
"""
if max_number_of_evaluations and max_number_of_evaluations <= 0:
raise ValueError(
'`max_number_of_evaluations` must be either None or a positive number.')
if max_evaluation_global_step and max_evaluation_global_step <= 0:
raise ValueError(
'`max_evaluation_global_step` must be either None or positive.')
if not checkpoint_dirs:
raise ValueError('`checkpoint_dirs` must have at least one entry.')
last_evaluated_model_path = None
number_of_evaluations = 0
while True:
start = time.time()
tf.logging.info('Starting evaluation at ' + time.strftime(
'%Y-%m-%d-%H:%M:%S', time.gmtime()))
model_path = tf.train.latest_checkpoint(checkpoint_dirs[0])
if not model_path:
tf.logging.info('No model found in %s. Will try again in %d seconds',
checkpoint_dirs[0], eval_interval_secs)
elif model_path == last_evaluated_model_path:
tf.logging.info('Found already evaluated checkpoint. Will try again in '
'%d seconds', eval_interval_secs)
else:
last_evaluated_model_path = model_path
global_step, metrics = _run_checkpoint_once(
tensor_dict,
evaluators,
batch_processor,
checkpoint_dirs,
variables_to_restore,
restore_fn,
num_batches,
master,
save_graph,
save_graph_dir,
losses_dict=losses_dict,
eval_export_path=eval_export_path,
process_metrics_fn=process_metrics_fn)
write_metrics(metrics, global_step, summary_dir)
if (max_evaluation_global_step and
global_step >= max_evaluation_global_step):
tf.logging.info('Finished evaluation!')
break
number_of_evaluations += 1
if (max_number_of_evaluations and
number_of_evaluations >= max_number_of_evaluations):
tf.logging.info('Finished evaluation!')
break
time_to_next_eval = start + eval_interval_secs - time.time()
if time_to_next_eval > 0:
time.sleep(time_to_next_eval)
return metrics
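# Hypothetical invocation sketch (not part of the original module; all names
# and paths below are placeholders). repeated_checkpoint_run is typically
# driven by a legacy evaluation binary roughly as follows:
#
#   metrics = repeated_checkpoint_run(
#       tensor_dict=eval_tensor_dict,  # built from the model's postprocessed outputs
#       summary_dir='/tmp/eval',
#       evaluators=get_evaluators(eval_config, categories),
#       checkpoint_dirs=['/tmp/train'],
#       num_batches=eval_config.num_examples,
#       eval_interval_secs=eval_config.eval_interval_secs,
#       max_number_of_evaluations=1)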
def _scale_box_to_absolute(args):
boxes, image_shape = args
return box_list_ops.to_absolute_coordinates(
box_list.BoxList(boxes), image_shape[0], image_shape[1]).get()
def _resize_detection_masks(arg_tuple):
"""Resizes detection masks.
Args:
arg_tuple: A (detection_boxes, detection_masks, image_shape, pad_shape)
tuple where
detection_boxes is a tf.float32 tensor of size [num_masks, 4] containing
the box corners. Row i contains [ymin, xmin, ymax, xmax] of the box
corresponding to mask i. Note that the box corners are in
normalized coordinates.
detection_masks is a tensor of size
[num_masks, mask_height, mask_width].
image_shape is a tensor of shape [2]
pad_shape is a tensor of shape [2] --- this is assumed to be greater
than or equal to image_shape along both dimensions and represents a
shape to-be-padded-to.
  Returns:
    A uint8 tensor of shape [num_masks, pad_shape[0], pad_shape[1]] containing
    the masks reframed to the full (padded) image and binarized at 0.5 when the
    input masks are float.
  """
detection_boxes, detection_masks, image_shape, pad_shape = arg_tuple
detection_masks_reframed = ops.reframe_box_masks_to_image_masks(
detection_masks, detection_boxes, image_shape[0], image_shape[1])
pad_instance_dim = tf.zeros([3, 1], dtype=tf.int32)
pad_hw_dim = tf.concat([tf.zeros([1], dtype=tf.int32),
pad_shape - image_shape], axis=0)
pad_hw_dim = tf.expand_dims(pad_hw_dim, 1)
paddings = tf.concat([pad_instance_dim, pad_hw_dim], axis=1)
detection_masks_reframed = tf.pad(detection_masks_reframed, paddings)
# If the masks are currently float, binarize them. Otherwise keep them as
# integers, since they have already been thresholded.
if detection_masks_reframed.dtype == tf.float32:
detection_masks_reframed = tf.greater(detection_masks_reframed, 0.5)
return tf.cast(detection_masks_reframed, tf.uint8)
def resize_detection_masks(detection_boxes, detection_masks,
original_image_spatial_shapes):
"""Resizes per-box detection masks to be relative to the entire image.
Note that this function only works when the spatial size of all images in
the batch is the same. If not, this function should be used with batch_size=1.
Args:
detection_boxes: A [batch_size, num_instances, 4] float tensor containing
bounding boxes.
detection_masks: A [batch_size, num_instances, height, width] float tensor
containing binary instance masks per box.
    original_image_spatial_shapes: a [batch_size, 2] shaped int tensor
      holding the spatial dimensions (height, width) of each image in the
      batch.
Returns:
masks: Masks resized to the spatial extents given by
(original_image_spatial_shapes[0, 0], original_image_spatial_shapes[0, 1])
"""
# modify original image spatial shapes to be max along each dim
# in evaluator, should have access to original_image_spatial_shape field
# in add_Eval_Dict
max_spatial_shape = tf.reduce_max(
original_image_spatial_shapes, axis=0, keep_dims=True)
tiled_max_spatial_shape = tf.tile(
max_spatial_shape,
multiples=[tf.shape(original_image_spatial_shapes)[0], 1])
return shape_utils.static_or_dynamic_map_fn(
_resize_detection_masks,
elems=[detection_boxes,
detection_masks,
original_image_spatial_shapes,
tiled_max_spatial_shape],
dtype=tf.uint8)
def _resize_groundtruth_masks(args):
"""Resizes groundtruth masks to the original image size."""
mask, true_image_shape, original_image_shape, pad_shape = args
true_height = true_image_shape[0]
true_width = true_image_shape[1]
mask = mask[:, :true_height, :true_width]
mask = tf.expand_dims(mask, 3)
mask = tf.image.resize_images(
mask,
original_image_shape,
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
align_corners=True)
paddings = tf.concat(
[tf.zeros([3, 1], dtype=tf.int32),
tf.expand_dims(
tf.concat([tf.zeros([1], dtype=tf.int32),
pad_shape-original_image_shape], axis=0),
1)], axis=1)
mask = tf.pad(tf.squeeze(mask, 3), paddings)
return tf.cast(mask, tf.uint8)
def _resize_surface_coordinate_masks(args):
detection_boxes, surface_coords, image_shape = args
surface_coords_v, surface_coords_u = tf.unstack(surface_coords, axis=-1)
surface_coords_v_reframed = ops.reframe_box_masks_to_image_masks(
surface_coords_v, detection_boxes, image_shape[0], image_shape[1])
surface_coords_u_reframed = ops.reframe_box_masks_to_image_masks(
surface_coords_u, detection_boxes, image_shape[0], image_shape[1])
return tf.stack([surface_coords_v_reframed, surface_coords_u_reframed],
axis=-1)
def _scale_keypoint_to_absolute(args):
keypoints, image_shape = args
return keypoint_ops.scale(keypoints, image_shape[0], image_shape[1])
def result_dict_for_single_example(image,
key,
detections,
groundtruth=None,
class_agnostic=False,
scale_to_absolute=False):
"""Merges all detection and groundtruth information for a single example.
Note that evaluation tools require classes that are 1-indexed, and so this
function performs the offset. If `class_agnostic` is True, all output classes
have label 1.
Args:
image: A single 4D uint8 image tensor of shape [1, H, W, C].
key: A single string tensor identifying the image.
detections: A dictionary of detections, returned from
DetectionModel.postprocess().
groundtruth: (Optional) Dictionary of groundtruth items, with fields:
'groundtruth_boxes': [num_boxes, 4] float32 tensor of boxes, in
normalized coordinates.
'groundtruth_classes': [num_boxes] int64 tensor of 1-indexed classes.
'groundtruth_area': [num_boxes] float32 tensor of bbox area. (Optional)
'groundtruth_is_crowd': [num_boxes] int64 tensor. (Optional)
'groundtruth_difficult': [num_boxes] int64 tensor. (Optional)
'groundtruth_group_of': [num_boxes] int64 tensor. (Optional)
'groundtruth_instance_masks': 3D int64 tensor of instance masks
(Optional).
'groundtruth_keypoints': [num_boxes, num_keypoints, 2] float32 tensor with
keypoints (Optional).
class_agnostic: Boolean indicating whether the detections are class-agnostic
(i.e. binary). Default False.
scale_to_absolute: Boolean indicating whether boxes and keypoints should be
scaled to absolute coordinates. Note that for IoU based evaluations, it
does not matter whether boxes are expressed in absolute or relative
coordinates. Default False.
Returns:
A dictionary with:
'original_image': A [1, H, W, C] uint8 image tensor.
'key': A string tensor with image identifier.
'detection_boxes': [max_detections, 4] float32 tensor of boxes, in
normalized or absolute coordinates, depending on the value of
`scale_to_absolute`.
'detection_scores': [max_detections] float32 tensor of scores.
'detection_classes': [max_detections] int64 tensor of 1-indexed classes.
'detection_masks': [max_detections, H, W] float32 tensor of binarized
masks, reframed to full image masks.
'groundtruth_boxes': [num_boxes, 4] float32 tensor of boxes, in
normalized or absolute coordinates, depending on the value of
`scale_to_absolute`. (Optional)
'groundtruth_classes': [num_boxes] int64 tensor of 1-indexed classes.
(Optional)
'groundtruth_area': [num_boxes] float32 tensor of bbox area. (Optional)
'groundtruth_is_crowd': [num_boxes] int64 tensor. (Optional)
'groundtruth_difficult': [num_boxes] int64 tensor. (Optional)
'groundtruth_group_of': [num_boxes] int64 tensor. (Optional)
'groundtruth_instance_masks': 3D int64 tensor of instance masks
(Optional).
'groundtruth_keypoints': [num_boxes, num_keypoints, 2] float32 tensor with
keypoints (Optional).
"""
if groundtruth:
max_gt_boxes = tf.shape(
groundtruth[fields.InputDataFields.groundtruth_boxes])[0]
for gt_key in groundtruth:
# expand groundtruth dict along the batch dimension.
groundtruth[gt_key] = tf.expand_dims(groundtruth[gt_key], 0)
for detection_key in detections:
detections[detection_key] = tf.expand_dims(
detections[detection_key][0], axis=0)
batched_output_dict = result_dict_for_batched_example(
image,
tf.expand_dims(key, 0),
detections,
groundtruth,
class_agnostic,
scale_to_absolute,
max_gt_boxes=max_gt_boxes)
exclude_keys = [
fields.InputDataFields.original_image,
fields.DetectionResultFields.num_detections,
fields.InputDataFields.num_groundtruth_boxes
]
output_dict = {
fields.InputDataFields.original_image:
batched_output_dict[fields.InputDataFields.original_image]
}
for key in batched_output_dict:
# remove the batch dimension.
if key not in exclude_keys:
output_dict[key] = tf.squeeze(batched_output_dict[key], 0)
return output_dict
def result_dict_for_batched_example(images,
keys,
detections,
groundtruth=None,
class_agnostic=False,
scale_to_absolute=False,
original_image_spatial_shapes=None,
true_image_shapes=None,
max_gt_boxes=None,
label_id_offset=1):
"""Merges all detection and groundtruth information for a single example.
Note that evaluation tools require classes that are 1-indexed, and so this
function performs the offset. If `class_agnostic` is True, all output classes
have label 1.
The groundtruth coordinates of boxes/keypoints in 'groundtruth' dictionary are
normalized relative to the (potentially padded) input image, while the
coordinates in 'detection' dictionary are normalized relative to the true
image shape.
Args:
images: A single 4D uint8 image tensor of shape [batch_size, H, W, C].
keys: A [batch_size] string/int tensor with image identifier.
detections: A dictionary of detections, returned from
DetectionModel.postprocess().
groundtruth: (Optional) Dictionary of groundtruth items, with fields:
'groundtruth_boxes': [batch_size, max_number_of_boxes, 4] float32 tensor
of boxes, in normalized coordinates.
'groundtruth_classes': [batch_size, max_number_of_boxes] int64 tensor of
1-indexed classes.
'groundtruth_area': [batch_size, max_number_of_boxes] float32 tensor of
bbox area. (Optional)
'groundtruth_is_crowd':[batch_size, max_number_of_boxes] int64
tensor. (Optional)
'groundtruth_difficult': [batch_size, max_number_of_boxes] int64
tensor. (Optional)
'groundtruth_group_of': [batch_size, max_number_of_boxes] int64
tensor. (Optional)
'groundtruth_instance_masks': 4D int64 tensor of instance
masks (Optional).
'groundtruth_keypoints': [batch_size, max_number_of_boxes, num_keypoints,
2] float32 tensor with keypoints (Optional).
'groundtruth_keypoint_visibilities': [batch_size, max_number_of_boxes,
num_keypoints] bool tensor with keypoint visibilities (Optional).
'groundtruth_labeled_classes': [batch_size, num_classes] int64
tensor of 1-indexed classes. (Optional)
'groundtruth_dp_num_points': [batch_size, max_number_of_boxes] int32
tensor. (Optional)
'groundtruth_dp_part_ids': [batch_size, max_number_of_boxes,
max_sampled_points] int32 tensor. (Optional)
'groundtruth_dp_surface_coords_list': [batch_size, max_number_of_boxes,
max_sampled_points, 4] float32 tensor. (Optional)
class_agnostic: Boolean indicating whether the detections are class-agnostic
(i.e. binary). Default False.
scale_to_absolute: Boolean indicating whether boxes and keypoints should be
scaled to absolute coordinates. Note that for IoU based evaluations, it
does not matter whether boxes are expressed in absolute or relative
coordinates. Default False.
original_image_spatial_shapes: A 2D int32 tensor of shape [batch_size, 2]
used to resize the image. When set to None, the image size is retained.
true_image_shapes: A 2D int32 tensor of shape [batch_size, 3]
containing the size of the unpadded original_image.
max_gt_boxes: [batch_size] tensor representing the maximum number of
groundtruth boxes to pad.
label_id_offset: offset for class ids.
Returns:
A dictionary with:
'original_image': A [batch_size, H, W, C] uint8 image tensor.
'original_image_spatial_shape': A [batch_size, 2] tensor containing the
original image sizes.
'true_image_shape': A [batch_size, 3] tensor containing the size of
the unpadded original_image.
'key': A [batch_size] string tensor with image identifier.
'detection_boxes': [batch_size, max_detections, 4] float32 tensor of boxes,
in normalized or absolute coordinates, depending on the value of
`scale_to_absolute`.
'detection_scores': [batch_size, max_detections] float32 tensor of scores.
'detection_classes': [batch_size, max_detections] int64 tensor of 1-indexed
classes.
'detection_masks': [batch_size, max_detections, H, W] uint8 tensor of
instance masks, reframed to full image masks. Note that these may be
binarized (e.g. {0, 1}), or may contain 1-indexed part labels. (Optional)
'detection_keypoints': [batch_size, max_detections, num_keypoints, 2]
float32 tensor containing keypoint coordinates. (Optional)
'detection_keypoint_scores': [batch_size, max_detections, num_keypoints]
float32 tensor containing keypoint scores. (Optional)
'detection_surface_coords': [batch_size, max_detection, H, W, 2] float32
tensor with normalized surface coordinates (e.g. DensePose UV
coordinates). (Optional)
'num_detections': [batch_size] int64 tensor containing number of valid
detections.
'groundtruth_boxes': [batch_size, num_boxes, 4] float32 tensor of boxes, in
normalized or absolute coordinates, depending on the value of
`scale_to_absolute`. (Optional)
'groundtruth_classes': [batch_size, num_boxes] int64 tensor of 1-indexed
classes. (Optional)
'groundtruth_area': [batch_size, num_boxes] float32 tensor of bbox
area. (Optional)
'groundtruth_is_crowd': [batch_size, num_boxes] int64 tensor. (Optional)
'groundtruth_difficult': [batch_size, num_boxes] int64 tensor. (Optional)
'groundtruth_group_of': [batch_size, num_boxes] int64 tensor. (Optional)
'groundtruth_instance_masks': 4D int64 tensor of instance masks
(Optional).
'groundtruth_keypoints': [batch_size, num_boxes, num_keypoints, 2] float32
tensor with keypoints (Optional).
'groundtruth_keypoint_visibilities': [batch_size, num_boxes, num_keypoints]
bool tensor with keypoint visibilities (Optional).
'groundtruth_labeled_classes': [batch_size, num_classes] int64 tensor
of 1-indexed classes. (Optional)
'num_groundtruth_boxes': [batch_size] tensor containing the maximum number
of groundtruth boxes per image.
Raises:
    ValueError: if original_image_spatial_shapes is not a 2D int32 tensor of
      shape [batch_size, 2].
    ValueError: if true_image_shapes is not a 2D int32 tensor of shape
      [batch_size, 3].
"""
input_data_fields = fields.InputDataFields
if original_image_spatial_shapes is None:
original_image_spatial_shapes = tf.tile(
tf.expand_dims(tf.shape(images)[1:3], axis=0),
multiples=[tf.shape(images)[0], 1])
else:
    if (len(original_image_spatial_shapes.shape) != 2 or
        original_image_spatial_shapes.shape[1] != 2):
raise ValueError(
'`original_image_spatial_shape` should be a 2D tensor of shape '
'[batch_size, 2].')
if true_image_shapes is None:
true_image_shapes = tf.tile(
tf.expand_dims(tf.shape(images)[1:4], axis=0),
multiples=[tf.shape(images)[0], 1])
else:
    if (len(true_image_shapes.shape) != 2
        or true_image_shapes.shape[1] != 3):
raise ValueError('`true_image_shapes` should be a 2D tensor of '
'shape [batch_size, 3].')
output_dict = {
input_data_fields.original_image:
images,
input_data_fields.key:
keys,
input_data_fields.original_image_spatial_shape: (
original_image_spatial_shapes),
input_data_fields.true_image_shape:
true_image_shapes
}
detection_fields = fields.DetectionResultFields
detection_boxes = detections[detection_fields.detection_boxes]
detection_scores = detections[detection_fields.detection_scores]
num_detections = tf.cast(detections[detection_fields.num_detections],
dtype=tf.int32)
if class_agnostic:
detection_classes = tf.ones_like(detection_scores, dtype=tf.int64)
else:
detection_classes = (
tf.to_int64(detections[detection_fields.detection_classes]) +
label_id_offset)
if scale_to_absolute:
output_dict[detection_fields.detection_boxes] = (
shape_utils.static_or_dynamic_map_fn(
_scale_box_to_absolute,
elems=[detection_boxes, original_image_spatial_shapes],
dtype=tf.float32))
else:
output_dict[detection_fields.detection_boxes] = detection_boxes
output_dict[detection_fields.detection_classes] = detection_classes
output_dict[detection_fields.detection_scores] = detection_scores
output_dict[detection_fields.num_detections] = num_detections
if detection_fields.detection_masks in detections:
detection_masks = detections[detection_fields.detection_masks]
output_dict[detection_fields.detection_masks] = resize_detection_masks(
detection_boxes, detection_masks, original_image_spatial_shapes)
if detection_fields.detection_surface_coords in detections:
detection_surface_coords = detections[
detection_fields.detection_surface_coords]
output_dict[detection_fields.detection_surface_coords] = (
shape_utils.static_or_dynamic_map_fn(
_resize_surface_coordinate_masks,
elems=[detection_boxes, detection_surface_coords,
original_image_spatial_shapes],
dtype=tf.float32))
if detection_fields.detection_keypoints in detections:
detection_keypoints = detections[detection_fields.detection_keypoints]
output_dict[detection_fields.detection_keypoints] = detection_keypoints
if scale_to_absolute:
output_dict[detection_fields.detection_keypoints] = (
shape_utils.static_or_dynamic_map_fn(
_scale_keypoint_to_absolute,
elems=[detection_keypoints, original_image_spatial_shapes],
dtype=tf.float32))
if detection_fields.detection_keypoint_scores in detections:
output_dict[detection_fields.detection_keypoint_scores] = detections[
detection_fields.detection_keypoint_scores]
else:
output_dict[detection_fields.detection_keypoint_scores] = tf.ones_like(
detections[detection_fields.detection_keypoints][:, :, :, 0])
if groundtruth:
if max_gt_boxes is None:
if input_data_fields.num_groundtruth_boxes in groundtruth:
max_gt_boxes = groundtruth[input_data_fields.num_groundtruth_boxes]
else:
raise ValueError(
'max_gt_boxes must be provided when processing batched examples.')
if input_data_fields.groundtruth_instance_masks in groundtruth:
masks = groundtruth[input_data_fields.groundtruth_instance_masks]
max_spatial_shape = tf.reduce_max(
original_image_spatial_shapes, axis=0, keep_dims=True)
tiled_max_spatial_shape = tf.tile(
max_spatial_shape,
multiples=[tf.shape(original_image_spatial_shapes)[0], 1])
groundtruth[input_data_fields.groundtruth_instance_masks] = (
shape_utils.static_or_dynamic_map_fn(
_resize_groundtruth_masks,
elems=[masks, true_image_shapes,
original_image_spatial_shapes,
tiled_max_spatial_shape],
dtype=tf.uint8))
output_dict.update(groundtruth)
image_shape = tf.cast(tf.shape(images), tf.float32)
image_height, image_width = image_shape[1], image_shape[2]
def _scale_box_to_normalized_true_image(args):
"""Scale the box coordinates to be relative to the true image shape."""
boxes, true_image_shape = args
true_image_shape = tf.cast(true_image_shape, tf.float32)
true_height, true_width = true_image_shape[0], true_image_shape[1]
normalized_window = tf.stack([0.0, 0.0, true_height / image_height,
true_width / image_width])
return box_list_ops.change_coordinate_frame(
box_list.BoxList(boxes), normalized_window).get()
groundtruth_boxes = groundtruth[input_data_fields.groundtruth_boxes]
groundtruth_boxes = shape_utils.static_or_dynamic_map_fn(
_scale_box_to_normalized_true_image,
elems=[groundtruth_boxes, true_image_shapes], dtype=tf.float32)
output_dict[input_data_fields.groundtruth_boxes] = groundtruth_boxes
if input_data_fields.groundtruth_keypoints in groundtruth:
      # If groundtruth_keypoints is in the groundtruth dictionary, update the
      # coordinates to conform with the true image shape.
def _scale_keypoints_to_normalized_true_image(args):
"""Scale the box coordinates to be relative to the true image shape."""
keypoints, true_image_shape = args
true_image_shape = tf.cast(true_image_shape, tf.float32)
true_height, true_width = true_image_shape[0], true_image_shape[1]
normalized_window = tf.stack(
[0.0, 0.0, true_height / image_height, true_width / image_width])
return keypoint_ops.change_coordinate_frame(keypoints,
normalized_window)
groundtruth_keypoints = groundtruth[
input_data_fields.groundtruth_keypoints]
groundtruth_keypoints = shape_utils.static_or_dynamic_map_fn(
_scale_keypoints_to_normalized_true_image,
elems=[groundtruth_keypoints, true_image_shapes],
dtype=tf.float32)
output_dict[
input_data_fields.groundtruth_keypoints] = groundtruth_keypoints
if scale_to_absolute:
groundtruth_boxes = output_dict[input_data_fields.groundtruth_boxes]
output_dict[input_data_fields.groundtruth_boxes] = (
shape_utils.static_or_dynamic_map_fn(
_scale_box_to_absolute,
elems=[groundtruth_boxes, original_image_spatial_shapes],
dtype=tf.float32))
if input_data_fields.groundtruth_keypoints in groundtruth:
groundtruth_keypoints = output_dict[
input_data_fields.groundtruth_keypoints]
output_dict[input_data_fields.groundtruth_keypoints] = (
shape_utils.static_or_dynamic_map_fn(
_scale_keypoint_to_absolute,
elems=[groundtruth_keypoints, original_image_spatial_shapes],
dtype=tf.float32))
# For class-agnostic models, groundtruth classes all become 1.
if class_agnostic:
groundtruth_classes = groundtruth[input_data_fields.groundtruth_classes]
groundtruth_classes = tf.ones_like(groundtruth_classes, dtype=tf.int64)
output_dict[input_data_fields.groundtruth_classes] = groundtruth_classes
output_dict[input_data_fields.num_groundtruth_boxes] = max_gt_boxes
return output_dict
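# Hypothetical flow sketch (not part of the original module; "model",
# "features" and "groundtruth_dict" are placeholder names). This is roughly how
# the batched result dict feeds the metric ops helpers defined below:
#
#   detections = model.postprocess(prediction_dict, features['true_image_shape'])
#   eval_dict = result_dict_for_batched_example(
#       images=features['original_image'],
#       keys=features['hash'],
#       detections=detections,
#       groundtruth=groundtruth_dict,
#       true_image_shapes=features['true_image_shape'])
#   eval_metric_ops = get_eval_metric_ops_for_evaluators(
#       eval_config, categories, eval_dict)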
def get_evaluators(eval_config, categories, evaluator_options=None):
"""Returns the evaluator class according to eval_config, valid for categories.
Args:
eval_config: An `eval_pb2.EvalConfig`.
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
'keypoints': (optional) dict mapping this category's keypoints to unique
ids.
evaluator_options: A dictionary of metric names (see
EVAL_METRICS_CLASS_DICT) to `DetectionEvaluator` initialization
keyword arguments. For example:
        evaluator_options = {
'coco_detection_metrics': {'include_metrics_per_category': True}
}
Returns:
    A list of instances of DetectionEvaluator.
Raises:
ValueError: if metric is not in the metric class dictionary.
"""
evaluator_options = evaluator_options or {}
eval_metric_fn_keys = eval_config.metrics_set
if not eval_metric_fn_keys:
eval_metric_fn_keys = [EVAL_DEFAULT_METRIC]
evaluators_list = []
for eval_metric_fn_key in eval_metric_fn_keys:
if eval_metric_fn_key not in EVAL_METRICS_CLASS_DICT:
raise ValueError('Metric not found: {}'.format(eval_metric_fn_key))
kwargs_dict = (evaluator_options[eval_metric_fn_key] if eval_metric_fn_key
in evaluator_options else {})
evaluators_list.append(EVAL_METRICS_CLASS_DICT[eval_metric_fn_key](
categories,
**kwargs_dict))
if isinstance(eval_config, eval_pb2.EvalConfig):
parameterized_metrics = eval_config.parameterized_metric
for parameterized_metric in parameterized_metrics:
assert parameterized_metric.HasField('parameterized_metric')
if parameterized_metric.WhichOneof(
'parameterized_metric') == EVAL_KEYPOINT_METRIC:
keypoint_metrics = parameterized_metric.coco_keypoint_metrics
# Create category to keypoints mapping dict.
category_keypoints = {}
class_label = keypoint_metrics.class_label
category = None
for cat in categories:
if cat['name'] == class_label:
category = cat
break
if not category:
continue
keypoints_for_this_class = category['keypoints']
category_keypoints = [{
'id': keypoints_for_this_class[kp_name], 'name': kp_name
} for kp_name in keypoints_for_this_class]
# Create keypoint evaluator for this category.
evaluators_list.append(EVAL_METRICS_CLASS_DICT[EVAL_KEYPOINT_METRIC](
category['id'], category_keypoints, class_label,
keypoint_metrics.keypoint_label_to_sigmas))
return evaluators_list
def get_eval_metric_ops_for_evaluators(eval_config,
categories,
eval_dict):
"""Returns eval metrics ops to use with `tf.estimator.EstimatorSpec`.
Args:
eval_config: An `eval_pb2.EvalConfig`.
categories: A list of dicts, each of which has the following keys -
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name e.g., 'cat', 'dog'.
eval_dict: An evaluation dictionary, returned from
result_dict_for_single_example().
Returns:
A dictionary of metric names to tuple of value_op and update_op that can be
used as eval metric ops in tf.EstimatorSpec.
"""
eval_metric_ops = {}
evaluator_options = evaluator_options_from_eval_config(eval_config)
evaluators_list = get_evaluators(eval_config, categories, evaluator_options)
for evaluator in evaluators_list:
eval_metric_ops.update(evaluator.get_estimator_eval_metric_ops(
eval_dict))
return eval_metric_ops
def evaluator_options_from_eval_config(eval_config):
"""Produces a dictionary of evaluation options for each eval metric.
Args:
eval_config: An `eval_pb2.EvalConfig`.
Returns:
evaluator_options: A dictionary of metric names (see
EVAL_METRICS_CLASS_DICT) to `DetectionEvaluator` initialization
keyword arguments. For example:
        evaluator_options = {
'coco_detection_metrics': {'include_metrics_per_category': True}
}
"""
eval_metric_fn_keys = eval_config.metrics_set
evaluator_options = {}
for eval_metric_fn_key in eval_metric_fn_keys:
if eval_metric_fn_key in (
'coco_detection_metrics', 'coco_mask_metrics', 'lvis_mask_metrics'):
evaluator_options[eval_metric_fn_key] = {
'include_metrics_per_category': (
eval_config.include_metrics_per_category)
}
if (hasattr(eval_config, 'all_metrics_per_category') and
eval_config.all_metrics_per_category):
evaluator_options[eval_metric_fn_key].update({
'all_metrics_per_category': eval_config.all_metrics_per_category
})
# For coco detection eval, if the eval_config proto contains the
# "skip_predictions_for_unlabeled_class" field, include this field in
# evaluator_options.
if eval_metric_fn_key == 'coco_detection_metrics' and hasattr(
eval_config, 'skip_predictions_for_unlabeled_class'):
evaluator_options[eval_metric_fn_key].update({
'skip_predictions_for_unlabeled_class':
(eval_config.skip_predictions_for_unlabeled_class)
})
for super_category in eval_config.super_categories:
if 'super_categories' not in evaluator_options[eval_metric_fn_key]:
evaluator_options[eval_metric_fn_key]['super_categories'] = {}
key = super_category
value = eval_config.super_categories[key].split(',')
evaluator_options[eval_metric_fn_key]['super_categories'][key] = value
if eval_metric_fn_key == 'lvis_mask_metrics' and hasattr(
eval_config, 'export_path'):
evaluator_options[eval_metric_fn_key].update({
'export_path': eval_config.export_path
})
elif eval_metric_fn_key == 'precision_at_recall_detection_metrics':
evaluator_options[eval_metric_fn_key] = {
'recall_lower_bound': (eval_config.recall_lower_bound),
'recall_upper_bound': (eval_config.recall_upper_bound)
}
return evaluator_options
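# Hypothetical usage sketch (not part of the original module): combines
# evaluator_options_from_eval_config and get_evaluators above. The category
# list below is made up for illustration.
def _example_build_evaluators():
  eval_config = eval_pb2.EvalConfig()
  eval_config.metrics_set.append('coco_detection_metrics')
  categories = [{'id': 1, 'name': 'cat'}, {'id': 2, 'name': 'dog'}]
  options = evaluator_options_from_eval_config(eval_config)
  return get_evaluators(eval_config, categories, options)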
def has_densepose(eval_dict):
return (fields.DetectionResultFields.detection_masks in eval_dict and
fields.DetectionResultFields.detection_surface_coords in eval_dict) | PypiClean |
/Flask-ShellPlus-0.0.3.tar.gz/Flask-ShellPlus-0.0.3/flask_shellplus/__init__.py | import os
import sys
import logging
from collections import OrderedDict
import six
from flask import current_app
from flask.ext.script import Command, Option
from .utils import import_items, get_sa_models
__version__ = '0.0.3'
class Shell(Command):
"""
Runs a Python shell inside Flask application context.
:param banner: banner appearing at top of shell when started.
Not available when you choose IPython or ptiPython to use.
:param context: context used in you shell namespace. By default
contains the current app.
:param make_context: should be a callable object. Used to update shell context.
"""
help = description = 'Runs a Python shell inside Flask application context.'
def __init__(self, banner='', context=None, make_context=None):
self.banner = banner
self.context = context or {}
if make_context is not None:
self.context.update(make_context())
if not self.context:
self.context = self.context or dict(app=current_app)
def get_options(self):
return (
Option('--plain', action='store_true', dest='plain',
help='Tells Flask to use plain Python, not BPython nor IPython.'),
Option('--bpython', action='store_true', dest='bpython',
help='Tells Flask to use BPython, not IPython.'),
Option('--ptpython', action='store_true', dest='ptpython',
help='Tells Flask to use PTPython, not IPython.'),
Option('--ptipython', action='store_true', dest='ptipython',
help='Tells Flask to use PT-IPython, not IPython.'),
Option('--ipython', action='store_true', dest='ipython',
help='Tells Flask to use IPython, not BPython.'),
Option('--notebook', action='store_true', dest='notebook',
help='Tells Flask to use IPython Notebook.'),
Option('--no-browser', action='store_true', default=False, dest='no_browser',
help='Don\'t open the notebook in a browser after startup.'),
Option('--use-pythonrc', action='store_true', dest='use_pythonrc',
                   help='Tells Flask to execute PYTHONSTARTUP file (BE CAREFUL WITH THIS!)'),
Option('--print-sql', action='store_true', default=False,
help="Print SQL queries as they're executed"),
Option('--dont-load', action='append', dest='dont_load', default=[],
help='Ignore autoloading of some apps/models. Can be used several times.'),
Option('--quiet-load', action='store_true', default=False, dest='quiet_load',
help='Do not display loaded models messages'),
Option('--vi', action='store_true', default=use_vi_mode(), dest='vi_mode',
help='Load Vi key bindings (for --ptpython and --ptipython)'),
)
def setup_pythonrc(self, **options):
if options.get('use_pythonrc') is not True:
return
pythonrc = os.environ.get('PYTHONSTARTUP')
if not all([pythonrc, os.path.isfile(pythonrc)]):
return
global_ns = {}
with open(pythonrc) as rcfile:
try:
six.exec_(compile(rcfile.read(), pythonrc, 'exec'), global_ns)
except NameError:
                print('Failed to import pythonrc file {}'.format(pythonrc))
self.context.update(global_ns)
def setup_sql_printing(self, **options):
print_sql = options.get('print_sql')
if print_sql is not True:
return
db = self.context['db']
db.engine.echo = True # Used for SQLAlchemy
def setup_imports(self, **options):
app = self.context['app']
quiet_load = options.get('quiet_load')
dont_load = options.get('dont_load')
model_aliases = app.config.get('SHELLPLUS_MODEL_ALIASES', {})
basic_imports = {}
pre_imports = app.config.get('SHELLPLUS_PRE_IMPORTS', {})
post_imports = app.config.get('SHELLPLUS_POST_IMPORTS', {})
import_directives = OrderedDict(pre_imports)
import_directives.update(basic_imports)
import_directives.update(post_imports)
imported_objects = import_items(import_directives, quiet_load=quiet_load)
self.context.update(imported_objects)
def try_setuping_sa(self):
try:
self.context.update(get_sa_models(self.context['db']))
except (KeyError, AttributeError):
# This may caused when it's not a SQLAlchemy project
pass
def run(self, **options):
"""
        Runs the shell. The shell flavor is picked from the options
        (--notebook, --plain, --bpython, --ptpython, --ptipython, --ipython);
        if none is given, the first available shell is used, falling back to
        the plain Python shell.
:param options: defined in ``self.get_options``.
"""
self.setup_sql_printing(**options)
self.setup_pythonrc(**options)
self.try_setuping_sa()
vi_mode = options['vi_mode']
for key in ('notebook', 'plain', 'bpython', 'ptpython', 'ptipython', 'ipython'):
if options.get(key):
shell = key
break
else:
shell = get_available_shell()
self.setup_imports(**options)
context = self.context
if shell == 'notebook':
no_browser = options['no_browser']
notebook = get_notebook()
notebook(no_browser=no_browser, display_name=self.banner)
elif shell == 'bpython':
from bpython import embed
embed(banner=self.banner, locals_=context)
elif shell == 'ptpython':
from ptpython.repl import embed
embed(banner=self.banner, user_ns=context, vi_mode=vi_mode)
elif shell == 'ptipython':
from ptpython.ipython import embed
embed(user_ns=context, vi_mode=vi_mode)
elif shell == 'ipython':
from IPython import embed
embed(user_ns=context)
else:
# Use basic python shell
import code
code.interact(self.banner, local=context)
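# Hypothetical wiring sketch (not part of this package): registering the Shell
# command above on a Flask-Script Manager. "app" is a placeholder for the host
# Flask application; afterwards run e.g. `python manage.py shell_plus --ipython`.
def _example_register_shell(app):
    from flask.ext.script import Manager
    manager = Manager(app)
    manager.add_command('shell_plus', Shell(banner='Flask shell_plus'))
    return manager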
def get_notebook():
# import NotebookApp from IPython notebook
try:
from notebook.notebookapp import NotebookApp
except ImportError:
try:
from IPython.html.notebookapp import NotebookApp
except ImportError:
from IPython.frontend.html.notebook import notebookapp
NotebookApp = notebookapp.NotebookApp
def run_notebook(no_browser=False, display_name='notebook'):
app = NotebookApp.instance()
        ipython_arguments = []  # reserved for extra IPython config arguments
        notebook_arguments = []  # reserved for extra notebook config arguments
if no_browser is True:
notebook_arguments.append('--no-browser')
if '--notebook-dir' not in notebook_arguments:
notebook_arguments.extend(['--notebook-dir', '.'])
install_kernel_spec(app, display_name, ipython_arguments)
app.initialize(notebook_arguments)
app.start()
return run_notebook
def get_available_shell():
    """
    :return: The first available one in 'bpython', 'ptipython', 'ptpython', 'ipython'
    """
    from imp import find_module
    # Check shells in priority order and return the first whose dependencies
    # can all be found; fall back to the plain Python shell.
    shell_deps = OrderedDict((
        ('bpython', ('bpython',)),
        ('ptipython', ('ptpython', 'IPython')),
        ('ptpython', ('ptpython',)),
        ('ipython', ('IPython',)),
    ))
    for _shell, deps in shell_deps.items():
        try:
            for mod in deps:
                find_module(mod)
        except ImportError:
            continue
        return _shell
    return 'plain'
def use_vi_mode():
editor = os.environ.get('EDITOR')
if not editor:
return False
editor = os.path.basename(editor)
return editor.startswith('vi') or editor.endswith('vim')
def install_kernel_spec(app, display_name, ipython_arguments):
"""install an IPython >= 3.0 kernelspec that loads some extensions"""
if app.kernel_spec_manager is None:
try:
from jupyter_client.kernelspec import KernelSpecManager
except ImportError:
from IPython.kernel.kernelspec import KernelSpecManager
app.kernel_spec_manager = KernelSpecManager()
ksm = app.kernel_spec_manager
try_spec_names = [
'python3' if six.PY3 else 'python2',
'python',
]
if isinstance(try_spec_names, six.string_types):
try_spec_names = [try_spec_names]
for spec_name in try_spec_names:
try:
ks = ksm.get_kernel_spec(spec_name)
break
except Exception as e:
logging.warn(e)
continue
else:
raise Exception("No notebook (Python) kernel specs found")
ks.argv.extend(ipython_arguments)
ks.display_name = display_name
manage_py_dir, manage_py = os.path.split(os.path.realpath(sys.argv[0]))
if manage_py == 'manage.py' and os.path.isdir(manage_py_dir) and manage_py_dir != os.getcwd():
pythonpath = ks.env.get('PYTHONPATH', os.environ.get('PYTHONPATH', ''))
pythonpath = pythonpath.split(':')
if manage_py_dir not in pythonpath:
pythonpath.append(manage_py_dir)
ks.env['PYTHONPATH'] = ':'.join(filter(None, pythonpath)) | PypiClean |
/FinPy-0.5.tar.gz/FinPy-0.5/finpy/financial/portfolio.py | import datetime as dt
import pandas as pd
import numpy as np
import random
import csv
from .order import Order
from .fincommon import FinCommon
import finpy.utils.fpdateutil as du
from finpy.utils import utils as ut
from finpy.financial.equity import get_tickdata
class Portfolio():
"""
Portfolio has three items.
equities is a panda Panel of equity data.
Reference by ticker. self.equities['AAPL']
cash is a pandas series with daily cash balance.
total is the daily balance.
order_list is a list of Order
"""
def __init__(self, equities, cash, dates, order_list=None):
self.equities = pd.Panel(equities)
"""
:var equities: is a Panel of equities.
"""
if order_list == None:
self.order = []
else:
ol = order_list
ol.sort(key=lambda x: x.date)
self.order = ol
for x in [x for x in order_list if x.price == None]:
x.price = self.equities[x.tick]['close'][x.date]
self.cash = pd.Series(index=dates)
self.cash[0] = cash
self.total = pd.Series(index=dates)
self.total[0] = self.dailysum(dates[0])
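    # Illustrative usage (a sketch; assumes the equity Panel carries the
    # 'close' and 'shares' columns that the methods below expect):
    #
    #   pf = Portfolio(equities, cash=100000.0, dates=dates, order_list=orders)
    #   pf.sim()                    # replay the orders day by day
    #   print(pf.sharpe_ratio())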
def dailysum(self, date):
" Calculate the total balance of the date."
equities_total = np.nansum(self.equities.loc[:,date,'shares'] * self.equities.loc[:,date,'close'])
total = equities_total + self.cash[date]
return total
def buy(self, shares, tick, price, date, update_ol=False):
"""
Portfolio Buy
Calculate total, shares and cash upto the date.
Before we buy, we need to update share numbers. "
"""
self.cal_total(date)
last_valid = self.equities.loc[tick,:,'shares'].last_valid_index()
self.equities.loc[tick, last_valid:date, 'shares'] = self.equities.loc[tick, last_valid, 'shares']
self.equities.loc[tick, date, 'shares'] += shares
self.cash[date] -= price*shares
self.total[date] = self.dailysum(date)
if update_ol:
self.order.append(Order(action="buy", date=date, tick=tick, shares=shares, price=self.equities[tick]['close'][date]))
def sell(self, shares, tick, price, date, update_ol=False):
"""
Portfolio sell
Calculate shares and cash upto the date.
"""
self.cal_total(date)
last_valid = self.equities.loc[tick,:,'shares'].last_valid_index()
self.equities.loc[tick, last_valid:date, 'shares'] = self.equities.loc[tick, last_valid, 'shares']
self.equities.loc[tick, date, 'shares'] -= shares
self.cash[date] += price*shares
self.total[date] = self.dailysum(date)
if update_ol:
self.order.append(Order(action="sell", date=date, tick=tick, shares=shares, price=self.equities[tick]['close'][date]))
def fillna_cash(self, date):
" fillna on cash up to date "
update_start = self.cash.last_valid_index()
update_end = date
self.cash[update_start:update_end] = self.cash[update_start]
return update_start, update_end
def fillna(self, date):
"""
fillna cash and all equities.
return update_start and update_end.
"""
update_start, update_end = self.fillna_cash(date)
for tick in self.equities:
self.equities.loc[tick, update_start:update_end,'shares'] = self.equities.loc[tick, update_start, 'shares']
return update_start, update_end
def cal_total(self, date=None):
"""
Calculate total up to "date".
"""
if date == None:
equities_sum = pd.Series(index=self.ldt_timestamps())
each_total = self.equities.loc[:,:,'close'] * self.equities.loc[:,:,'shares']
equities_sum = each_total.sum(axis=1)
self.total = self.cash + equities_sum
else:
start, end = self.fillna(date)
equities_total_df = self.equities.loc[:,start:end,'shares'] * self.equities.loc[:,start:end,'close']
equities_total = equities_total_df.sum(axis=1)
self.total[start:end ] = equities_total + self.cash[start:end]
def put_orders(self):
"""
Put the order list to the DataFrame.
Update shares, cash columns of each Equity
"""
for o in self.order:
if o.action.lower() == "buy":
self.buy(date=o.date, shares=np.float(o.shares), price=np.float(o.price), tick=o.tick)
elif o.action.lower() == "sell":
self.sell(shares=np.float(o.shares), tick=o.tick, price=np.float(o.price), date=o.date)
def sim(self, ldt_timestamps=None):
"""
Go through each day and calculate total and cash.
"""
self.put_orders()
if ldt_timestamps == None:
ldt_timestamps = self.ldt_timestamps()
dt_end = ldt_timestamps[-1]
self.cal_total()
def csvwriter(self, equity_col=None, csv_file="pf.csv", total=True, cash=True, d=','):
"""
Write the content of the Portfolio to a csv file.
If total is True, the total is printed to the csv file.
If cash is True, the cash is printed to the csv file.
equity_col specify which columns to print for an equity.
The specified columns of each equity will be printed.
"""
lines = []
l = []
l.append("Date")
if total:
l.append("Total")
if cash:
l.append("Cash")
if equity_col != None:
for e in self.equities:
for col in equity_col:
label = e + col
l.append(label)
lines.append(l)
for i in self.ldt_timestamps():
l = []
l.append(i.strftime("%Y-%m-%d"))
if total:
l.append(round(self.total[i], 2))
if cash:
l.append(round(self.cash[i], 2))
if equity_col != None:
for e in self.equities:
for col in equity_col:
l.append(round(self.equities[e][col][i], 2))
lines.append(l)
with open(csv_file, 'w') as fp:
cw = csv.writer(fp, lineterminator='\n', delimiter=d)
for line in lines:
cw.writerow(line)
def write_order_csv(self, csv_file="pf_order.csv", d=','):
lines = []
for i in self.order:
l = []
l.append(i.date.strftime("%Y-%m-%d"))
l.append(i.tick)
l.append(i.action)
l.append(i.shares)
lines.append(l)
with open(csv_file, 'w') as fp:
cw = csv.writer(fp, lineterminator='\n', delimiter=d)
for line in lines:
cw.writerow(line)
def daily_return(self,tick=None):
"""
Return the return rate of each day, a list.
:param tick: The ticker of the equity.
:type string:
"""
if tick == None:
total = self.total
else:
total = self.equities.loc[tick,:,'close']
daily_rtn = total/total.shift(1)-1
daily_rtn[0] = 0
return np.array(daily_rtn)
def avg_daily_return(self, tick=None):
" Average of the daily_return list "
return np.average(self.daily_return(tick))
def std(self, tick=None):
" Standard Deviation of the daily_return "
return np.std(self.daily_return(tick))
def normalized(self, tick=None):
if tick == None:
return self.total/self.total[0]
else:
return self.equities[tick]['close']/self.equities[tick]['close'][0]
def normalized_price(self, tick):
self.equities[tick]['open'] = self.equities[tick]['open'] * self.equities[tick]['close']/self.equities[tick]['actual_close']
self.equities[tick]['high'] = self.equities[tick]['high'] * self.equities[tick]['close']/self.equities[tick]['actual_close']
self.equities[tick]['low'] = self.equities[tick]['low'] * self.equities[tick]['close']/self.equities[tick]['actual_close']
def sortino(self, k=252, tick=None):
"""
Return Sortino Ratio.
You can overwirte the coefficient with k.
The default is 252.
"""
daily_rtn = self.daily_return(tick)
negative_daily_rtn = daily_rtn[daily_rtn < 0]
sortino_dev = np.std( negative_daily_rtn)
sortino = (self.avg_daily_return(tick) / sortino_dev) * np.sqrt(k)
return sortino
def return_ratio(self, tick=None):
" Return the return ratio of the period "
if tick == None:
return self.total[-1]/self.total[0]
else:
return self.equities[tick]['close'][-1]/self.equities.loc[tick]['close'][0]
    def moving_average(self, window=20, tick=None):
        """
        Return an array of moving averages. window specifies how many days
        are in a window.
        """
        if tick == None:
            ma = pd.stats.moments.rolling_mean(self.total, window=window)
        else:
            # Rolling mean of this equity's closing price.
            ma = pd.stats.moments.rolling_mean(self.equities[tick]['close'],
                                               window=window)
        ma[0:window] = ma[window]
        return ma
def drawdown(self, window=10):
"""
Find the peak within the retrospective window.
Drawdown is the difference between the peak and the current value.
"""
ldt_timestamps = self.ldt_timestamps()
pre_timestamps = ut.pre_timestamps(ldt_timestamps, window)
# ldf_data has the data prior to our current interest.
# This is used to calculate moving average for the first window.
        merged_data = self.total[pre_timestamps[0]:ldt_timestamps[-1]]
total_timestamps = merged_data.index
dd = pd.Series(index=ldt_timestamps)
j = 0
for i in range(len(pre_timestamps), len(total_timestamps)):
win_start = total_timestamps[i - window]
win_end = total_timestamps[i]
ts_value = merged_data[win_start:win_end]
current = merged_data[win_end]
peak = np.amax(ts_value)
dd[j] = (peak-current)/peak
j += 1
return dd
def random_choose_tick(self, exclude=[]):
"""
Randomly return a ticker in the portfolio.
The items in exclude list are not in the select pool.
"""
ex_set = set(exclude)
pf_set = set([x for x in self.equities])
sel_ls = [s for s in pf_set - ex_set]
return random.choice(sel_ls)
def equities_long(self, date):
"""
Return the list of long equities on the date.
"Long equities" means the number of shares of the equity is greater than 0.
"""
return [x for x in self.equities if self.equities[x].shares[date] > 0]
def ldt_timestamps(self):
"""
Return an array of datetime objects.
"""
ldt_index = self.total.index
dt_start = ldt_index[0]
dt_end = ldt_index[-1]
dt_timeofday = dt.timedelta(hours=16)
ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt_timeofday)
return ldt_timestamps
def excess_return(self, rf_tick="$TNX", tick=None):
"""
An excess return is the difference between an asset's return and the riskless rate.
"""
return self.daily_return(tick=tick) - ut.riskfree_return(self.ldt_timestamps(), rf_tick=rf_tick)
def mean_excess_return(self, rf_tick="$TNX", tick=None):
return np.mean(self.excess_return(rf_tick=rf_tick, tick=tick))
def residual_return(self, benchmark, rf_tick="$TNX", tick=None):
"""
A residual return is the excess return minus beta times the benchmark excess return.
"""
beta = self.beta(benchmark, tick)
return self.excess_return(rf_tick=rf_tick, tick=tick) - beta * self.excess_return(rf_tick=rf_tick, tick=benchmark)
def mean_residual_return(self, benchmark, rf_tick="$TNX", tick=None):
return np.mean(self.residual_return(benchmark=benchmark, rf_tick=rf_tick, tick=tick))
def residual_risk(self, benchmark, rf_tick="$TNX", tick=None):
"""
Residual Risk is the standard deviation of the residual return.
"""
return np.std(self.residual_return(benchmark=benchmark, rf_tick=rf_tick, tick=tick))
def active_return(self, benchmark, tick=None):
"""
An active return is the difference between the benchmark and the actual return.
"""
return self.daily_return(tick=tick) - self.daily_return(tick=benchmark)
def mean_active_return(self, benchmark, tick=None):
return np.mean(self.active_return(benchmark, tick))
def beta_alpha(self, benchmark):
"""
benchmark is an Equity representing the market.
It can be S&P 500, Russel 2000, or your choice of market indicator.
This function uses polyfit in numpy to find the closest linear equation.
"""
beta, alpha = np.polyfit(self.daily_return(tick=benchmark), self.daily_return(), 1)
return beta, alpha
def beta(self, benchmark, tick=None):
"""
benchmark is an Equity representing the market.
This function uses cov in numpy to calculate beta.
"""
benchmark_return = self.daily_return(tick=benchmark)
C = np.cov(benchmark_return, self.daily_return(tick=tick))/np.var(benchmark_return)
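        # np.cov gives the 2x2 covariance matrix; after dividing by the
        # benchmark variance, C[0][1]/C[0][0] is simply
        # Cov(benchmark, portfolio) / Var(benchmark), i.e. the usual beta.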
beta = C[0][1]/C[0][0]
return beta
def excess_risk(self, rf_tick="$TNX", tick=None):
"""
$FVX is another option. Five-Year treasury rate.
An excess risk is the standard deviation of the excess return.
"""
return np.std(self.excess_return(rf_tick=rf_tick, tick=tick))
def active_risk(self, benchmark, tick=None):
"""
An active risk is the standard deviation of the active return.
"""
return np.std(self.active_return(benchmark, tick))
def info_ratio(self, benchmark, rf_tick="$TNX", tick=None):
"""
Information Ratio
https://en.wikipedia.org/wiki/Information_ratio
Information Ratio is defined as active return divided by active risk,
where active return is the difference between the return of the security
and the return of a selected benchmark index, and active risk is the
standard deviation of the active return.
"""
return self.mean_active_return(benchmark=benchmark, tick=tick)/self.active_risk(benchmark=benchmark, tick=tick)
def appraisal_ratio(self, benchmark, rf_tick="$TNX", tick=None):
"""
Appraisal Ratio
https://en.wikipedia.org/wiki/Appraisal_ratio
Appraisal Ratio is defined as residual return divided by residual risk,
where residual return is the difference between the return of the security
and the return of a selected benchmark index, and residual risk is the
standard deviation of the residual return.
"""
return self.mean_residual_return(benchmark, rf_tick, tick)/self.residual_risk(benchmark, rf_tick, tick)
def sharpe_ratio(self, rf_tick="$TNX", tick=None):
"""
Return the Original Sharpe Ratio.
https://en.wikipedia.org/wiki/Sharpe_ratio
rf_tick is Ten-Year treasury rate ticker at Yahoo.
"""
return self.mean_excess_return(rf_tick=rf_tick, tick=tick)/self.excess_risk(rf_tick=rf_tick, tick=tick)
def up_ratio(self, date, tick, days=10):
"""
Return the ratio of the past up days.
This function only applies to equities.
"""
ldt_index = self.ldt_timestamps()
last = date
first = date-days
up = 0.0
dn = 0.0
for i in range(first, last+1):
if self.equities[tick]['close'][i] < self.equities[tick]['close'][i-1]:
dn += 1
else:
up += 1
ratio = up / (dn + up)
return ratio
def dn_ratio(self, date,tick , days=10):
"""
Return the ratio of the past down days.
This function only applies to equities.
"""
ratio = 1.0 - self.up_ratio(date=date, tick=tick, days=days)
return ratio
def rolling_normalized_stdev(self, tick, window=50):
"""
Return the rolling standard deviation of normalized price.
This function only applies to equities.
"""
ldt_timestamps = self.ldt_timestamps()
pre_timestamps = ut.pre_timestamps(ldt_timestamps, window)
# ldf_data has the data prior to our current interest.
# This is used to calculate moving average for the first window.
ldf_data = get_tickdata([tick], pre_timestamps)
merged_data = pd.concat([ldf_data[tick]['close'], self.equities[tick]['close']])
all_timestamps = pre_timestamps.append(ldt_timestamps)
merged_daily_rtn = (self.equities.loc[tick,:,'close']/self.equities.loc[tick,:,'close'].shift(1)-1)
merged_daily_rtn[0] = 0
sigma = pd.rolling_std(merged_daily_rtn, window=window)
return sigma[self.ldt_timestamps()]
def max_rise(self, tick, date, window=20):
"""
Find the maximum change percentage between the current date and the bottom of the retrospective window.
:param tick: ticker
:type tick: string
:param date: date to calculate max_rise
:type date: datetime
:param window: The days of window to calculate max_rise.
:type window: int
"""
ldt_timestamps = self.ldt_timestamps()
pre_timestamps = ut.pre_timestamps(ldt_timestamps, window)
first = pre_timestamps[0]
# ldf_data has the data prior to our current interest.
# This is used to calculate moving average for the first window.
        try:
            # Use the portfolio's own price history when it already covers
            # the retrospective window.
            self.equities[tick]['close'][first]
            merged_data = self.equities[tick]['close']
        except Exception:
            ldf_data = get_tickdata([tick], pre_timestamps)
            merged_data = pd.concat([ldf_data[tick]['close'], self.equities.loc[tick,:,'close']])
        if isinstance(date, int):
            int_date = ldt_timestamps[date]
        else:
            int_date = date
c = merged_data.index.get_loc(int_date)
m = merged_data[c-window:c].min()
r = (merged_data[c]-m)/merged_data[c]
return r
def max_fall(self, tick, date, window=20):
"""
Find the change percentage between the top and the bottom of the retrospective window.
:param tick: ticker
:type tick: string
        :param date: date to calculate max_fall
        :type date: datetime
        :param window: The days of window to calculate max_fall.
:type window: int
"""
ldt_timestamps = self.ldt_timestamps()
pre_timestamps = ut.pre_timestamps(ldt_timestamps, window)
first = pre_timestamps[0]
# ldf_data has the data prior to our current interest.
# This is used to calculate moving average for the first window.
        try:
            # Use the portfolio's own price history when it already covers
            # the retrospective window.
            self.equities[tick]['close'][first]
            merged_data = self.equities[tick]['close']
        except Exception:
            ldf_data = get_tickdata([tick], pre_timestamps)
            merged_data = pd.concat([ldf_data[tick]['close'], self.equities.loc[tick,:,'close']])
        if isinstance(date, int):
            int_date = ldt_timestamps[date]
        else:
            int_date = date
c = merged_data.index.get_loc(int_date)
mx = merged_data[c-window:c].max()
mn = merged_data[c-window:c].min()
r = (mx-mn)/merged_data[c]
return r
def moving_average(self, tick, window=20):
"""
Return an array of moving average. Window specified how many days in
a window.
:param tick: ticker
:type tick: string
:param window: The days of window to calculate moving average.
:type window: int
"""
mi = self.bollinger_band(tick=tick, window=window, mi_only=True)
return mi
def bollinger_band(self, tick, window=20, k=2, mi_only=False):
"""
Return four arrays for Bollinger Band. The upper band at k times an N-period
standard deviation above the moving average. The lower band at k times an N-period
below the moving average.
:param tick: ticker
:type tick: string
:param window: The days of window to calculate Bollinger Band.
:type window: int
        :param k: The number of standard deviations between the moving
            average and each band.
        :return bo: bo['mi'] is the moving average. bo['lo'] is the lower band.
        bo['hi'] is the upper band. bo['ba'] is a series of the position of the current
price relative to the bollinger band.
:type bo: A dictionary of series.
"""
ldt_timestamps = self.ldt_timestamps()
pre_timestamps = ut.pre_timestamps(ldt_timestamps, window)
# ldf_data has the data prior to our current interest.
# This is used to calculate moving average for the first window.
ldf_data = get_tickdata([tick], pre_timestamps)
merged_data = pd.concat([ldf_data[tick]['close'], self.equities[tick]['close']])
bo = dict()
bo['mi'] = pd.rolling_mean(merged_data, window=window)[ldt_timestamps]
if mi_only:
return bo['mi']
else:
sigma = pd.rolling_std(merged_data, window=window)
bo['hi'] = bo['mi'] + k * sigma[ldt_timestamps]
bo['lo'] = bo['mi'] - k * sigma[ldt_timestamps]
bo['ba'] = (merged_data[ldt_timestamps] - bo['mi']) / (k * sigma[ldt_timestamps])
return bo
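    # Illustrative usage (a sketch; assumes a populated Portfolio instance pf):
    #
    #   bo = pf.bollinger_band('AAPL', window=20, k=2)
    #   upper, lower, position = bo['hi'], bo['lo'], bo['ba']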
def RSI(self, tick):
"""
Relative Strength Index
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:relative_strength_index_rsi
This function uses roughly 250 prior points to calculate RS.
:param tick: The ticker to calculate RSI
:type tick: string
:return rsi[ldt_timestamps]: RSI series
"""
ldt_timestamps = self.ldt_timestamps()
pre_timestamps = ut.pre_timestamps(ldt_timestamps, 250)
ldf_data = get_tickdata([tick], pre_timestamps)
merged_data = pd.concat([ldf_data[tick]['close'], self.equities[tick]['close']])
delta = merged_data.diff()
gain = pd.Series(delta[delta > 0], index=delta.index).fillna(0)
loss = pd.Series(delta[delta < 0], index=delta.index).fillna(0).abs()
avg_gain = pd.Series(index=delta.index)
avg_loss = pd.Series(index=delta.index)
rsi = pd.Series(index=delta.index)
avg_gain[14] = gain[1:15].mean()
avg_loss[14] = loss[1:15].mean()
for i in range(15, len(delta.index)):
avg_gain[i] = (avg_gain[i-1]*13+gain[i])/14
avg_loss[i] = (avg_loss[i-1]*13+loss[i])/14
if avg_loss[i] == 0:
rsi[i] = 100
else:
rs = avg_gain[i]/avg_loss[i]
rsi[i] = 100 - 100/(1+rs)
return(rsi[ldt_timestamps]) | PypiClean |
/MagCalibration_jdickerson80-1.2.0-py3-none-any.whl/magCalibration/scripts/calcStats.py | import os
import sys
import csv
import numpy as np
import scipy.stats as st
def getDirectory(path, spectra_dir):
NWPS_dir = f"{path}{spectra_dir}/avg/10/"
file_list = [f for f in os.listdir(NWPS_dir) if f.endswith(".mrc")]
num_files = len(file_list)
if num_files < 1:
NWPS_dir = f"{path}{spectra_dir}/avg/"
return NWPS_dir
#def runScript(path, aniso_corrected, spectra_dir, write_dir, aniso_file):
def runScript(path, spectra_dir, write_dir, aniso_file):
print_text = []
#get directory
#directory = getDirectory(path, spectra_dir)
aniso_corrected = True
if aniso_file == "":
aniso_corrected = False
ratio = 1.0
if aniso_corrected:
with open(aniso_file, 'r') as f:
line = f.readline()
l = line.split(',')
ratio = float(l[1])/float(l[0])
#and now read the pixel sizes
'''
directory2 = directory
if aniso_corrected:
directory2 = f"{directory}stretch/"
all_files = os.listdir(directory2)
the_file = ''
for filename in all_files:
if filename.endswith(".csv") and "px" in filename and "smooth" in filename:
the_file = filename
break
'''
overall = []
px_vals = []
#with open(directory2+the_file, 'r') as f:
with open(f"{write_dir}pxSizes.csv", 'r') as f:
for line in f.readlines():
l = line.split(',')
if float(l[1]) > 0:
#val = (2*float(l[1]) + ratio*float(l[1]))/3
#val = (float(l[1]) * (ratio*float(l[1])))**0.5
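                    # Combine the two axis pixel sizes as a root mean square,
                    # sqrt((px**2 + (ratio*px)**2) / 2); `ratio` comes from the
                    # anisotropy file and rescales the second axis.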
val = ((float(l[1])**2 + (ratio*float(l[1]))**2)/2)**0.5
overall.append([l[0], val])
if aniso_corrected:
px_vals.append(val)
else:
px_vals.append(float(l[1]))
if aniso_corrected:
#now write
with open(write_dir+"pxSizes_adjust.csv", 'w') as f:
writer = csv.writer(f)
writer.writerows(overall)
    #Now it's time for the stats
    #Remove any outliers, but only if there are enough values (at least 3)
px_sizes = []
if len(px_vals) >= 3:
px_vals = np.array(px_vals)
mean_px = np.mean(px_vals)
stdev = np.std(px_vals)
d_from_mean = abs(px_vals-mean_px)
max_deviations = 2
not_outlier = d_from_mean < max_deviations * stdev
px_sizes = px_vals[not_outlier]
else:
px_sizes = np.array(px_vals)
mean_px = np.mean(px_sizes)
print("Mean px: " + str(round(mean_px, 4)))
print_text.append("Mean px: " + str(round(mean_px, 4)))
if len(px_vals) >= 3:
interval_px = st.t.interval(alpha=0.95, df=len(px_sizes)-1, loc=mean_px, scale=st.sem(px_sizes))
print("Interval px: " + str(interval_px))
print_text.append("Interval px: " + str(interval_px))
conf = mean_px - interval_px[0]
print("Conf px: " + str(conf))
print_text.append("Conf px: " + str(conf))
with open(write_dir+"stats.csv", 'w') as f:
f.write(f"{mean_px},{interval_px[0]},{interval_px[1]}")
else:
with open(write_dir+"stats.csv", 'w') as f:
f.write(f"{mean_px}")
#finally write the printText file
with open(f"{write_dir}outputText.txt", 'a') as f:
for line in print_text:
f.write(f"{line}\n") | PypiClean |
/Curp-1.3.1.tar.gz/Curp-1.3.1/curp/dynamics/integrator.py | from __future__ import print_function
# standard modules
import os, sys
import time
import numpy
# curp modules
topdir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
if topdir not in sys.path: sys.path.insert(0, topdir)
from utility import TimeStore
import clog as logger
class NEVIntegrator(TimeStore):
coef = 4.184*10.0**(-4) # dt*f/m (fs*kcal/mol/A/u) => v(A/fs)
gas_const = 8.3144621 # gas constant (J/K/mol)
def __init__(self, topology, setting, interact_table, crd=None, vel=None):
TimeStore.__init__(self)
self.setup(topology, setting, interact_table)
self.__crd = crd
self.__vel = vel
def setup(self, topology, setting, interact_table):
self.__tpl = topology
self.__setting = setting
# decide force calculator
import twobody
TwoBodyCalculator = twobody.get_calculator(setting.curp.potential)
self.__tbf = TwoBodyCalculator(topology, setting)
self.__tbf.setup(interact_table, check=False)
self.__interact_table = [ numpy.array(t) for t in interact_table ]
# decide integrator
params = setting.dynamics
if params.integrator == 'vverlet':
self.integrate = self.do_vverlet
elif params.integrator == 'leapfrog':
self.integrate = self.do_leapfrog
else:
pass
def run(self, data=None):
"""Run the one step integrator."""
if data:
cstep, (crd, vel, pbc) = data
else:
crd, vel = self.__crd, self.__vel
logger.debug(' calculate dynamics ...')
masses = self.__tpl.get_atom_info()['masses']
force = self.cal_force(crd)
params = self.__setting.dynamics
results = self.integrate(crd, force, vel, masses, params)
return results
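    # Illustrative usage (a sketch; the topology, setting and interact_table
    # objects are assumed to be prepared by the caller). run() returns a
    # generator of (step, (coordinates, velocities)) tuples:
    #
    #   integrator = NEVIntegrator(topology, setting, interact_table, crd, vel)
    #   for istep, (crd_next, vel_next) in integrator.run():
    #       pass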
def cal_force(self, crd):
return self.__tbf.cal_force(crd)
def integrate(self, crd, frc, vel, masses):
pass
def do_vverlet(self, crd, frc, vel, masses, params):
"""Integrate the coordinate and velocity according to
velocity verlet algorithm.
"""
# istp : current step
# crd : current step's coordinate
# vel : current step's velocity
# frc : current step's force
# crd_next : next step's coordinate
# vel_next : next step's velocity
# frc_next : next step's force
        # Preparation of the dynamics on the 0th step
ms = numpy.array(masses)[:, None]
dt = params.dt * 1000.0
nstep = params.num_steps
coef = self.coef
# log the information of the restart file
logger.info('*** The information of the restart file ***')
# log temperature
ek, temp = self.cal_temp(vel, ms)
self.output_temp(ek, temp)
# log energy
if self.__setting.output.output_energy: self.__tbf.output_energy()
# log forces
if logger.is_debug(): self.__tbf.output_force()
# do dynamics
for istp_1 in range(nstep):
istp = istp_1+1
t0 = time.time()
logger.set_curstep(istp)
logger.info_cycle('*** ISTEP = {}, CURRENT TIME = {} ***'
.format(istp, istp * dt))
# calculate next step's coordinate
# r(t+dt) = r(t) + dt*v(t) + dt^2/2m * F(t)
# crd_next = crd + dt*vel + 0.5*dt*dt * frc / ms * coef
crd_next = crd + dt*vel + 0.5*dt*dt * frc / ms * coef
# calculate next step's forces from coordinate
frc_next = self.cal_force(crd_next)
# calculate next step's velocity
# v(t+dt) = v(t) + dt/2m * { F(t+dt) + F(t) }
vel_next = vel + 0.5*dt* (frc_next+frc) / ms * coef
self.store_time('integrator', time.time()-t0)
yield istp, (crd_next, vel_next)
crd = crd_next
frc = frc_next
vel = vel_next
# log temperature
ek, temp = self.cal_temp(vel, ms)
self.output_temp(ek, temp)
# log energy
if self.__setting.output.output_energy: self.__tbf.output_energy()
# log forces
if logger.is_debug(): self.__tbf.output_force()
def do_leapfrog(self, crd, frc, vel, masses, params):
"""Integrate the coordinate and velocity according to
leap frog algorithm.
"""
# istp : current step(t)
# crd : current step(t)'s coordinate
# frc : current step(t)'s force
# vel : current step(t-dt/2)'s velocity
# crd_next : next step(t+dt)'s coordinate
# vel_next : next step(t+dt/2))'s velocity
# frc_next : next step(t+dt)'s force
# vel_dt = 0.5*(vel + vel_next)
        # Preparation of the dynamics on the 0th step
ms = numpy.array(masses)[:, None]
# dt = params.dt
# dt = params.dts
dt = params.dt * 1000.0
nstep = params.num_steps
coef = self.coef
# log the information of the restart file
logger.info('*** The information of the restart file ***')
logger.info(" Velocity is 0+1/2-th step' informations")
# log temperature
ek, temp = self.cal_temp(vel, ms)
self.output_temp(ek, temp)
# log energy
if self.__setting.output.output_energy: self.__tbf.output_energy()
# log forces
if logger.is_debug(): self.__tbf.output_force()
# do dynamics
for istp_1 in range(nstep):
istp = istp_1+1
t0 = time.time()
logger.set_curstep(istp)
logger.info_cycle('*** ISTEP = {}, CURRENT TIME = {} ***'
.format(istp, istp * dt))
# calculate next step's velocity
# v(t+dt/2) = v(t-dt/2) + dt * F(t) /m
vel_next = vel + dt*frc/ms * coef
vel_t = 0.5*( vel + vel_next )
# calculate next step's coordinate
# r(t+dt) = r(t) + dt*v(t+dt/2)
crd_next = crd + dt*vel_next
# calculate next step's forces from coordinate
frc_next = self.cal_force(crd_next)
self.store_time('integrator', time.time()-t0)
yield istp, (crd_next, vel_next)
crd = crd_next
frc = frc_next
vel = vel_next
# log temperature
ek, temp = self.cal_temp(vel_t, ms)
self.output_temp(ek, temp)
# log energy
if self.__setting.output.output_energy: self.__tbf.output_energy()
# log forces
if logger.is_debug(): self.__tbf.output_force()
def cal_temp(self, vel, ms):
"""Calculate the temperature and Kenitic energy."""
gas = self.gas_const
        dof = vel.size # degrees of freedom
energy_tmp = numpy.sum( ms * vel**2)
energy = 0.5 * energy_tmp / self.coef
temp = 10.0**7 * energy_tmp / dof / gas
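        # The 10**7 factor converts u*(A/fs)**2 into J/mol
        # (1 u * (A/fs)**2 * N_A ~ 1.0e7 J/mol), so temp = sum(m*v**2)/(dof*R).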
return energy, temp
def output_temp(self, energy, temp):
"""Output the temperature and Kinetic energy into log file."""
logger.info_cycle(' Temperature : {:>8.2f} (K) '.format(temp))
        logger.info_cycle('    Kinetic energy : {:>8.5f} (kcal/mol) '.format(energy))
/Hooke-1.0.0.alpha%20(Ninken).tar.gz/Hooke-1.0.0.alpha (Ninken)/hooke/plugin/tutorial.py | import logging
import StringIO
import sys
from numpy import arange
from ..command import Command, Argument, Failure
from ..config import Setting
from ..interaction import PointRequest, PointResponse
from ..util.si import ppSI, split_data_label
from . import Plugin
from .curve import CurveArgument
class TutorialPlugin (Plugin):
"""An example plugin explaining how to code plugins.
Unlike previous versions of Hooke, the class name is no longer
important. Plugins identify themselves to
:func:`hooke.util.pluggable.construct_graph` by being subclasses
of :class:`hooke.plugin.Plugin`. However, for consistency we
suggest the following naming scheme, show here for the 'tutorial'
plugin:
=========== ==============
module file tutorial.py
class name TutorialPlugin
.name 'tutorial'
=========== ==============
To ensure filename sanity,
:func:`hooke.util.pluggable.construct_graph` requires that
:attr:`name` does match the submodule name, but don't worry,
you'll get a clear exception message if you make a mistake.
"""
def __init__(self):
"""TutorialPlugin initialization code.
We call our base class' :meth:`__init__` and setup
:attr:`_commands`.
"""
# This is the plugin initialization. When Hooke starts and
# the plugin is loaded, this function is executed. If there
# is something you need to do when Hooke starts, code it in
# this function.
sys.stderr.write('I am the Tutorial plugin initialization!\n')
        # This super() call is similar to the old-style
# Plugin.__init__
# but super() is more robust under multiple inheritance.
# See Guido's introduction:
# http://www.python.org/download/releases/2.2.3/descrintro/#cooperation
# And the related PEPs:
# http://www.python.org/dev/peps/pep-0253/
# http://www.python.org/dev/peps/pep-3135/
super(TutorialPlugin, self).__init__(name='tutorial')
# We want :meth:`commands` to return a list of
# :class:`hooke.command.Command` instances. Rather than
# instantiate the classes for each call to :meth:`commands`,
# we instantiate them in a list here, and rely on
# :meth:`hooke.plugin.Plugin.commands` to return copies of
# that list.
self._commands = [DoNothingCommand(self), HookeInfoCommand(self),
PointInfoCommand(self),]
def dependencies(self):
"""Return a list of names of :class:`hooke.plugin.Plugin`\s we
require.
Some plugins use features from other plugins. Hooke makes sure that
plugins are configured in topological order and that no plugin is
enabled if it is missing dependencies.
"""
return ['vclamp']
def default_settings(self):
"""Return a list of :class:`hooke.config.Setting`\s for any
configurable plugin settings.
The suggested section setting is::
Setting(section=self.setting_section, help=self.__doc__)
You only need to worry about this if your plugin has some
"magic numbers" that the user may want to tweak, but that
won't be changing on a per-command basis.
You should lead off the list of settings with the suggested
section setting mentioned above.
"""
return [
# We disable help wrapping, since we've wrapped
# TutorialPlugin.__doc__ ourselves, and it's more than one
# paragraph (textwrap.fill, used in
# :meth:`hooke.config.Setting.write` only handles one
# paragraph at a time).
Setting(section=self.setting_section, help=self.__doc__,
wrap=False),
Setting(section=self.setting_section, option='favorite color',
value='orange', help='Your personal favorite color.'),
]
# Define common or complicated arguments
# Often, several commands in a plugin will use similar arguments. For
# example, many curves in the 'playlist' plugin need a playlist to act
# on. Rather than repeating an argument definition in several times,
# you can keep your code DRY (Don't Repeat Yourself) by defining the
# argument at the module level and referencing it during each command
# initialization.
def color_callback(hooke, command, argument, value):
"""If `argument` is `None`, default to the configured 'favorite color'.
:class:`hooke.command.Argument`\s may have static defaults, but
for dynamic defaults, they use callback functions (like this one).
"""
if value != None:
return value
return command.plugin.config['favorite color']
ColorArgument = Argument(
name='color', type='string', callback=color_callback,
help="Pick a color, any color.")
# See :func:`hooke.ui.gui.panel.propertyeditor.prop_from_argument` for
# a situation where :attr:`type` is important.
class DoNothingCommand (Command):
"""This is a boring but working example of an actual Hooke command.
As for :class:`hooke.plugin.Plugin`\s, the class name is not
important, but :attr:`name` is. :attr:`name` is used (possibly
with some adjustment) as the name for accessing the command in the
various :class:`hooke.ui.UserInterface`\s. For example the
`'do nothing'` command can be run from the command line UI with::
hooke> do_nothing
Note that if you now start Hooke with the command's plugin
activated and you type in the Hooke command line "help do_nothing"
you will see this very text as output. That is because we set
:attr:`_help` to this class' docstring on initialization.
"""
def __init__(self, plugin):
# See the comments in TutorialPlugin.__init__ for details
# about super() and the docstring of
# :class:`hooke.command.Command` for details on the __init__()
# arguments.
super(DoNothingCommand, self).__init__(
name='do nothing',
arguments=[ColorArgument],
help=self.__doc__, plugin=plugin)
def _run(self, hooke, inqueue, outqueue, params):
"""This is where the command-specific magic will happen.
If you haven't already, read the Architecture section of
:file:`doc/hacking.txt` (also available `online`_). It
explains the engine/UI setup in more detail.
.. _online:
http://www.physics.drexel.edu/~wking/rsrch/hooke/hacking.html#architecture
The return value (if any) of this method is ignored. You
should modify the :class:`hooke.hooke.Hooke` instance passed
in via `hooke` and/or return things via `outqueue`. `inqueue`
is only important if your command requires mid-command user
interaction.
By the time this method is called, all the argument
preprocessing (callbacks, defaults, etc.) have already been
handled by :meth:`hooke.command.Command.run`.
"""
# On initialization, :class:`hooke.hooke.Hooke` sets up a
# logger to use for Hooke-related messages. Please use it
# instead of debugging 'print' calls, etc., as it is more
# configurable.
log = logging.getLogger('hooke')
log.debug('Watching %s paint dry' % params['color'])
class HookeInfoCommand (Command):
"""Get information about the :class:`hooke.hooke.Hooke` instance.
"""
def __init__(self, plugin):
super(HookeInfoCommand, self).__init__(
name='hooke info',
help=self.__doc__, plugin=plugin)
def _run(self, hooke, inqueue, outqueue, params):
outqueue.put('Hooke info:')
# hooke.config contains a :class:`hooke.config.HookeConfigParser`
# with the current hooke configuration settings.
config_file = StringIO.StringIO()
hooke.config.write(config_file)
outqueue.put('configuration:\n %s'
% '\n '.join(config_file.getvalue().splitlines()))
# The plugin's configuration settings are also available.
outqueue.put('plugin config: %s' % self.plugin.config)
# hooke.plugins contains :class:`hooke.plugin.Plugin`\s defining
# :class:`hooke.command.Command`\s.
outqueue.put('plugins: %s'
% ', '.join([plugin.name for plugin in hooke.plugins]))
# hooke.drivers contains :class:`hooke.driver.Driver`\s for
# loading curves.
outqueue.put('drivers: %s'
% ', '.join([driver.name for driver in hooke.drivers]))
# hooke.playlists is a
# :class:`hooke.playlist.Playlists` instance full of
# :class:`hooke.playlist.FilePlaylist`\s. Each playlist may
# contain several :class:`hooke.curve.Curve`\s representing a
# grouped collection of data.
playlist = hooke.playlists.current()
if playlist == None:
return
outqueue.put('current playlist: %s (%d of %d)'
% (playlist.name,
hooke.playlists.index(),
len(hooke.playlists)))
curve = playlist.current()
if curve == None:
return
outqueue.put('current curve: %s (%d of %d)'
% (curve.name,
playlist.index(),
len(playlist)))
class PointInfoCommand (Command):
"""Get information about user-selected points.
Ordinarily a command that knew it would need user selected points
would declare an appropriate argument (see, for example,
:class:`hooke.plugin.cut.CutCommand`). However, here we find the
points via user-interaction to show how user interaction works.
"""
def __init__(self, plugin):
super(PointInfoCommand, self).__init__(
name='point info',
arguments=[
CurveArgument,
Argument(name='block', type='int', default=0,
help="""
Data block that points are selected from. For an approach/retract
force curve, `0` selects the approaching curve and `1` selects the
retracting curve.
""".strip()),
],
help=self.__doc__, plugin=plugin)
def _run(self, hooke, inqueue, outqueue, params):
data = params['curve'].data[params['block']]
while True:
# Ask the user to select a point.
outqueue.put(PointRequest(
msg="Select a point",
curve=params['curve'],
block=params['block']))
# Get the user's response
result = inqueue.get()
if not isinstance(result, PointResponse):
inqueue.put(result) # put the message back in the queue
raise Failure(
'expected a PointResponse instance but got %s.'
% type(result))
point = result.value
# Act on the response
if point == None:
break
values = []
for column_name in data.info['columns']:
name,unit = split_data_label(column_name)
column_index = data.info['columns'].index(column_name)
value = data[point,column_index]
si_value = ppSI(value, unit, decimals=2)
values.append('%s: %s' % (name, si_value))
outqueue.put('selected point %d: %s'
% (point, ', '.join(values))) | PypiClean |
/KL_Audit_supportV3.0-1.0-py3-none-any.whl/AuditModule/core/applications/AuditManagementModules.py | from AuditModule.common import AppConstants
from AuditModule.util import Logging as LOGG
import traceback
from datetime import datetime
from AuditModule.core.persistences.adaptors.CassandraPersistenceAdaptor import CassandraDButility
cassandra_obj = CassandraDButility()
Logger = LOGG.get_logger()
def audit_logs_modules(application_type, content_type, application_data):
try:
user_name = ""
client_id = ""
user_role_name = ""
operations = ""
module = ""
parameter_lable = {}
status = ""
strategy_json = AppConstants.AuditLogsConstants.audit_logs_mapping_json.get(application_type)
user_name, client_id, user_role_name, operations, parameter_lable, status = \
audit_logs_user_access_strategies(application_data)
return user_name, client_id, user_role_name, operations, parameter_lable, status
except Exception as e:
audit_message = ""
action = ""
user_id = ""
json_string = {}
label = ""
Logger.error('Error in audit Log modules ', str(e))
return audit_message, action, user_id, json_string, label
def audit_logs_user_access_strategies(user_data):
try:
user_name = ""
client_id = ""
user_role_name = ""
operations = ""
module = ""
parameter_lable = {}
status = ""
if 'query_json' in user_data:
response = user_data.get('query_json', "")
if type(response) is not str:
user_name = response.get('user_name', "")
if not user_name and type(response) is not str:
user_name = response.get('user_id', "")
if not user_name and 'cookies' in user_data:
user_name = user_data['cookies']['user_id']
if not user_name and 'user_id' in user_data:
user_name = user_data['user_id']
if type(user_data.get('user_id')) is dict:
user_name = user_data['user_id'].get("user_id", "")
operations = user_data.get("action", "")
client_id = response.get("client_id", "")
if not client_id and 'client_id' in user_data:
client_id = user_data.get("client_id", "")
if type(user_data.get('client_id')) is dict:
client_id = user_data['client_id'].get("client_id", "")
user_role_name = response.get("user_role_name", "")
parameter_lable = user_data['query_json']
# module = response.get("module", "")
status = user_data['query_json'].get("status", "success")
return user_name, client_id, user_role_name, operations, parameter_lable, status
except Exception as e:
        print(traceback.format_exc())
        Logger.error("Error in user Access: %s", str(e))
raise Exception(str(e))
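# Illustrative input shape for audit_logs_user_access_strategies (an
# assumption inferred from the lookups above, not a documented contract):
#
#   user_data = {
#       "query_json": {"user_name": "jdoe", "client_id": "c1",
#                      "user_role_name": "admin", "status": "success"},
#       "action": "update",
#   }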
def generate_id(table_name, op_type):
try:
if table_name:
data = dict()
check = cassandra_obj.table_check(table_name)
if check:
counter = 0
data['id'] = counter + 1
data['time'] = datetime.utcnow()
data['name'] = "audit_id"
cassandra_obj.insert_table_id(table_name, data)
Logger.info("created and inserted data successfully")
return data['id']
else:
name = "audit_id"
response = cassandra_obj.fetch_table_id(name)
for i in response:
resp_id = i[0]
data['id'] = resp_id + 1
data['time'] = datetime.utcnow()
data['name'] = name
cassandra_obj.insert_table_id(table_name, data)
Logger.info("updated data successfully")
return data['id']
except Exception as e:
        print(traceback.format_exc())
        Logger.error("Error in generate_id: %s", str(e))
raise Exception(str(e)) | PypiClean |
/BlazeForm-0.5.1.tar.gz/BlazeForm-0.5.1/changelog.rst | Changelog
=========
0.5.1 released 2020-10-23
-------------------------
- Fix mutable default argument in tolist (556fcf0_)
.. _556fcf0: https://github.com/blazelibs/blazeform/commit/556fcf0
0.5.0 released 2020-07-14
-------------------------
- drop support for python 2
- modernize package setup and CI
- support python 3.8 (1d9afa9_)
.. _1d9afa9: https://github.com/blazelibs/blazeform/commit/1d9afa9
0.4.2 released 2018-01-17
-------------------------
* handle string type in file upload for blank submissions
0.4.1 released 2017-06-02
-------------------------
* update validation messages for consistency across python versions
0.4.0 released 2016-11-23
-------------------------
* added support for Python 3 (3.4 and 3.5)
* set up CI and coverage
0.3.9 released 2016-05-20
-------------------------
* make is_empty more general with respect to input type, 0 should not be empty
0.3.8 released 2016-02-24
-------------------------
* update compatibility with FormEncode to include 1.3
0.3.7 released 2014-10-27
-------------------------
* fix checkbox element to handle empty value as on/true for IE 9/10 compat.
0.3.6 released 2014-10-15
-------------------------
* allow labels for logical groups, such as radio buttons or checkboxes
0.3.5 released 2014-08-20
-------------------------
* ensure that form validators and element processors which are FE validators
are instances
0.3.4 released 2012-07-05
-------------------------
* form now has all_errors() method which returns form and field errors as (list,
dict) tuple (respectively).
* update the way file uploads are checked for being sent. Previously, we were
testing for the filename header to be None, but Werkzeug is sending it over as
an empty string in the FileStorage object now. Could theoretically result in
behavior change, but only in narrow edge cases.
0.3.3 released 2011-11-16
-------------------------
* TextAreaElement now uses maxlength kwarg
0.3.2 released 2011-06-11
-------------------------
* fix broken distribution of 0.3.1
0.3.1 released 2011-06-11
-------------------------
* fixed bug in radio button rendering after validation error
| PypiClean |
/GoogleAppEngineMapReduce-1.9.22.0.tar.gz/GoogleAppEngineMapReduce-1.9.22.0/mapreduce/json_util.py | """Json related utilities."""
import copy
import datetime
import logging
try:
import json
except ImportError:
import simplejson as json
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.ext import db
from google.appengine.ext import ndb
# pylint: disable=invalid-name
class JsonEncoder(json.JSONEncoder):
"""MR customized json encoder."""
TYPE_ID = "__mr_json_type"
def default(self, o):
"""Inherit docs."""
if type(o) in _TYPE_TO_ENCODER:
encoder = _TYPE_TO_ENCODER[type(o)]
json_struct = encoder(o)
json_struct[self.TYPE_ID] = type(o).__name__
return json_struct
return super(JsonEncoder, self).default(o)
class JsonDecoder(json.JSONDecoder):
"""MR customized json decoder."""
def __init__(self, **kwargs):
if "object_hook" not in kwargs:
kwargs["object_hook"] = self._dict_to_obj
super(JsonDecoder, self).__init__(**kwargs)
def _dict_to_obj(self, d):
"""Converts a dictionary of json object to a Python object."""
if JsonEncoder.TYPE_ID not in d:
return d
type_name = d.pop(JsonEncoder.TYPE_ID)
if type_name in _TYPE_NAME_TO_DECODER:
decoder = _TYPE_NAME_TO_DECODER[type_name]
return decoder(d)
else:
raise TypeError("Invalid type %s.", type_name)
_DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S.%f"
def _json_encode_datetime(o):
"""Json encode a datetime object.
Args:
o: a datetime object.
Returns:
A dict of json primitives.
"""
return {"isostr": o.strftime(_DATETIME_FORMAT)}
def _json_decode_datetime(d):
"""Converts a dict of json primitives to a datetime object."""
return datetime.datetime.strptime(d["isostr"], _DATETIME_FORMAT)
def _register_json_primitive(object_type, encoder, decoder):
"""Extend what MR can json serialize.
Args:
object_type: type of the object.
encoder: a function that takes in an object and returns a dict of
json primitives.
decoder: inverse function of encoder.
"""
global _TYPE_TO_ENCODER
global _TYPE_NAME_TO_DECODER
if object_type not in _TYPE_TO_ENCODER:
_TYPE_TO_ENCODER[object_type] = encoder
_TYPE_NAME_TO_DECODER[object_type.__name__] = decoder
_TYPE_TO_ENCODER = {}
_TYPE_NAME_TO_DECODER = {}
_register_json_primitive(datetime.datetime,
_json_encode_datetime,
_json_decode_datetime)
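# Illustrative registration of an additional type (a sketch; Point is a
# hypothetical class, not part of this module):
#
#   _register_json_primitive(Point,
#                            lambda p: {"x": p.x, "y": p.y},
#                            lambda d: Point(d["x"], d["y"]))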
# ndb.Key
def _JsonEncodeKey(o):
"""Json encode an ndb.Key object."""
return {'key_string': o.urlsafe()}
def _JsonDecodeKey(d):
"""Json decode a ndb.Key object."""
k_c = d['key_string']
if isinstance(k_c, (list, tuple)):
return ndb.Key(flat=k_c)
return ndb.Key(urlsafe=d['key_string'])
_register_json_primitive(ndb.Key, _JsonEncodeKey, _JsonDecodeKey)
class JsonMixin(object):
"""Simple, stateless json utilities mixin.
Requires class to implement two methods:
to_json(self): convert data to json-compatible datastructure (dict,
list, strings, numbers)
@classmethod from_json(cls, json): load data from json-compatible structure.
"""
def to_json_str(self):
"""Convert data to json string representation.
Returns:
json representation as string.
"""
_json = self.to_json()
try:
return json.dumps(_json, sort_keys=True, cls=JsonEncoder)
except:
logging.exception("Could not serialize JSON: %r", _json)
raise
@classmethod
def from_json_str(cls, json_str):
"""Convert json string representation into class instance.
Args:
json_str: json representation as string.
Returns:
New instance of the class with data loaded from json string.
"""
return cls.from_json(json.loads(json_str, cls=JsonDecoder))
class JsonProperty(db.UnindexedProperty):
"""Property type for storing json representation of data.
Requires data types to implement two methods:
to_json(self): convert data to json-compatible datastructure (dict,
list, strings, numbers)
@classmethod from_json(cls, json): load data from json-compatible structure.
"""
def __init__(self, data_type, default=None, **kwargs):
"""Constructor.
Args:
data_type: underlying data type as class.
default: default value for the property. The value is deep copied
        for each model instance.
**kwargs: remaining arguments.
"""
kwargs["default"] = default
super(JsonProperty, self).__init__(**kwargs)
self.data_type = data_type
def get_value_for_datastore(self, model_instance):
"""Gets value for datastore.
Args:
model_instance: instance of the model class.
Returns:
datastore-compatible value.
"""
value = super(JsonProperty, self).get_value_for_datastore(model_instance)
if not value:
return None
json_value = value
if not isinstance(value, dict):
json_value = value.to_json()
if not json_value:
return None
return datastore_types.Text(json.dumps(
json_value, sort_keys=True, cls=JsonEncoder))
def make_value_from_datastore(self, value):
"""Convert value from datastore representation.
Args:
value: datastore value.
Returns:
value to store in the model.
"""
if value is None:
return None
_json = json.loads(value, cls=JsonDecoder)
if self.data_type == dict:
return _json
return self.data_type.from_json(_json)
def validate(self, value):
"""Validate value.
Args:
value: model value.
Returns:
Whether the specified value is valid data type value.
Raises:
BadValueError: when value is not of self.data_type type.
"""
if value is not None and not isinstance(value, self.data_type):
raise datastore_errors.BadValueError(
"Property %s must be convertible to a %s instance (%s)" %
(self.name, self.data_type, value))
return super(JsonProperty, self).validate(value)
def empty(self, value):
"""Checks if value is empty.
Args:
value: model value.
Returns:
True passed value is empty.
"""
return not value
def default_value(self):
"""Create default model value.
If default option was specified, then it will be deeply copied.
None otherwise.
Returns:
default model value.
"""
if self.default:
return copy.deepcopy(self.default)
else:
return None | PypiClean |
/KalaPy-0.4.2.tar.gz/KalaPy-0.4.2/example/wiki/models.py | import os
import difflib
from docutils.core import publish_parts
from jinja2 import Markup
from kalapy import db
from kalapy.web import url_for
class Page(db.Model):
name = db.String(size=60, required=True, unique=True)
@property
def title(self):
return self.name.replace('_', ' ')
@classmethod
def by_name(cls, name):
page = cls.all().filter('name ==', name).first()
if page:
return page.revisions.all().order('-timestamp').first()
return None
@classmethod
def by_revision(cls, revision):
return Revision.get(revision)
class Revision(db.Model):
page = db.ManyToOne(Page, reverse_name='revisions')
timestamp = db.DateTime(default_now=True)
text = db.Text()
note = db.String(size=200)
@property
def name(self):
return self.page.name
@property
def title(self):
return self.page.title
@property
def time(self):
return self.timestamp.strftime('%Y-%m-%d %H:%M:%S')
def render(self):
return parse_rst(self.text)
def parse_rst(markup):
parts = publish_parts(
source=markup,
writer_name='html4css1',
settings_overrides={'_disable_config': True})
return parts['html_body']
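# Illustrative example: parse_rst('**Hello**') returns the docutils
# 'html_body' part, roughly '<div class="document"><p><strong>Hello</strong></p></div>'.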
class Pagination(object):
"""
Paginate a query object.
"""
def __init__(self, query, per_page, page, endpoint):
self.query = query
self.per_page = per_page
self.page = page
self.endpoint = endpoint
self._count = None
@property
def entries(self):
return self.query.fetch(self.per_page, (self.page - 1) * self.per_page)
@property
def has_previous(self):
return self.page > 1
@property
def has_next(self):
return self.page < self.pages
@property
def previous(self):
return url_for(self.endpoint, page=self.page - 1)
@property
def next(self):
return url_for(self.endpoint, page=self.page + 1)
@property
def count(self):
return self.query.count()
@property
def pages(self):
return max(0, self.count - 1) // self.per_page + 1 | PypiClean |
/GraphLab_Create-2.1-cp27-none-macosx_10_5_x86_64.macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.macosx_10_11_intel.macosx_10_11_x86_64.whl/graphlab/toolkits/feature_engineering/_doc_utils.py | import string
from ._feature_engineering import Transformer
import types
import sys
def func_copy(f):
"""
Make a copy of a function using the underlying function attributes.
"""
if sys.version_info.major == 2:
func_code = f.func_code
func_globals = f.func_globals
func_name = f.func_name
func_defaults = f.func_defaults
func_closure = f.func_closure
else:
func_code = f.__code__
func_globals = f.__globals__
        func_name = f.__name__
func_defaults = f.__defaults__
func_closure = f.__closure__
return types.FunctionType(func_code, func_globals, name = func_name,
argdefs = func_defaults, closure = func_closure)
def fit(self, data):
return Transformer.fit(self, data)
def transform(self, data):
return Transformer.transform(self, data)
def fit_transform(self, data):
return Transformer.fit_transform(self, data)
def republish_docs(cls):
"""
Republish the doc-strings for fit, transform, and fit_transform.
"""
def get_doc_string(func_obj):
if sys.version_info.major == 2:
return func_obj.im_func.func_doc
else:
return func_obj.__doc__
fit_copy = func_copy(fit)
    fit_copy.__doc__ = get_doc_string(Transformer.fit)
setattr(cls, 'fit', add_docstring(
examples = cls._fit_examples_doc)(fit_copy))
transform_copy = func_copy(transform)
    transform_copy.__doc__ = get_doc_string(Transformer.transform)
setattr(cls, 'transform', add_docstring(
examples = cls._transform_examples_doc)(transform_copy))
fit_transform_copy = func_copy(fit_transform)
    fit_transform_copy.__doc__ = get_doc_string(Transformer.fit_transform)
setattr(cls, 'fit_transform', add_docstring(
examples = cls._fit_transform_examples_doc)(fit_transform_copy))
return cls
class _Formatter(string.Formatter):
"""
Format {strings} that are withing {brackets} as described in the doctring.
"""
def get_value(self, key, args, kwargs):
if hasattr(key,"__mod__") and key in args:
return args[key]
elif key in kwargs:
return kwargs[key]
return '{%s}' % key
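# Illustrative behaviour of _Formatter: placeholders without a matching
# keyword are left intact instead of raising KeyError, e.g.
#
#   _Formatter().format("{create}{example_start}", create="...")   # -> "...{example_start}"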
def add_docstring(**format_dict):
"""
__example_start = '''
Examples
---------
'''
__create = '''
>>> import graphlab as gl
>>> url = 'https://static.turi.com/datasets/xgboost/mushroom.csv'
>>> data = graphlab.SFrame.read_csv(url)
>>> data['target'] = (data['target'] == 'e')
>>> train, test = data.random_split(0.8)
>>> model = graphlab.boosted_trees.create(train, target='label', *args, **kwargs)
'''
@add_docstring(create = __create, example_start = __example_start)
def predict(x, **kwargs):
'''
{example_start}{create}
'''
return x
"""
def add_doc_string_context(func):
wrapper = func
formatter = _Formatter()
        wrapper.__doc__ = formatter.format(func.__doc__, **format_dict)
return wrapper
return add_doc_string_context | PypiClean |
/Nuitka-1.8.tar.gz/Nuitka-1.8/nuitka/build/inline_copy/lib/scons-4.3.0/SCons/Tool/MSCommon/sdk.py |
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
__doc__ = """Module to detect the Platform/Windows SDK
PSDK 2003 R1 is the earliest version detected.
"""
import os
import SCons.Errors
import SCons.Util
from .common import debug, read_reg
# SDK Checks. This is of course a mess, as is everything else on MS platforms. Here
# is what we do to detect the SDK:
#
# For Windows SDK >= 6.0: just look into the registry entries:
# HKLM\Software\Microsoft\Microsoft SDKs\Windows
# All the keys in there are the available versions.
#
# For Platform SDK before 6.0 (2003 server R1 and R2, etc...), there does not
# seem to be any sane registry key, so the precise location is hardcoded.
#
# For versions below 2003R1, it seems the PSDK is included with Visual Studio?
#
# Also, per the following:
# http://benjamin.smedbergs.us/blog/tag/atl/
# VC++ Professional comes with the SDK, VC++ Express does not.
# Location of the SDK (checked for 6.1 only)
_CURINSTALLED_SDK_HKEY_ROOT = \
r"Software\Microsoft\Microsoft SDKs\Windows\CurrentInstallFolder"
class SDKDefinition:
"""
An abstract base class for trying to find installed SDK directories.
"""
def __init__(self, version, **kw):
self.version = version
self.__dict__.update(kw)
def find_sdk_dir(self):
"""Try to find the MS SDK from the registry.
Return None if failed or the directory does not exist.
"""
if not SCons.Util.can_read_reg:
debug('find_sdk_dir(): can not read registry')
return None
hkey = self.HKEY_FMT % self.hkey_data
debug('find_sdk_dir(): checking registry:{}'.format(hkey))
try:
sdk_dir = read_reg(hkey)
except OSError:
debug('find_sdk_dir(): no SDK registry key {}'.format(repr(hkey)))
return None
debug('find_sdk_dir(): Trying SDK Dir: {}'.format(sdk_dir))
if not os.path.exists(sdk_dir):
debug('find_sdk_dir(): {} not on file system'.format(sdk_dir))
return None
ftc = os.path.join(sdk_dir, self.sanity_check_file)
if not os.path.exists(ftc):
debug("find_sdk_dir(): sanity check {} not found".format(ftc))
return None
return sdk_dir
def get_sdk_dir(self):
"""Return the MSSSDK given the version string."""
try:
return self._sdk_dir
except AttributeError:
sdk_dir = self.find_sdk_dir()
self._sdk_dir = sdk_dir
return sdk_dir
def get_sdk_vc_script(self,host_arch, target_arch):
""" Return the script to initialize the VC compiler installed by SDK
"""
if host_arch == 'amd64' and target_arch == 'x86':
# No cross tools needed compiling 32 bits on 64 bit machine
host_arch=target_arch
arch_string=target_arch
if host_arch != target_arch:
arch_string='%s_%s'%(host_arch,target_arch)
debug("get_sdk_vc_script():arch_string:%s host_arch:%s target_arch:%s"%(arch_string,
host_arch,
target_arch))
file=self.vc_setup_scripts.get(arch_string,None)
debug("get_sdk_vc_script():file:%s"%file)
return file
class WindowsSDK(SDKDefinition):
"""
A subclass for trying to find installed Windows SDK directories.
"""
HKEY_FMT = r'Software\Microsoft\Microsoft SDKs\Windows\v%s\InstallationFolder'
def __init__(self, *args, **kw):
SDKDefinition.__init__(self, *args, **kw)
self.hkey_data = self.version
class PlatformSDK(SDKDefinition):
"""
A subclass for trying to find installed Platform SDK directories.
"""
HKEY_FMT = r'Software\Microsoft\MicrosoftSDK\InstalledSDKS\%s\Install Dir'
def __init__(self, *args, **kw):
SDKDefinition.__init__(self, *args, **kw)
self.hkey_data = self.uuid
#
# The list of VC initialization scripts installed by the SDK
# These should be tried if the vcvarsall.bat TARGET_ARCH fails
preSDK61VCSetupScripts = { 'x86' : r'bin\vcvars32.bat',
'amd64' : r'bin\vcvarsamd64.bat',
'x86_amd64': r'bin\vcvarsx86_amd64.bat',
'x86_ia64' : r'bin\vcvarsx86_ia64.bat',
'ia64' : r'bin\vcvarsia64.bat'}
SDK61VCSetupScripts = {'x86' : r'bin\vcvars32.bat',
'amd64' : r'bin\amd64\vcvarsamd64.bat',
'x86_amd64': r'bin\x86_amd64\vcvarsx86_amd64.bat',
'x86_ia64' : r'bin\x86_ia64\vcvarsx86_ia64.bat',
'ia64' : r'bin\ia64\vcvarsia64.bat'}
SDK70VCSetupScripts = { 'x86' : r'bin\vcvars32.bat',
'amd64' : r'bin\vcvars64.bat',
'x86_amd64': r'bin\vcvarsx86_amd64.bat',
'x86_ia64' : r'bin\vcvarsx86_ia64.bat',
'ia64' : r'bin\vcvarsia64.bat'}
SDK100VCSetupScripts = {'x86' : r'bin\vcvars32.bat',
'amd64' : r'bin\vcvars64.bat',
'x86_amd64': r'bin\x86_amd64\vcvarsx86_amd64.bat',
'x86_arm' : r'bin\x86_arm\vcvarsx86_arm.bat'}
# The list of supported SDKs which we know how to detect.
#
# The first SDK found in the list is the one used by default if there
# are multiple SDKs installed. Barring good reasons to the contrary,
# this means we should list SDKs from most recent to oldest.
#
# If you update this list, update the documentation in Tool/mssdk.xml.
SupportedSDKList = [
WindowsSDK('10.0A',
sanity_check_file=r'bin\SetEnv.Cmd',
include_subdir='include',
lib_subdir={
'x86' : ['lib'],
'x86_64' : [r'lib\x64'],
'ia64' : [r'lib\ia64'],
},
vc_setup_scripts = SDK70VCSetupScripts,
),
WindowsSDK('10.0',
sanity_check_file=r'bin\SetEnv.Cmd',
include_subdir='include',
lib_subdir={
'x86' : ['lib'],
'x86_64' : [r'lib\x64'],
'ia64' : [r'lib\ia64'],
},
vc_setup_scripts = SDK70VCSetupScripts,
),
WindowsSDK('7.1',
sanity_check_file=r'bin\SetEnv.Cmd',
include_subdir='include',
lib_subdir={
'x86' : ['lib'],
'x86_64' : [r'lib\x64'],
'ia64' : [r'lib\ia64'],
},
vc_setup_scripts = SDK70VCSetupScripts,
),
WindowsSDK('7.0A',
sanity_check_file=r'bin\SetEnv.Cmd',
include_subdir='include',
lib_subdir={
'x86' : ['lib'],
'x86_64' : [r'lib\x64'],
'ia64' : [r'lib\ia64'],
},
vc_setup_scripts = SDK70VCSetupScripts,
),
WindowsSDK('7.0',
sanity_check_file=r'bin\SetEnv.Cmd',
include_subdir='include',
lib_subdir={
'x86' : ['lib'],
'x86_64' : [r'lib\x64'],
'ia64' : [r'lib\ia64'],
},
vc_setup_scripts = SDK70VCSetupScripts,
),
WindowsSDK('6.1',
sanity_check_file=r'bin\SetEnv.Cmd',
include_subdir='include',
lib_subdir={
'x86' : ['lib'],
'x86_64' : [r'lib\x64'],
'ia64' : [r'lib\ia64'],
},
vc_setup_scripts = SDK61VCSetupScripts,
),
WindowsSDK('6.0A',
sanity_check_file=r'include\windows.h',
include_subdir='include',
lib_subdir={
'x86' : ['lib'],
'x86_64' : [r'lib\x64'],
'ia64' : [r'lib\ia64'],
},
vc_setup_scripts = preSDK61VCSetupScripts,
),
WindowsSDK('6.0',
sanity_check_file=r'bin\gacutil.exe',
include_subdir='include',
lib_subdir='lib',
vc_setup_scripts = preSDK61VCSetupScripts,
),
PlatformSDK('2003R2',
sanity_check_file=r'SetEnv.Cmd',
uuid="D2FF9F89-8AA2-4373-8A31-C838BF4DBBE1",
vc_setup_scripts = preSDK61VCSetupScripts,
),
PlatformSDK('2003R1',
sanity_check_file=r'SetEnv.Cmd',
uuid="8F9E5EF3-A9A5-491B-A889-C58EFFECE8B3",
vc_setup_scripts = preSDK61VCSetupScripts,
),
]
SupportedSDKMap = {}
for sdk in SupportedSDKList:
SupportedSDKMap[sdk.version] = sdk
# Finding installed SDKs isn't cheap, because it goes not only to the
# registry but also to the disk to sanity-check that there is, in fact,
# an SDK installed there and that the registry entry isn't just stale.
# Find this information once, when requested, and cache it.
InstalledSDKList = None
InstalledSDKMap = None
def get_installed_sdks():
global InstalledSDKList
global InstalledSDKMap
debug('get_installed_sdks()')
if InstalledSDKList is None:
InstalledSDKList = []
InstalledSDKMap = {}
for sdk in SupportedSDKList:
debug('trying to find SDK %s' % sdk.version)
if sdk.get_sdk_dir():
debug('found SDK %s' % sdk.version)
InstalledSDKList.append(sdk)
InstalledSDKMap[sdk.version] = sdk
return InstalledSDKList
# We may be asked to update multiple construction environments with
# SDK information. When doing this, we check on-disk for whether
# the SDK has 'mfc' and 'atl' subdirectories. Since going to disk
# is expensive, cache results by directory.
SDKEnvironmentUpdates = {}
def set_sdk_by_directory(env, sdk_dir):
global SDKEnvironmentUpdates
debug('set_sdk_by_directory: Using dir:%s'%sdk_dir)
try:
env_tuple_list = SDKEnvironmentUpdates[sdk_dir]
except KeyError:
env_tuple_list = []
SDKEnvironmentUpdates[sdk_dir] = env_tuple_list
include_path = os.path.join(sdk_dir, 'include')
mfc_path = os.path.join(include_path, 'mfc')
atl_path = os.path.join(include_path, 'atl')
if os.path.exists(mfc_path):
env_tuple_list.append(('INCLUDE', mfc_path))
if os.path.exists(atl_path):
env_tuple_list.append(('INCLUDE', atl_path))
env_tuple_list.append(('INCLUDE', include_path))
env_tuple_list.append(('LIB', os.path.join(sdk_dir, 'lib')))
env_tuple_list.append(('LIBPATH', os.path.join(sdk_dir, 'lib')))
env_tuple_list.append(('PATH', os.path.join(sdk_dir, 'bin')))
for variable, directory in env_tuple_list:
env.PrependENVPath(variable, directory)
def get_sdk_by_version(mssdk):
if mssdk not in SupportedSDKMap:
raise SCons.Errors.UserError("SDK version {} is not supported".format(repr(mssdk)))
get_installed_sdks()
return InstalledSDKMap.get(mssdk)
def get_default_sdk():
"""Set up the default Platform/Windows SDK."""
get_installed_sdks()
if not InstalledSDKList:
return None
return InstalledSDKList[0]
def mssdk_setup_env(env):
debug('mssdk_setup_env()')
if 'MSSDK_DIR' in env:
sdk_dir = env['MSSDK_DIR']
if sdk_dir is None:
return
sdk_dir = env.subst(sdk_dir)
debug('mssdk_setup_env: Using MSSDK_DIR:{}'.format(sdk_dir))
elif 'MSSDK_VERSION' in env:
sdk_version = env['MSSDK_VERSION']
if sdk_version is None:
msg = "SDK version is specified as None"
raise SCons.Errors.UserError(msg)
sdk_version = env.subst(sdk_version)
mssdk = get_sdk_by_version(sdk_version)
if mssdk is None:
msg = "SDK version %s is not installed" % sdk_version
raise SCons.Errors.UserError(msg)
sdk_dir = mssdk.get_sdk_dir()
debug('mssdk_setup_env: Using MSSDK_VERSION:%s'%sdk_dir)
elif 'MSVS_VERSION' in env:
msvs_version = env['MSVS_VERSION']
debug('mssdk_setup_env:Getting MSVS_VERSION from env:%s'%msvs_version)
if msvs_version is None:
debug('mssdk_setup_env thinks msvs_version is None')
return
msvs_version = env.subst(msvs_version)
from . import vs
msvs = vs.get_vs_by_version(msvs_version)
debug('mssdk_setup_env:msvs is :%s'%msvs)
if not msvs:
debug('mssdk_setup_env: no VS version detected, bailing out:%s'%msvs)
return
sdk_version = msvs.sdk_version
debug('msvs.sdk_version is %s'%sdk_version)
if not sdk_version:
return
mssdk = get_sdk_by_version(sdk_version)
if not mssdk:
mssdk = get_default_sdk()
if not mssdk:
return
sdk_dir = mssdk.get_sdk_dir()
debug('mssdk_setup_env: Using MSVS_VERSION:%s'%sdk_dir)
else:
mssdk = get_default_sdk()
if not mssdk:
return
sdk_dir = mssdk.get_sdk_dir()
debug('mssdk_setup_env: not using any env values. sdk_dir:%s'%sdk_dir)
set_sdk_by_directory(env, sdk_dir)
#print "No MSVS_VERSION: this is likely to be a bug"
def mssdk_exists(version=None):
sdks = get_installed_sdks()
if version is None:
return len(sdks) > 0
return version in sdks
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | PypiClean |
/Boutique_crawling-0.0.1-py3-none-any.whl/JIMMYCHOO.py | import requests
import pandas as pd
from bs4 import BeautifulSoup
import urllib.request
import re
# Bags
def j_bags():
"""
JIMMY CHOO 한국 사이트에 접속하여 남자, 여자 가방 항목의 모든 페이지에서 데이터를 가져옵니다.
가져온 데이터를 데이터 프레임의 형태로 바꿔출력해주고,
사진 링크를 이용하여 사진 파일을 다운로드 합니다.
저장 경로를 변경하고 싶을 때에는
urllib.request.urlretrieve(url, 'bags/' + 'g' + str(idx) + '.jpg')에서
bags/ 를 다른 폴더 명으로 변경하시면 됩니다(미리 만들어져 있어야 함)
"""
# Women
total = []
for i in range(1, 9):
start = (i-1) * 40
url = "https://row.jimmychoo.com/en_KR/women/handbags/?psortd1=1&psortb1=bestMatch&sz=40&start={}&format=page-element".format(start)
response = requests.get(url)
dom = BeautifulSoup(response.content, "html.parser")
elements = dom.select(".js-grid-tile")
datas = []
for element in elements:
datas.append({
"brand" : "JIMMYCHOO",
"sex" : "Women",
"title" : element.select_one(".name-link").get("data-productname") + "(" + element.select_one(".name-link > .visuallyhidden").text + ")",
"price" : element.select_one(".standart-price > .product-standard-price").text.strip(),
"image" : 'https://row.jimmychoo.com' + element.select_one("img").get("data-main-src").split("?")[0]
})
w_bags = pd.DataFrame(datas)[["brand", "title", "price", "sex", "image"]].reset_index(drop = True)
l = []
for i in w_bags["price"]:
n = re.findall("\d+", i)
n = "".join(n)
n = int(n)
l.append(n)
w_bags["price"] = l
total.append(w_bags)
w_bags = pd.concat(total).reset_index(drop = True)
# Men
# Run
w_bags.to_csv('jimmychoo_bags.csv', index=False, encoding='utf-8')
for idx, link in enumerate(w_bags["image"]):
url = link
urllib.request.urlretrieve(url, 'bags/' + "j" + str(idx) + '.jpg')
return w_bags
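# Example usage (illustrative, not part of the original module): the call
# below crawls the site, writes 'jimmychoo_bags.csv', and downloads the
# images into a pre-existing 'bags/' folder.
# >>> bags_df = j_bags()
# >>> bags_df.head()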
# Shoes
def j_shoes():
"""
JIMMY CHOO 한국 사이트에 접속하여 남자, 여자 신발 항목의 모든 페이지에서 데이터를 가져옵니다.
가져온 데이터를 데이터 프레임의 형태로 바꿔출력해주고,
사진 링크를 이용하여 사진 파일을 다운로드 합니다.
저장 경로를 변경하고 싶을 때에는
urllib.request.urlretrieve(url, 'shoes/' + 'g' + str(idx) + '.jpg')에서
shoes/ 를 다른 폴더 명으로 변경하시면 됩니다(미리 만들어져 있어야 함)
"""
# Women
url = "https://row.jimmychoo.com/en_KR/women/shoes/?psortd1=1&psortb1=bestMatch&sz=40&start=0&format=page-element"
response = requests.get(url)
dom = BeautifulSoup(response.content, "html.parser")
elements = dom.select(".js-grid-tile")
datas = []
for element in elements:
datas.append({
"brand": "JIMMYCHOO",
"title": element.select_one("a").get("aria-label") + "(" + element.select_one("a > .visuallyhidden").text + ")",
"price": element.select_one(".product-standard-price").text.strip(),
"image": "https://row.jimmychoo.com" + element.select_one("img").get("src").split("?")[0],
"sex": "Women"
})
w_shoes = pd.DataFrame(datas)[["brand", "title", "price", "sex", "image"]]
l = []
for i in w_shoes["price"]:
n = re.findall("\d+", i)
n = "".join(n)
n = int(n)
l.append(n)
w_shoes["price"] = l
# Men
url = "https://row.jimmychoo.com/en_KR/men/shoes/?psortd1=1&psortb1=bestMatch&sz=40&start=0&format=page-element"
response = requests.get(url)
dom = BeautifulSoup(response.content, "html.parser")
elements = dom.select(".js-grid-tile")
datas = []
for element in elements:
datas.append({
"brand" : "JIMMYCHOO",
"title" : element.select_one(".name-link").text.strip().split("\n")[0] + ", " + element.select_one(".name-link").text.strip().split("\n")[1],
"price" : element.select_one(".product-standard-price").text.strip(),
"image" : "https://row.jimmychoo.com" + element.select_one("img").get("src").split("?")[0],
"sex" : "Men"
})
m_shoes = pd.DataFrame(datas)[["brand", "title", "price", "sex", "image"]]
p = []
for i in m_shoes["price"]:
pr = re.findall("\d+", i)
pr = "".join(pr)
pr = int(pr)
p.append(pr)
m_shoes["price"] = p
# Combine
jimmychoo_shoes_df = pd.concat([w_shoes, m_shoes]).reset_index(drop=True)
jimmychoo_shoes_df.to_csv("jimmychoo_shoes.csv", index=False, encoding='utf-8')
for idx, link in enumerate(jimmychoo_shoes_df["image"]):
url = link
urllib.request.urlretrieve(url, 'shoes/' + 'j' + str(idx) + '.jpg')
return jimmychoo_shoes_df | PypiClean |
/Editra-0.7.20.tar.gz/Editra-0.7.20/src/syntax/_perl.py | __author__ = "Cody Precord <[email protected]>"
__svnid__ = "$Id: _perl.py 66108 2010-11-10 21:04:54Z CJP $"
__revision__ = "$Revision: 66108 $"
#-----------------------------------------------------------------------------#
# Imports
import wx
import wx.stc as stc
# Local Imports
import synglob
import syndata
#-----------------------------------------------------------------------------#
#---- Keyword Specifications ----#
# Perl Keywords
PERL_KW = (0, "if elseif unless else switch eq ne gt lt ge le cmp not and or "
"xor while for foreach do until continue defined undef and or "
"not bless ref BEGIN END my local our goto return last next redo "
"chomp chop chr crypt index lc lcfirst length org pack reverse "
"rindex sprintf substr uc ucfirst pos quotemet split study abs "
"atan2 cos exp hex int log oct rand sin sqrt srand spice unshift "
"shift push pop split join reverse grep map sort unpack each "
"exists keys values tie tied untie carp confess croak dbmclose "
"dbmopen die syscall binmode close closedir eof fileno getc "
"lstat print printf readdir readline readpipe rewinddir select "
"stat tell telldir write fcntl flock ioctl open opendir read "
"seek seekdir sysopen sysread sysseek syswrite truncate pack vec "
"chdir chmod chown chroot glob link mkdir readlink rename rmdir "
"symlink umask ulink utime caller dump eval exit wanarray "
"import alarm exec fork getpgrp getppid getpriority kill pipe "
"setpgrp setpriority sleep system times wait waitpid accept "
"bind connect getpeername getsockname getsockopt listen recv "
"send setsockopt shutdown socket socketpair msgctl msgget msgrcv "
"msgsnd semctl semget semop shmctl shmget shmread shmwrite "
"endhostent endnetent endprooent endservent gethostbyaddr "
"gethostbyname gethostent getnetbyaddr getnetbyname getnetent "
"getprotobyname getprotobynumber getprotoent getervbyname time "
"getservbyport getservent sethostent setnetent setprotoent "
"setservent getpwuid getpwnam getpwent setpwent endpwent "
"getgrgid getlogin getgrnam setgrent endgrent gtime localtime "
"times warn formline reset scalar delete prototype lock new "
"NULL __FILE__ __LINE__ __PACKAGE__ __DATA__ __END__ AUTOLOAD "
"BEGIN CORE DESTROY END EQ GE GT INIT LE LT NE CHECK use sub "
"elsif require getgrent ")
#---- Syntax Style Specs ----#
SYNTAX_ITEMS = [ (stc.STC_PL_DEFAULT, 'default_style'),
(stc.STC_PL_ARRAY, 'array_style'),
(stc.STC_PL_BACKTICKS, 'btick_style'),
(stc.STC_PL_CHARACTER, 'char_style'),
(stc.STC_PL_COMMENTLINE, 'comment_style'),
(stc.STC_PL_DATASECTION, 'default_style'), # STYLE ME
(stc.STC_PL_ERROR, 'error_style'),
(stc.STC_PL_HASH, 'global_style'),
(stc.STC_PL_HERE_DELIM, 'here_style'),
(stc.STC_PL_HERE_Q, 'here_style'),
(stc.STC_PL_HERE_QQ, 'here_style'),
(stc.STC_PL_HERE_QX, 'here_style'),
(stc.STC_PL_IDENTIFIER, 'default_style'),
(stc.STC_PL_LONGQUOTE, 'default_style'), # STYLE ME
(stc.STC_PL_NUMBER, 'number_style'),
(stc.STC_PL_OPERATOR, 'operator_style'),
(stc.STC_PL_POD, 'comment_style'),
(stc.STC_PL_PREPROCESSOR, 'pre_style' ),
(stc.STC_PL_PUNCTUATION, 'default_style'), # STYLE ME
(stc.STC_PL_REGEX, 'regex_style'),
(stc.STC_PL_REGSUBST, 'regex_style'),
(stc.STC_PL_SCALAR, 'scalar_style'),
(stc.STC_PL_STRING, 'string_style'),
(stc.STC_PL_STRING_Q, 'string_style'),
(stc.STC_PL_STRING_QQ, 'string_style'),
(stc.STC_PL_STRING_QR, 'string_style'),
(stc.STC_PL_STRING_QW, 'string_style'),
(stc.STC_PL_STRING_QX, 'string_style'),
(stc.STC_PL_SYMBOLTABLE, 'default_style'), # STYLE ME
(stc.STC_PL_WORD, 'keyword_style') ]
if wx.VERSION >= (2, 9, 0, 0, ''):
SYNTAX_ITEMS.append((stc.STC_PL_FORMAT, 'default_style')) #TODO
SYNTAX_ITEMS.append((stc.STC_PL_FORMAT_IDENT, 'default_style')) #TODO
SYNTAX_ITEMS.append((stc.STC_PL_SUB_PROTOTYPE, 'default_style')) #TODO
#---- Extra Properties ----#
FOLD = ("fold", "1")
FLD_COMPACT = ("fold.compact", "1")
FLD_COMMENT = ("fold.comment", "1")
FLD_POD = ("fold.perl.pod", "1")
FLD_PKG = ("fold.perl.package", "1")
#-----------------------------------------------------------------------------#
class SyntaxData(syndata.SyntaxDataBase):
"""SyntaxData object for Perl"""
def __init__(self, langid):
super(SyntaxData, self).__init__(langid)
# Setup
self.SetLexer(stc.STC_LEX_PERL)
def GetKeywords(self):
"""Returns Specified Keywords List """
return [PERL_KW]
def GetSyntaxSpec(self):
"""Syntax Specifications """
return SYNTAX_ITEMS
def GetProperties(self):
"""Returns a list of Extra Properties to set """
return [FOLD]
def GetCommentPattern(self):
"""Returns a list of characters used to comment a block of code """
return [u'#']
#---- Syntax Modules Internal Functions ----#
def KeywordString(option=0):
"""Returns the specified Keyword String
@note: not used by most modules
"""
if option == synglob.ID_LANG_PERL:
return PERL_KW[1]
else:
return u''
#---- End Syntax Modules Internal Functions ----#
#-----------------------------------------------------------------------------# | PypiClean |
/HILO-MPC-1.0.3.tar.gz/HILO-MPC-1.0.3/hilo_mpc/util/data.py |
from __future__ import annotations
from copy import deepcopy
from typing import Optional, Sequence, TypeVar, Union
import warnings
import casadi as ca
import numpy as np
from ..modules.base import Base, TimeSeries
from ..modules.controller.base import Controller
from ..util.util import is_list_like
Module = TypeVar('Module', bound=Base)
Control = TypeVar('Control', bound=Controller)
Numeric = Union[int, float]
NumArray = Union[Sequence[Numeric], np.ndarray]
NumNoneArray = Union[Sequence[Optional[Numeric]], np.ndarray]
_CHIRP_CHIRP = """
*#
%## %%%
(%# %%%%%#
%% %%% %%
%% %%
%# %%
%%%#**%(*%%%%
%%%****//%#//////%%
%%****/////%/////////%%
%%***///////////////////%%
%%****/////////////////////%% %%(((%(
%%****////////((((((/////////% #%((###%%#%
%%****////////( @@@@ (///////% %%((####%% %##%*%%/**#%
%****////////(( @ @@@(///////% %%((#####%/ %%****//%%.
/%***//////////(( @@@@////////%% %#((#####%% %%/***///%%%/%
,%***////////////////////////%%%%%%, %%((######%%%%****////%% %/&
%/***//////////////////////%###(((((%%(######%%%/***/////%%% %%
%%***///////%%/////////%%###((((((((%(%%%%%%****//////%%#%% %*%
%%****/%%///////%%%%####(((((((((%%(((%****///////%%#%#%% %/% /%%%%%%
/#/, %#####(((((((((((#%##%/***//////(%%((((((((%*/%((((#######%%
/| %####((((((((((%%##%****//////%%##########%*########%%%.
/ | %#####(((((((%***%%**/////%%%############%/%#%%%%**%#
__________________________/ | %%##%%###%%*****/%%**/%%%%%%%%%%%%%%%%%%*/%**//,*%#
/ \ /% %%%%%***,,////,,,,///****//*****/***%//%,,,/%%
| chirp, chirp, chirp, ... | /% %% .%%*///,,,,,,////,,,,//,,,,*//,,%//#,%%%
\_____________________________/ %% /% %%#,,,,,,////,,,,///,,,,///,,%///%
%%% %% %%%%(///,,,,,*///,,,,(#%%%%%//%
%%%% %% ###%%%%%%%%%%%###%% %//%
# %%%% %%%#####%//%
%%%%% %///%
%%% *%////////%%%
"""
class DataSet:
""""""
def __init__(
self,
features: Sequence[str],
labels: Sequence[str],
add_time: bool = False,
properties: Optional[dict[str, Union[Numeric, dict[str, Sequence[str]]]]] = None,
plot_backend: Optional[str] = None
) -> None:
"""Constructor method"""
self._raw_data = TimeSeries(backend=plot_backend, parent=self)
self._train_data = TimeSeries(backend=plot_backend, parent=self)
self._test_data = TimeSeries(backend=plot_backend, parent=self)
n_features = len(features)
n_labels = len(labels)
vector = deepcopy(properties)
if vector is None:
vector = {}
if add_time:
if 'sampling_time' not in vector:
vector['sampling_time'] = 1.
if 'time' not in vector:
vector['time'] = {}
if 'x' not in vector:
vector['x'] = {}
if 'y' not in vector:
vector['y'] = {}
if add_time:
vector['dt'] = vector.pop('sampling_time')
vector['t'] = vector.pop('time')
vec = vector['t']
vec['values_or_names'] = ['t']
if 'description' not in vec:
vec['description'] = ['time...']
if 'labels' not in vec:
vec['labels'] = ['time']
if 'units' not in vec:
vec['units'] = ['h']
vec['shape'] = (1, 0)
vec['data_format'] = ca.DM
vec = vector['x']
vec['values_or_names'] = features
if 'description' not in vec:
vec['description'] = n_features * ['']
if 'labels' not in vec:
vec['labels'] = n_features * ['']
if 'units' not in vec:
vec['units'] = n_features * ['']
vec['shape'] = (n_features, 0)
vec['data_format'] = ca.DM
vec = vector['y']
vec['values_or_names'] = labels
if 'description' not in vec:
vec['description'] = n_labels * ['']
if 'labels' not in vec:
vec['labels'] = n_labels * ['']
if 'units' not in vec:
vec['units'] = n_labels * ['']
vec['shape'] = (n_labels, 0)
vec['data_format'] = ca.DM
names = vector.keys()
self._raw_data.setup(*names, **vector)
self._train_data.setup(*names, **vector)
self._test_data.setup(*names, **vector)
self._train_index = []
self._test_index = []
self._x_noise_added = False
self._y_noise_added = False
def __len__(self) -> int:
"""Length method"""
return self._raw_data.n_samples
def _reduce_data(
self,
method: str,
distance_threshold: Optional[Numeric] = None,
downsample_factor: Optional[int] = None
) -> (int, NumArray):
"""
:param method:
:param distance_threshold:
:param downsample_factor:
:return:
"""
if self._raw_data.is_empty():
raise ValueError("No raw data available")
data_removal = method.lower().replace(' ', '_')
if data_removal == 'euclidean_distance' and distance_threshold is None:
warnings.warn("No distance threshold supplied for data selection using Euclidean distance. "
"Applying default value of 0.5.")
distance_threshold = .5
if data_removal == 'downsample' and downsample_factor is None:
warnings.warn("No downsample factor supplied for data selection using downsampling. "
"Applying default value of 10.")
downsample_factor = 10
inputs = self._raw_data.get_by_id('x').full()
index = np.array([])
k = 0
dim = inputs.shape[1]
n_inputs = inputs.shape[1]
while True:
if data_removal == 'euclidean_distance':
euclidean_distance = np.zeros(n_inputs)
euclidean_distance[:k + 1] = distance_threshold * np.ones(k + 1)
distance = inputs[:, None, k] - inputs[:, k + 1:]
k += 1
if distance.size == 0:
break
euclidean_distance[k:] = np.linalg.norm(distance, axis=0)
index_keep = euclidean_distance >= distance_threshold
if index.size == 0:
index = np.flatnonzero(index_keep)
else:
index = index[index_keep]
elif data_removal == 'downsample':
index = np.arange(0, inputs.shape[1], downsample_factor)
break
else:
raise NotImplementedError(f"Data selection method '{method}' not implemented or recognized")
inputs = inputs[:, index_keep]
n_inputs = inputs.shape[1]
if n_inputs <= k:
break
return dim, index
def _plot_selected_data(self, label: str, index: NumArray, *args, **kwargs):
"""
:param label:
:param args:
:param kwargs:
:return:
"""
x_data = self._raw_data.to_dict(*[arg[0] for arg in args], subplots=True, suffix=label, index=index)
x_data = [value for value in x_data.values()]
y_data = self._raw_data.to_dict(*[arg[1] for arg in args], subplots=True, suffix=label, index=index)
y_data_keys = y_data.keys()
y_data = [value for value in y_data.values()]
for key, label in enumerate(y_data_keys):
x_data[key]['label'] = label
y_data[key]['label'] = label
y_data[key]['kind'] = 'scatter'
plot_kwargs = kwargs.copy()
plot_kwargs['marker'] = kwargs.get('marker', 'o')
plot_kwargs['marker_size'] = kwargs.get('marker_size')
if plot_kwargs['marker_size'] is None:
if self._raw_data.plot_backend == 'bokeh':
plot_kwargs['marker_size'] = 5
elif self._raw_data.plot_backend == 'matplotlib':
plot_kwargs['marker_size'] = 20
return self.plot_raw_data(*args, x_data=x_data, y_data=y_data, **plot_kwargs)
@property
def features(self) -> list[str]:
"""
:return:
"""
return self._raw_data.get_names('x')
@property
def labels(self) -> list[str]:
"""
:return:
"""
return self._raw_data.get_names('y')
@property
def raw_data(self) -> (NumArray, NumArray):
"""
:return:
"""
if self._x_noise_added:
feature_key = 'x_noisy'
else:
feature_key = 'x'
if self._y_noise_added:
label_key = 'y_noisy'
else:
label_key = 'y'
features = self._raw_data.get_by_id(feature_key).full()
labels = self._raw_data.get_by_id(label_key).full()
return features, labels
@property
def train_data(self) -> (NumArray, NumArray):
"""
:return:
"""
if self._x_noise_added:
feature_key = 'x_noisy'
else:
feature_key = 'x'
if self._y_noise_added:
label_key = 'y_noisy'
else:
label_key = 'y'
features = self._raw_data.get_by_id(feature_key).full()
labels = self._raw_data.get_by_id(label_key).full()
return features[:, self._train_index], labels[:, self._train_index]
@property
def test_data(self) -> (NumArray, NumArray):
"""
:return:
"""
if self._x_noise_added:
feature_key = 'x_noisy'
else:
feature_key = 'x'
if self._y_noise_added:
label_key = 'y_noisy'
else:
label_key = 'y'
features = self._raw_data.get_by_id(feature_key).full()
labels = self._raw_data.get_by_id(label_key).full()
return features[:, self._test_index], labels[:, self._test_index]
@property
def sampling_time(self) -> Optional[float]:
"""
:return:
"""
return self._raw_data.dt
dt = sampling_time
@property
def time_unit(self) -> Optional[str]:
"""
:return:
"""
if 't' in self._raw_data:
return self._raw_data.get_units('t')
return None
def add_data(
self,
features: NumArray,
labels: NumArray,
time: Optional[NumArray] = None,
feature_noise: Optional[NumArray] = None,
label_noise: Optional[NumArray] = None
) -> None:
"""
:param features:
:param labels:
:param time:
:param feature_noise:
:param label_noise:
:return:
"""
self._raw_data.add('x', features)
self._raw_data.add('y', labels)
if time is not None:
if 't' in self._raw_data:
self._raw_data.add('t', time)
else:
warnings.warn("No data array was set up for the time... No changes applied with respect to the time "
"vector")
if feature_noise is not None:
if not self._x_noise_added:
self._x_noise_added = True
self._raw_data.add('x_noise', feature_noise)
if label_noise is not None:
if not self._y_noise_added:
self._y_noise_added = True
self._raw_data.add('y_noise', label_noise)
def set_data(
self,
features: NumArray,
labels: NumArray,
time: Optional[NumArray] = None,
feature_noise: Optional[NumArray] = None,
label_noise: Optional[NumArray] = None
) -> None:
"""
:param features:
:param labels:
:param time:
:param feature_noise:
:param label_noise:
:return:
"""
self._raw_data.set('x', features)
self._raw_data.set('y', labels)
if time is not None:
if 't' in self._raw_data:
self._raw_data.set('t', time)
else:
warnings.warn("No data array was set up for the time... No changes applied with respect to the time "
"vector")
if feature_noise is not None:
if not self._x_noise_added:
self._x_noise_added = True
self._raw_data.set('x_noise', feature_noise)
if label_noise is not None:
if not self._y_noise_added:
self._y_noise_added = True
self._raw_data.set('y_noise', label_noise)
def add_noise(
self,
*args,
distribution: Union[str, Sequence[str]] = 'normal',
seed: Optional[int] = None,
**kwargs
) -> None:
"""
:param args:
:param distribution:
:param seed:
:param kwargs:
:return:
"""
if not self._x_noise_added:
self._x_noise_added = True
if not self._y_noise_added:
self._y_noise_added = True
self._raw_data.make_some_noise(*args, distribution=distribution, seed=seed, **kwargs)
def select_train_data(
self,
method: str,
distance_threshold: Optional[Numeric] = None,
downsample_factor: Optional[int] = None
) -> None:
"""
:param method:
:param distance_threshold:
:param downsample_factor:
:return:
"""
dim, index = self._reduce_data(method, distance_threshold=distance_threshold,
downsample_factor=downsample_factor)
self._train_index = index
print(f"{len(index)}/{dim} data points selected for training")
def select_test_data(
self,
method: str,
distance_threshold: Optional[Numeric] = None,
downsample_factor: Optional[int] = None
) -> None:
"""
:param method:
:param distance_threshold:
:param downsample_factor:
:return:
"""
dim, index = self._reduce_data(method, distance_threshold=distance_threshold,
downsample_factor=downsample_factor)
self._test_index = index
print(f"{len(index)}/{dim} data points selected for testing")
def plot_raw_data(self, *args, **kwargs):
"""
:param args:
:param kwargs:
:return:
"""
plot_kwargs = kwargs.copy()
if self._raw_data.plot_backend == 'bokeh':
plot_kwargs["line_width"] = kwargs.get("line_width", 2)
elif self._raw_data.plot_backend == 'matplotlib':
plot_kwargs["line_width"] = kwargs.get("line_width", 1)
# NOTE: The following 2 lines will be ignored for backend 'matplotlib'
plot_kwargs["major_label_text_font_size"] = kwargs.get("major_label_text_font_size", "12pt")
plot_kwargs["axis_label_text_font_size"] = kwargs.get("axis_label_text_font_size", "12pt")
return self._raw_data.plot(*args, **plot_kwargs)
def plot_train_data(self, *args, **kwargs):
"""
:param args:
:param kwargs:
:return:
"""
self._plot_selected_data('train', self._train_index, *args, **kwargs)
def plot_test_data(self, *args, **kwargs):
"""
:param args:
:param kwargs:
:return:
"""
self._plot_selected_data('test', self._test_index, *args, **kwargs)
def copy(self, ignore_time: bool = False) -> 'DataSet':
"""
:param ignore_time:
:return:
"""
add_time = 't' in self._raw_data and not self._raw_data['t'].is_empty() and not ignore_time
if add_time:
properties = {
'sampling_time': self._raw_data.dt,
'time': {
'description': [self._raw_data.get_description('t')],
'labels': [self._raw_data.get_labels('t')],
'units': [self._raw_data.get_units('t')]
}
}
else:
properties = {}
properties['x'] = {
'description': self._raw_data.get_description('x'),
'labels': self._raw_data.get_labels('x'),
'units': self._raw_data.get_units('x')
}
properties['y'] = {
'description': self._raw_data.get_description('y'),
'labels': self._raw_data.get_labels('y'),
'units': self._raw_data.get_units('y')
}
data_set = DataSet(self._raw_data.get_names('x'), self._raw_data.get_names('y'), add_time=add_time,
properties=properties, plot_backend=self._raw_data.plot_backend)
features = self._raw_data.get_by_id('x')
labels = self._raw_data.get_by_id('y')
kwargs = {}
if add_time:
kwargs['time'] = self._raw_data.get_by_id('t')
feature_noise = self._raw_data.get_by_id('x_noise')
if not feature_noise.is_empty():
kwargs['feature_noise'] = feature_noise
label_noise = self._raw_data.get_by_id('y_noise')
if not label_noise.is_empty():
kwargs['label_noise'] = label_noise
data_set.set_data(features, labels, **kwargs)
return data_set
def sort(self, arg: str, order: str = 'descending') -> None:
"""
:param arg:
:param order:
:return:
"""
data = self._raw_data.get_by_name(arg)
if data is not None and not data.is_empty():
idx = np.argsort(data, axis=None)
if order == 'descending':
idx = idx[::-1]
elif order != 'ascending':
raise ValueError(f"Keyword argument order='{order}' not recognized")
for arg in self._raw_data:
data = self._raw_data.get_by_id(arg)
noise = self._raw_data.get_by_id(arg + '_noise')
self._raw_data.set(arg, data[:, idx])
if not noise.is_empty():
self._raw_data.set(arg + '_noise', noise)
def append(self, other: list['DataSet'], ignore_index: bool = False, sort: bool = True) -> 'DataSet':
"""
:param other:
:param ignore_index:
:param sort:
:return:
"""
new_data_set = self.copy(ignore_time=ignore_index)
# TODO: Add support for pandas objects
if not ignore_index:
dt = new_data_set._raw_data.dt
time_unit = new_data_set._raw_data.get_units('t')
else:
dt = None
time_unit = None
features = new_data_set._raw_data.get_names('x')
labels = new_data_set._raw_data.get_names('y')
for data_set in other:
if not ignore_index:
other_dt = data_set.dt
other_time_unit = data_set.time_unit
if dt != other_dt or time_unit != other_time_unit:
warnings.warn(f"Different sampling times for supplied data sets. The data set to be appended has a "
f"sampling time of dt='{other_dt} {other_time_unit}', but the data set to be extended"
f" has a sampling time of dt='{dt} {time_unit}'. If time information is not required "
f"in your case, ignore this message or set the flag ignore_index to True to prevent "
f"the message from being shown in future.")
if (other_dt is None and dt is not None) or (other_time_unit is None and time_unit is not None):
warnings.warn('An ambiguous data set with respect to time was supplied')
if features != data_set.features:
# TODO: Sort features of other data set, if just the order is different
raise ValueError(f"Mismatch in the features. Got {data_set.features}, expected {features}.")
if labels != data_set.labels:
# TODO: Sort labels of other data set, if just the order is different
raise ValueError(f"Mismatch in the labels. Got {data_set.labels}, expected {labels}.")
# TODO: What to do here, when training data selection was executed? Do we just ignore it?
other_features = data_set._raw_data.get_by_id('x')
other_labels = data_set._raw_data.get_by_id('y')
other_kwargs = {}
if not ignore_index:
other_t = data_set._raw_data.get_by_id('t')
if not other_t.is_empty():
other_kwargs['time'] = other_t
feature_noise = new_data_set._raw_data.get_by_id('x_noise')
other_feature_noise = data_set._raw_data.get_by_id('x_noise')
if not feature_noise.is_empty() and not other_feature_noise.is_empty():
other_kwargs['feature_noise'] = other_feature_noise
label_noise = new_data_set._raw_data.get_by_id('y_noise')
other_label_noise = data_set._raw_data.get_by_id('y_noise')
if not label_noise.is_empty() and not other_label_noise.is_empty():
other_kwargs['label_noise'] = other_label_noise
# NOTE: We ignore description, labels and units of the other data sets for now, since they should be the
# same ideally.
new_data_set.add_data(other_features, other_labels, **other_kwargs)
if sort:
new_data_set.sort('t', order='ascending')
return new_data_set
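# Illustrative usage sketch (not part of the module; the names and numbers
# are made up). Feature and label arrays are stored column-wise, i.e. with
# shapes (n_features, n_samples) and (n_labels, n_samples):
# >>> ds = DataSet(['u'], ['y'], add_time=True, plot_backend='matplotlib')
# >>> ds.set_data(features, labels, time=time_points)
# >>> ds.select_train_data('downsample', downsample_factor=10)
# >>> x_train, y_train = ds.train_data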
class DataGenerator:
""""""
def __init__(
self,
module: Module,
x0: Optional[Union[Numeric, NumArray]] = None,
z0: Optional[Union[Numeric, NumArray]] = None,
p0: Optional[Union[Numeric, NumArray]] = None,
use_input_as_label: bool = False
) -> None:
"""Constructor method"""
self._module = module.copy(setup=True)
if x0 is None:
x0 = module.x0
if x0 is None:
raise RuntimeError("No initial dynamical states found. Please supply initial dynamical states or set "
"initial conditions of the model before generating data!")
if z0 is None:
z0 = module.z0
if module.n_z > 0 and z0 is None:
raise RuntimeError("No initial algebraic states found. Please supply initial algebraic states or set "
"initial conditions of the model before generating data!")
if p0 is None:
if 'p' not in module.solution or module.solution.get_by_id('p').is_empty():
p0 = None
else:
p0 = module.solution.get_by_id('p:0')
if module.n_p > 0 and p0 is None:
raise RuntimeError("No parameter values found. Please supply parameter values or set values for the "
"parameters of the model before generating data!")
self._module.set_initial_conditions(x0, z0=z0)
if p0 is not None:
self._module.set_initial_parameter_values(p0)
self._n_inputs = self._module.n_u
self._samples = None
self._control_loop = None
self._use_input_as_label = use_input_as_label
self._data_set = None
@staticmethod
def _linear_chirp(
t: NumArray,
chirp_rate: Numeric,
initial_phase: Optional[Numeric] = None,
initial_frequency: Optional[Numeric] = None
) -> np.ndarray:
"""
:param t:
:param chirp_rate:
:param initial_phase:
:param initial_frequency:
:return:
"""
if initial_phase is None:
initial_phase = np.pi / 2.
if initial_frequency is None:
initial_frequency = .0001
return np.sin(initial_phase + 2. * np.pi * (chirp_rate / 2. * t + initial_frequency) * t)
@staticmethod
def _exponential_chirp(
t: NumArray,
chirp_rate: Numeric,
initial_phase: Optional[Numeric] = None,
initial_frequency: Optional[Numeric] = None
) -> np.ndarray:
"""
:param t:
:param chirp_rate:
:param initial_phase:
:param initial_frequency:
:return:
"""
# TODO: Should we use other values here?
if initial_phase is None:
initial_phase = np.pi / 2.
if initial_frequency is None:
initial_frequency = .0001
return np.sin(initial_phase + 2. * np.pi * initial_frequency * (chirp_rate ** t - 1) / np.log(chirp_rate))
@staticmethod
def _hyperbolic_chirp(
t: NumArray,
dt: Numeric,
initial_frequency_ratio: Numeric,
initial_phase: Optional[Numeric] = None,
initial_frequency: Optional[Numeric] = None
) -> np.ndarray:
"""
:param t:
:param dt:
:param initial_frequency_ratio:
:param initial_phase:
:param initial_frequency:
:return:
"""
# TODO: Should we use other values here?
if initial_phase is None:
initial_phase = np.pi / 2.
if initial_frequency is None:
initial_frequency = .0001
fraction = 1. - 1. / initial_frequency_ratio
return np.sin(initial_phase - 2. * np.pi * dt * initial_frequency / fraction * np.log(1. - fraction / dt * t))
def _add_noise(self, index: NumArray, shape: tuple[int, int], **kwargs) -> None:
"""
:param index:
:param shape:
:param kwargs:
:return:
"""
noise = np.zeros(shape)
noise_added = False
for k, x_name in enumerate(self._module.dynamical_state_names):
if k not in index:
continue
x_noise = kwargs.get(x_name)
if x_noise is not None:
distribution, seed, info = _get_distribution_information(**x_noise)
if seed is not None:
np.random.seed(seed)
if distribution == 'random_uniform':
noise[index[k], :] = np.random.uniform(low=info[0], high=info[1], size=(1, shape[1]))
elif distribution == 'random_normal':
noise[index[k], :] = np.random.normal(loc=0., scale=info, size=(1, shape[1]))
else:
raise ValueError(f"Distribution '{distribution}' not available/recognized for adding noise to "
f"generated data")
if not noise_added:
noise_added = True
if not noise_added:
distribution, seed, info = _get_distribution_information(**kwargs)
if seed is not None:
np.random.seed(seed)
if distribution == 'random_uniform':
noise[index, :] = np.random.uniform(low=info[0], high=info[1], size=(len(index), shape[1]))
elif distribution == 'random_normal':
noise[index, :] = np.random.normal(loc=0., scale=info, size=(len(index), shape[1]))
else:
raise ValueError(f"Distribution '{distribution}' not available/recognized for adding noise to "
f"generated data")
self._samples = noise
@property
def data(self) -> Optional[DataSet]:
"""
:return:
"""
return self._data_set
def random_uniform(
self,
n_samples: int,
steps: int,
lower_bound: Union[Numeric, NumArray],
upper_bound: Union[Numeric, NumArray],
seed: Optional[int] = None
) -> None:
"""
:param n_samples:
:param steps:
:param lower_bound:
:param upper_bound:
:param seed:
:return:
"""
if seed is not None:
np.random.seed(seed)
samples = np.random.uniform(low=lower_bound, high=upper_bound, size=(n_samples, self._n_inputs))
samples = np.repeat(samples, steps, axis=0)
self._samples = samples.T
def random_normal(
self,
n_samples: int,
steps: int,
mean: Union[Numeric, NumArray],
variance: Union[Numeric, NumArray],
seed: Optional[int] = None
) -> None:
"""
:param n_samples:
:param steps:
:param mean:
:param variance:
:param seed:
:return:
"""
if seed is not None:
np.random.seed(seed)
samples = np.random.normal(loc=mean, scale=np.sqrt(variance), size=(n_samples, self._n_inputs))
samples = np.repeat(samples, steps, axis=0)
self._samples = samples.T
def chirp(
self,
type_: Union[str, Sequence[str]],
amplitude: Union[Numeric, NumArray],
length: Union[Numeric, NumArray],
mean: Union[Numeric, NumArray],
chirp_rate: Union[Numeric, NumArray],
initial_phase: Optional[Union[Numeric, NumNoneArray]] = None,
initial_frequency: Optional[Union[Numeric, NumNoneArray]] = None,
initial_frequency_ratio: Optional[Union[Numeric, NumArray]] = None
) -> None:
"""
:param type_:
:param amplitude:
:param length:
:param mean:
:param chirp_rate:
:param initial_phase:
:param initial_frequency:
:param initial_frequency_ratio:
:return:
"""
print(_CHIRP_CHIRP)
if not is_list_like(type_):
type_ = [[type_] for _ in range(self._n_inputs)]
if not is_list_like(amplitude):
amplitude = [[amplitude] for _ in range(self._n_inputs)]
if not is_list_like(length):
length = [[length] for _ in range(self._n_inputs)]
if not is_list_like(mean):
mean = [[mean] for _ in range(self._n_inputs)]
if not is_list_like(chirp_rate):
chirp_rate = [[chirp_rate] for _ in range(self._n_inputs)]
if not is_list_like(initial_phase):
initial_phase = [[initial_phase] for _ in range(self._n_inputs)]
if not is_list_like(initial_frequency):
initial_frequency = [[initial_frequency] for _ in range(self._n_inputs)]
if not is_list_like(initial_frequency_ratio):
initial_frequency_ratio = [[initial_frequency_ratio] for _ in range(self._n_inputs)]
dt = self._module.solution.dt
samples = []
for i in range(self._n_inputs):
ti = type_[i]
ai = amplitude[i]
li = length[i]
mi = mean[i]
ci = chirp_rate[i]
phi = initial_phase[i]
fri = initial_frequency[i]
rati = initial_frequency_ratio[i]
if not is_list_like(ti):
ti = [ti]
if not is_list_like(ai):
ai = [ai]
if not is_list_like(li):
li = [li]
if not is_list_like(mi):
mi = [mi]
if not is_list_like(ci):
ci = [ci]
if not is_list_like(phi):
phi = [phi]
if not is_list_like(fri):
fri = [fri]
if not is_list_like(rati):
rati = [rati]
lens = [len(ti), len(ai), len(li), len(mi), len(ci), len(phi), len(fri), len(rati)]
n_chirps = max(lens)
if any(k != n_chirps and k != 1 for k in lens):
mismatch = []
if lens[0] != 1:
mismatch.append(f'types ({lens[0]})')
if lens[1] != 1:
mismatch.append(f'amplitudes ({lens[1]})')
if lens[2] != 1:
mismatch.append(f'lengths ({lens[2]})')
if lens[3] != 1:
mismatch.append(f'means ({lens[3]})')
if lens[4] != 1:
mismatch.append(f'chirp rates ({lens[4]})')
if lens[5] != 1:
mismatch.append(f'initial phases ({lens[5]})')
if lens[6] != 1:
mismatch.append(f'initial frequencies ({lens[6]})')
if lens[7] != 1:
mismatch.append(f'initial frequency ratios ({lens[7]})')
raise ValueError(f"Dimension mismatch between {', '.join(mismatch[:-1])} and {mismatch[-1]}")
if lens[0] != n_chirps:
ti *= n_chirps
if lens[1] != n_chirps:
ai *= n_chirps
if lens[2] != n_chirps:
li *= n_chirps
if lens[3] != n_chirps:
mi *= n_chirps
if lens[4] != n_chirps:
ci *= n_chirps
if lens[5] != n_chirps:
phi *= n_chirps
if lens[6] != n_chirps:
fri *= n_chirps
if lens[7] != n_chirps:
rati *= n_chirps
ui = []
for j in range(n_chirps):
t = np.arange(0., li[j], dt)
tij = ti[j].lower()
if tij == 'linear':
chirp = self._linear_chirp(t, ci[j], initial_phase=phi[j], initial_frequency=fri[j])
elif tij == 'exponential':
# TODO: Test this
chirp = self._exponential_chirp(t, ci[j], initial_phase=phi[j], initial_frequency=fri[j])
elif tij == 'hyperbolic':
# TODO: Test this
ratij = rati[j]
if ratij is None:
raise ValueError("Initial frequency ratio for hyperbolic chirp signal was not supplied")
chirp = self._hyperbolic_chirp(t, dt, ratij, initial_phase=phi[j], initial_frequency=fri[j])
else:
raise ValueError(f"Type '{ti[j]}' not recognized for chirp signal")
signal = mi[j] + ai[j] * chirp
ui.append(signal)
ui = np.concatenate(ui)
samples.append(ui)
samples = np.concatenate([samples], axis=1)
self._samples = samples
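# Illustrative call (not part of the module; the numbers are arbitrary):
# one linear chirp per model input, 10 time units long, oscillating around
# a mean of 2.0 with amplitude 0.5 and a chirp rate of 0.1:
# >>> generator.chirp('linear', 0.5, 10.0, 2.0, 0.1)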
def closed_loop(self, controller: Control, steps: int) -> None:
"""
:param controller:
:param steps:
:return:
"""
controller_is_mpc = controller.type == 'NMPC'
def run() -> None:
"""
:return:
"""
self._module.reset_solution()
solution = self._module.solution
x0 = solution.get_by_id('x:0')
for _ in range(steps):
if controller_is_mpc:
state_names = controller._model_orig.dynamical_state_names
ind_states = [self._module.dynamical_state_names.index(name) for name in state_names]
u = controller.optimize(x0[ind_states])
self._module.simulate(u=u)
x0 = solution.get_by_id('x:f')
self._samples = steps
self._control_loop = run
def run(
self,
output: str,
skip: Optional[Sequence[int]] = None,
shift: int = 0.,
add_noise: Optional[Union[dict[str, Numeric], dict[str, dict[str, Numeric]]]] = None
) -> None:
"""
:param output:
:param skip:
:param shift:
:param add_noise:
:return:
"""
# TODO: Support for algebraic states and parameters
if self._control_loop is not None:
self._control_loop()
n_data_points = self._samples
else:
self._module.reset_solution()
n_data_points = self._samples.shape[1]
self._module.simulate(u=self._samples, steps=n_data_points)
if skip is None:
skip = []
keep = [k for k in range(self._module.n_x) if k not in skip]
t = self._module.solution.get_by_id('t')
x = self._module.solution.get_by_id('x')
u = self._module.solution.get_by_id('u')
time = t[shift + 1:].full()
if not self._use_input_as_label:
if output == 'difference':
outputs = np.diff(x, axis=1)[keep, shift:]
features = [name for index, name in enumerate(self._module.dynamical_state_names) if
index in keep] + self._module.input_names
labels = ['delta_' + name for index, name in enumerate(self._module.dynamical_state_names) if
index in keep]
label_description = ['difference of ' + text for index, text in
enumerate(self._module.dynamical_state_description) if index in keep]
else: # output == 'absolute'
outputs = x[keep, shift + 1:].full()
features = [name + '_k' for index, name in enumerate(self._module.dynamical_state_names) if
index in keep] + self._module.input_names
labels = [name + '_k+1' for index, name in enumerate(self._module.dynamical_state_names) if
index in keep]
label_description = [text for index, text in enumerate(self._module.dynamical_state_description) if
index in keep]
inputs = np.concatenate([x[keep, k:-(shift + 1 - k)] for k in range(shift + 1)] + [u[:, shift:]], axis=0)
feature_description = [text for index, text in enumerate(self._module.dynamical_state_description) if
index in keep] + self._module.input_description
feature_labels = [text for index, text in enumerate(self._module.dynamical_state_labels) if
index in keep] + self._module.input_labels
feature_units = [text for index, text in enumerate(self._module.dynamical_state_units) if
index in keep] + self._module.input_units
label_labels = [text for index, text in enumerate(self._module.dynamical_state_labels) if index in keep]
label_units = [text for index, text in enumerate(self._module.dynamical_state_units) if index in keep]
else:
if output == 'difference':
warnings.warn("The behavior of choosing the difference of the inputs as labels has not been tested yet."
" So strange things can happen. Should you have an example where the difference of the "
"inputs as labels is required, we would appreciate it if you sent us that example so we "
"can refine this case.")
outputs = np.diff(u, axis=1)[:, shift:]
labels = ['delta_' + name for name in self._module.input_names]
label_description = ['difference of ' + text for text in self._module.input_description]
else: # output == 'absolute'
outputs = u[:, shift:].full()
labels = self._module.input_names
label_description = self._module.input_description
inputs = np.concatenate([x[keep, k:-(shift + 1 - k)] for k in range(shift + 1)], axis=0)
features = [name for index, name in enumerate(self._module.dynamical_state_names) if index in keep]
feature_description = [text for index, text in enumerate(self._module.dynamical_state_description) if
index in keep]
feature_labels = [text for index, text in enumerate(self._module.dynamical_state_labels) if index in keep]
feature_units = [text for index, text in enumerate(self._module.dynamical_state_units) if index in keep]
label_labels = self._module.input_labels
label_units = self._module.input_units
if shift > 0:
if output == 'difference':
features = [name + '_k' if name in self._module.dynamical_state_names else name for name in features]
feature_description = [
text + ' at time point k' if text and text in self._module.dynamical_state_description else text for
text in feature_description]
for k in range(shift):
features = [name + f'_k-{k + 1}' for index, name in enumerate(self._module.dynamical_state_names) if
index in keep] + features
feature_description = [text + f' at time point k-{k + 1}' if text else text for index, text in
enumerate(self._module.dynamical_state_description) if
index in keep] + feature_description
feature_labels = [text for index, text in enumerate(self._module.dynamical_state_labels) if
index in keep] + feature_labels
feature_units = [text for index, text in enumerate(self._module.dynamical_state_units) if
index in keep] + feature_units
if add_noise is not None:
self._add_noise(keep, x.shape, **add_noise)
noise = self._samples[keep, :]
if output == 'difference':
output_noise = np.diff(noise, axis=1)[:, shift:]
else: # output == 'absolute'
output_noise = noise[:, shift + 1:]
input_noise = np.concatenate([noise[:, k:-(shift + 1 - k)] for k in range(shift + 1)] + [
np.zeros((self._module.n_u, n_data_points - shift))], axis=0)
else:
output_noise = None
input_noise = None
properties = {
'sampling_time': self._module.solution.dt,
'time': {
'description': ['time...'],
'labels': ['time'],
'units': [self._module.time_unit]
},
'x': {
'description': feature_description,
'labels': feature_labels,
'units': feature_units
},
'y': {
'description': label_description,
'labels': label_labels,
'units': label_units
}
}
self._data_set = DataSet(features, labels, add_time=True, properties=properties,
plot_backend=self._module.solution.plot_backend)
self._data_set.set_data(inputs, outputs, time=time, feature_noise=input_noise, label_noise=output_noise)
def _get_distribution_information(**kwargs) -> (str, Optional[int], Optional[Union[Numeric, NumArray]]):
"""
:param kwargs:
:return:
"""
distribution = kwargs.get('distribution')
if distribution is None:
distribution = 'random_normal'
seed = kwargs.get('seed')
info = None
if distribution == 'random_uniform':
lb = kwargs.get('lb')
if lb is None:
lb = kwargs.get('lower_bound')
ub = kwargs.get('ub')
if ub is None:
ub = kwargs.get('upper_bound')
if lb is None:
warnings.warn("No lower bound was supplied for random uniform distribution. Assuming lower bound of 0.")
lb = 0.
if ub is None:
warnings.warn("No upper bound was supplied for random uniform distribution. Assuming upper bound of 1.")
ub = 1.
info = [lb, ub]
elif distribution == 'random_normal':
std = kwargs.get('std')
if std is None:
var = kwargs.get('var')
if var is not None:
std = np.sqrt(var)
if std is None:
warnings.warn("No standard deviation was supplied for random normal distribution. Assuming a standard "
"deviation of 1.")
std = 1.
info = std
return distribution, seed, info | PypiClean |
/Frontiersman-2.0.2-py3-none-any.whl/frontiersman/client/CardDiscarder.py | import pygame
import pygame_gui as pg_g
from frontiersman.client.GuiConstants import CARD_SIZE, SPACING
class CardDiscarder:
def __init__(self, rect, cards, manager, callback, layer_height, container=None):
rect = pygame.Rect(rect.left, rect.top - rect.height, rect.width, rect.height * 2)
self.manager = manager
self.cards = sorted(cards)
self.selected_cards = [False for _ in self.cards]
self.num_discard_cards = 0
self.card_elements = []
self.callback = callback
self.panel = pg_g.elements.UIPanel(
relative_rect=rect,
starting_layer_height=layer_height,
manager=self.manager,
container=container,
)
area_width = self.panel.relative_rect.width - CARD_SIZE[0] - 8
if len(self.cards) == 1:
offset = 0
else:
offset = min(area_width / (len(self.cards) - 1), CARD_SIZE[0] + SPACING)
start = (area_width - offset * (len(self.cards) - 1)) / 2
for i, card in enumerate(self.cards):
self.card_elements.append(pg_g.elements.UIButton(
relative_rect=pygame.Rect((start + i * offset, self.panel.rect.height / 2), CARD_SIZE),
text="",
manager=self.manager,
container=self.panel,
object_id="#" + card
))
if card not in ['Brick', 'Ore', 'Sheep', 'Wheat', 'Wood']:
self.card_elements[-1].disable()
else:
self.num_discard_cards += 1
self.num_discard_cards //= 2
self.submit_button = pg_g.elements.UIButton(
relative_rect=pygame.Rect((-CARD_SIZE[0], 0), CARD_SIZE),
text="submit",
manager=self.manager,
container=self.panel,
anchors={'left': 'right',
'right': 'right',
'top': 'top',
'bottom': 'top'}
)
self.submit_button.disable()
def kill(self):
self.panel.kill()
def handle_ui_button_pressed(self, event):
element = event.ui_element
if element in self.card_elements:
index = self.card_elements.index(element)
self.selected_cards[index] = not self.selected_cards[index]
if self.selected_cards[index]:
element.rect.top -= self.panel.rect.height / 2
else:
element.rect.top += self.panel.rect.height / 2
element.rebuild()
count = 0
for value in self.selected_cards:
if value:
count += 1
if count == self.num_discard_cards:
self.submit_button.enable()
else:
self.submit_button.disable()
elif element == self.submit_button:
clicked_cards = []
for i, value in enumerate(self.selected_cards):
if value:
clicked_cards.append(self.cards[i])
self.callback(clicked_cards) | PypiClean |
/ImageD11-1.9.9.tar.gz/ImageD11-1.9.9/README.md |
ImageD11 is a python code for identifying individual grains in spotty area detector X-ray diffraction images.
Version 1.9.8, Jon Wright, [email protected]
This is the source code for ImageD11. Probably you wanted a compiled version.
If your pip is up-to-date, you can try to install it like this (numpy is needed
to compile):
```
python -m pip install --upgrade pip setuptools
python -m pip install numpy
python -m pip install ImageD11
```
To get all the possible dependencies too, you can try:
`python -m pip install ImageD11[full]`
Some (dated) documentation is here: https://imaged11.readthedocs.io/
If you are at ESRF on an old linux computer you can try "module load fable".
To use from git, try this:
- Download and install python 3.7+, perhaps from www.python.org but probably from conda.
- Preload binary packages from conda (or your system package manager):
  numpy, scipy, matplotlib, h5py, pillow, pycifrw, xfab, pyqt, silx[full], etc.
- `pip install git+https://github.com/FABLE-3DXRD/ImageD11.git`
If you want to work with the sources then you can try like this:
```
$ python -m pip install --upgrade pip
$ git clone https://github.com/FABLE-3DXRD/ImageD11.git && cd ImageD11
$ python -m pip install --editable .
```
If you want multiple binaries in your home directory (on recent pythons) you can install a wheel per
platform and get the compiled code for each platform in .so files that are labelled by platform.
This is potentially useful for a heterogeneous cluster (like at ESRF):
```
# on ppc64le:
python3 -m pip install dist/ImageD11-1.9.8-cp38-cp38-linux_ppc64le.whl --user --ignore-installed
# on x86_64:
python3 -m pip install dist/ImageD11-1.9.8-cp38-cp38-linux_x86_64.whl --user --ignore-installed
# etc
# ~/.local/lib/python3.8/site-packages/ImageD11 % ls *.so
_cImageD11.cpython-38-powerpc64le-linux-gnu.so _cImageD11.cpython-38-x86_64-linux-gnu.so
```
After it is installed, you should find a script ImageD11_gui.py, somewhere in your path.
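A quick sanity check after installing (a sketch: `pip show` and the import are standard tooling, and
`ImageD11_gui.py` is the script mentioned above):
```
python -m pip show ImageD11          # confirm the package and version pip sees
python -c "import ImageD11"          # the package should import without errors
ImageD11_gui.py                      # launch the graphical interface
```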
Until 2017 this code was mostly developed on sourceforge at http://sourceforge.net/projects/fable/
It is now developed at http://github.com/FABLE-3DXRD/ImageD11
Bug reports are always welcome!
Good luck!
## CI Status
Windows: [](https://ci.appveyor.com/project/jonwright/imaged11)
Linux: [](https://circleci.com/gh/jonwright/ImageD11)
Macos + Linux [](https://travis-ci.com/jonwright/ImageD11)
| PypiClean |
/Nevow-0.14.5.tar.gz/Nevow-0.14.5/NEWS.rst | Nevow 0.14.4 (2018-06-13)
=========================
Bugfixes
--------
- Divmod.Runtime.getAttribute now works again on newer Microsoft Edge versions.
(#102)
Nevow 0.14.3 (2017-07-26)
=========================
Bugfixes
--------
- Athena will now time requests out client-side rather than waiting forever (up
to the browser timeout, at least) for a server response that may never come.
(#98)
Misc
----
- #96
Nevow 0.14.2 (2016-08-29)
=========================
Re-release of 0.14.2 due to a release engineering mistake.
No changes other than the version number.
Nevow 0.14.1 (2016-08-29)
=========================
Features
--------
- Nevow will now correctly map the MIME type of SVG files even if the
platform registry does not have such a mapping. (#88)
- Athena no longer logs widget instantiation on initial page load.
(#92)
Bugfixes
--------
- Nevow's test suite is now compatible with Twisted 16.3. (#82)
- Athena will no longer cause spurious errors resulting from page
disconnection. (#84)
- Athena will now ignore responses to already-responded remote calls
during page shutdown. (#86)
Improved Documentation
----------------------
- Nevow's NEWS file is now generated from news fragments by towncrier.
(#81)
0.14.0 (2016-05-08):
- Fixed compatibility with Twisted 16.1.
- nevow.page rendering speed was increased by about 7% on CPython (2.7.11)
and 58% on PyPy (5.0.1).
0.13.0 (2016-02-16):
- nevow.appserver.Request.headers and received_headers are now deprecated to
follow suit with Twisted; older versions of Nevow will not be compatible
with Twisted 16.0.0 due to these being removed in Twisted.
- nevow.testutil.FakeRequest had similar changes made to assist with
compatibility in test code, and there should be no Nevow code left that
touches the deprecated APIs, but any application code using the old APIs
should be migrated.
- Some very ancient, deprecated things were removed: nevow.canvas,
nevow.livepage, nevow.livetest, nevow.taglibrary (except for the Athena
version of nevow.taglibrary.tabbedPane), nevow.wsgi, and zomne.
0.12.0 (2015-12-16):
- Responses are no longer finished if the connection went away, avoiding an
error message that this used to cause.
- Detach all children on detach (previously only every other child was
detached due to a bug).
- The documentation is now in Sphinx format instead of Lore format.
- Nevow templates now have attributes serialized in a stable order (the
ordering is lexicographic).
- The Athena runtime now works on Internet Explorer versions higher than 7.
- Athena can now handle messages containing `undefined` (this deserializes to
`None`, same as `null`; previously an error would occur).
- Athena no longer logs about receiving duplicate messages (there is nothing
the user / developer can do about these, so the log message didn't serve
any purpose other than flooding your logs).
- connectionMade on Athena widgets is now always invoked before any remote
calls are handled (previously some remote calls might be handled before
connectionMade was invoked).
0.11.1 (2014-06-21):
- The athena-widget twistd plugin is now distributed in the wheel package.
0.11.0 (2014-06-20):
- nevow.json now always emits escaped forms of U+2028 and U+2029.
- formless now works with recent versions of zope.interface without
triggering warnings.
- Several problems in the test suite which resulted in failing tests when
using recent versions of Twisted are now fixed.
- The JavaScript API Divmod.UnitTest.TestCase.assertThrows now accepts
variable arguments to be passed on to the API under test.
- Nevow now unconditionally depends on setuptools for packaging.
- Nevow now uses python-versioneer for version management.
- Nevow now requires Twisted 13.0 or newer.
- The dangerous testing helper module nevow.test.segfault has been removed.
- Nevow is now hosted on Github: https://github.com/twisted/nevow
- Nevow now uses travis-ci for continuous integration:
https://travis-ci.org/twisted/nevow
0.10.0 (2009-11-25):
- Added a system for CSS dependency declarations similar to the one in
Athena for JavaScript.
- Fix Athena's transport cleanup on page unload in Internet Explorer.
- Fix nit's results coloring in Internet Explorer.
- Added an API for declaring JavaScript classes which involves less
repetition than the existing Divmod.Class.subclass API.
- Added human-readable formatting for the new flattener's error reporting;
rendering error stacks will now display lines from Python code as well
as stan and XML templates.
- Override the setuptools sdist command with the original distutils sdist
command to avoid setuptools' version number transformation.
- Added support for default values for slots in XML templates.
- Fixed a problem with setup.py which led to css files not being
installed.
- Removed the old Chatola example and replaced it with a link to the new
chat example.
- Sped up some of the JavaScript dependency calculations.
0.9.33 (2008-12-09):
- Add error handling to the integration between the old flattener
and the new flattener so that if the new flattener fails with an
exception or a Failure the error is propagated properly to the old
flattener which invoked it.
- Changed nit so that it doesn't use private `twistd` APIs and
instead just sets up a server and runs the reactor. This makes
nit work with all versions of Twisted supported by Nevow.
- Changed Nevow's setup.py to use setuptools if setuptools is
available. This has the user-facing consequence of installing
Nevow as an egg if setuptools is available at installation time
  and of making Nevow installable using the `easy_install` tool.
- TabbedPane naively set DOM attributes, making it unusable in
Internet Explorer 6 and 7. Introduced a reliable method for
setting DOM node attributes, with name mangling, to address the
issue.
0.9.32 (2008-08-12):
- A resource wrapper for on-the-fly gzip compression has been added.
- A twistd plugin, 'athena-widget', is now available for serving
single Athena widgets.
- Basic Athena support for Safari added.
- Added file name, line number, and column number information to
slots and tags parsed from XML files in order to make debugging
template/renderer interactions simpler.
- A context-free flattener has been added. Fragment and its
subclasses are now deprecated in favor of Element.
- Javascript classes derived from the tabbedpane class can now
override how tab selection is handled.
0.9.31 (2008-02-06):
- Fixed Guard's request parameter save/restore feature to not
clobber request state after login succeeds when a session has
already been negotiated.
- Added a hook to nevow.guard.SessionWrapper which allows the
domain parameter of the session cookie to be specified.
0.9.30 (2008-01-16):
- Change DeferredSerializer so that it passes failures from the
Deferred being serialized on to the Deferred returned by the
flattening function. Without this behavior, the Deferred
returned by the flattening function is never fired when a
Deferred which fails is serialized.
0.9.29 (2008-01-02):
- Prevent NevowSite.handleSegment from raising IndexError in certain
situations.
- Deprecated wsgi and zomne modules.
0.9.28 (2007-12-10):
- Added two APIs to Athena, one for creating the string used as the id
attribute of the top node of a widget and one for creating the string
used as the id attribute of a node which had an id attribute in the
widget's template document.
0.9.27 (2007-11-27):
- Unicode URLs now supported.
0.9.26 (2007-11-02):
- url.URL.path now correctly escapes segments in the string it
evaluates to.
- inevow.IAthenaTransportable added, along with support for
serialization of custom types for server-to-client Athena
messages.
- Global client-side behaviour is now customizable via a client
PageWidget class.
0.9.25 (2007-10-16):
- The Athena message queue implementation has been improved, fixing problems
masked by bugs in Firebug/YSlow.
0.9.24 (2007-09-05):
- ESC key no longer disconnects Athena connections.
- Fixed a bug where URLs with quote characters will cause the Athena
connection to be lost.
- Fixed 'twistd athena-widget' to create a fresh widget instance for each
hit.
0.9.23 (2007-08-01):
- Fixed install script to include all JavaScript files.
0.9.22 (2007-07-06):
- Mock DOM implementation for easier browser testing added.
- JavaScript source files are now read using universal newlines mode.
- athena.AutoJSPackage now excludes dotfiles.
- url.URL now properly subclassable.
- User-agent parsing added to Athena, to detect known-unsupported browsers.
0.9.21 (2007-06-06):
- Debug logging messages from the reliable message delivery queue
disabled.
0.9.20 (2007-05-24):
- Athena now no longer holds more than one idle transport open to
the browser.
0.9.19 (2007-04-27):
- Changed the styling of the progressbar to work on IE6.
- Athena.Widget.detach added, to allow widgets to cleanly be removed
from a page.
- Athena.Widget.callLater added, a wrapper around setTimeout and
clearTimeout.
- 'athena-widget' twistd command added, for starting a server which
serves a single LiveFragment or LiveElement.
0.9.18 (2007-02-23):
- Athena 'connection lost' notification now styleable via the
'nevow-connection-lost' CSS class.
- The 'runjstests' script has been removed, now that JS tests can be
run with trial.
0.9.17 (2006-12-08):
- More efficient JSON string parsing.
- Give FakeRequests a default status code of OK. Accept all of
FakeRequest.__init__'s arguments in the __init__ of
AccumulatingFakeRequest.
0.9.16 (2006-11-17):
- Updated nit to work with Twisted trunk.
- Athena module import caching has been fixed.
0.9.15 (2006-11-08):
- Changed _LiveMixin rendering to be idempotent to support the case
where a transport hiccup causes a LiveFragment or LiveElement to
be sent to the browser multiple times.
- Improvements to the tests.
0.9.14 (2006-10-31):
- Support code for running non-browser javascript tests has been added.
- Added a workaround for nodeById on widgets not yet added to the document in
IE.
- Athena will now invoke the nodeInserted method (if it exists) on a widget
that it instantiates statically.
- ID rewriting, similar to existing rewriting support for 'id' attributes,
has been added in 'for' and 'headers' attributes of 'label' and 'td'/'th'
elements, respectively.
0.9.13 (2006-10-21):
- Adjust non-selected panes in tabbedpane to be further out of the viewport.
- Convert to using the Javascript module plugin system for Nevow-provided
modules.
0.9.12 (2006-10-17):
- Added id rewriting for LiveElement and LiveFragment, such that id
attributes in a widget template are rewritten so that they are unique to
the widget instance. A client-side API, Nevow.Athena.Widget.nodeById(),
is provided to allow location of these nodes.
0.9.11 (2006-10-10):
- Fixed dynamic widget instantiation in IE.
- Added support for correctly quoting the values of slots which are used as
attributes.
0.9.10 (2006-10-05):
- Minor update to nevow.testutil.
0.9.9 (2006-09-26):
- Several nit changes, including the addition of the "check" method to
Failure, and the addition of an "assertFailure" method.
- The ability to pass Python exceptions to Javascript has been added to
Athena.
- Dynamic module import has been added for the cases where it is necessary
to dynamically add a widget to an existing page.
0.9.8 (2006-09-20):
- A bug in nit that caused it to fail if there were too many tests in a
test case, and swallow failures in some cases, has been fixed.
- Widgets can no longer be added to a page after render time using
Divmod.Runtime.Platform.{set,append}NodeContent. Instead, they must be
added using Nevow.Athena.Widget.addChildWidgetFromWidgetInfo.
0.9.7 (2006-09-12):
- Automatic Athena event handler registration is fixed for all supported browsers
and is no longer document-sensitive (ie, it works inside tables now).
- Nit has gained a new assertion method, assertIn.
0.9.6 (2006-08-30):
- Fixed a bug in the IE implementation of the runtime.js node fetching
functions.
0.9.5 (2006-08-22):
- Instance attributes can now be exposed to Athena with nevow.utils.Expose
and Expose.exposedMethodNames() no longer returns unexposed names.
0.9.4 (2006-08-14):
- Added test method discovery to nit test cases, so multiple test methods
may be put in a single test case.
- use XPath for certain DOM traversals when available. This yields
significant speedups on Opera.
- Made Divmod.Runtime.Platform.getAttribute deal with IE attribute
name-mangling properly.
- Javascript logging is now done in Firebug 0.4 style rather than 0.3.
- Some cases where Deferred-returning render methods raised
exceptions or buried failures were fixed.
- Removed MochiKit. The pieces Nevow depends on have been moved to
Divmod.Base in nevow/base.js.
- Various doc fixes.
0.9.3 (2006-07-17):
- Page rendering now supports preprocessors.
0.9.2 (2006-07-08):
- Fixes to the typeahead demo.
- Elements are now automatically serialized by json, just like Fragments.
0.9.1 (2006-07-05):
- Made nevow.athena.expose the mandatory means of publishing a method to
the browser. The allowedMethods dictionary will no longer be respected.
- Added nevow.page.Element and nevow.athena.LiveElement: these are
preferred over nevow.rend.Fragment and nevow.athena.LiveFragment for all
new development.
0.9.0 (2006-06-12):
- Fixed a bug where nested fragment sending rarely worked.
- Sending large strings in Athena arguments and results is now faster due to
less unnecessary unicode character quoting.
- Module objects are now automatically created for all Athena imports.
- Better error reporting for fragments which are rendered without a parent.
- Disconnect notifiers in Athena pages will no longer clobber each other.
- Many optimizations to javascript initialization.
- Javascript packages are now defined with less boilerplate: a filesystem
convention similar to Python's for module naming, plus one declaration in a
Nevow plugin which indicates the directory, rather than a declaration for
each module.
- Updated README to refer to Athena rather than LivePage

/F5NoMore-0.1.0.tar.gz/F5NoMore-0.1.0/f5nomore/SimpleWebSocketServer.py
from __future__ import print_function
from __future__ import unicode_literals
import traceback
import hashlib
import base64
import socket
import struct
import ssl
import time
import sys
import errno
import logging
from select import select
from io import BytesIO
py3 = (sys.version > '3')
if py3:
import socketserver as SocketServer
from http.server import BaseHTTPRequestHandler
from io import StringIO
py3bytes = bytes
bytes = lambda s: py3bytes(s, 'utf-8')
ord = lambda i: i
else:
import SocketServer
from BaseHTTPServer import BaseHTTPRequestHandler
from StringIO import StringIO
class SocketClosed(Exception):
pass
class HTTPRequest(BaseHTTPRequestHandler):
def __init__(self, request_text):
self.rfile = BytesIO(request_text.encode('iso-8859-1'))
self.raw_requestline = self.rfile.readline()
self.error_code = self.error_message = None
self.parse_request()
class WebSocket(object):
handshakeStr = (
"HTTP/1.1 101 Switching Protocols\r\n"
"Upgrade: WebSocket\r\n"
"Connection: Upgrade\r\n"
"Sec-WebSocket-Accept: %(acceptstr)s\r\n\r\n"
)
hixiehandshakedStr = (
"HTTP/1.1 101 WebSocket Protocol Handshake\r\n"
"Upgrade: WebSocket\r\n"
"Connection: Upgrade\r\n"
"Sec-WebSocket-Origin: %(origin)s\r\n"
"Sec-WebSocket-Location: %(type)s://%(host)s%(location)s\r\n\r\n"
)
GUIDStr = b'258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
STREAM = 0x0
TEXT = 0x1
BINARY = 0x2
CLOSE = 0x8
PING = 0x9
PONG = 0xA
HEADERB1 = 1
HEADERB2 = 3
LENGTHSHORT = 4
LENGTHLONG = 5
MASK = 6
PAYLOAD = 7
def __init__(self, server, sock, address):
self.server = server
self.client = sock
self.address = address
self.handshaked = False
self.headerbuffer = ''
self.readdraftkey = False
self.draftkey = ''
self.headertoread = 2048
self.hixie76 = False
self.fin = 0
self.data = None
self.opcode = 0
self.hasmask = 0
self.maskarray = None
self.length = 0
self.lengtharray = None
self.index = 0
self.request = None
self.usingssl = False
self.state = self.HEADERB1
# restrict the size of header and payload for security reasons
self.maxheader = 65536
self.maxpayload = 4194304
def close(self):
self.client.close()
self.state = self.HEADERB1
self.hasmask = False
self.handshaked = False
self.readdraftkey = False
self.hixie76 = False
self.headertoread = 2048
self.headerbuffer = ''
self.data = ''
def handleMessage(self):
pass
def handleConnected(self):
pass
def handleClose(self):
pass
def handlePacket(self):
# close
if self.opcode == self.CLOSE:
self.sendClose()
raise SocketClosed()
# ping
elif self.opcode == self.PING:
pass
# pong
elif self.opcode == self.PONG:
pass
# data
elif self.opcode == self.STREAM or self.opcode == self.TEXT or self.opcode == self.BINARY:
self.handleMessage()
def handleData(self):
# do the HTTP header and handshake #
if self.handshaked is False:
data = self.client.recv(self.headertoread)
if data:
# accumulate
self.headerbuffer += data.decode('iso-8859-1')
if len(self.headerbuffer) >= self.maxheader:
raise Exception('header exceeded allowable size')
            ## we need to read the entire 8 bytes after the HTTP header, ensure we do
if self.readdraftkey is True:
self.draftkey += self.headerbuffer
read = self.headertoread - len(self.headerbuffer)
if read != 0:
self.headertoread = read
else:
# complete hixie76 handshake
self.handshake_hixie76()
# indicates end of HTTP header
elif '\r\n\r\n' in self.headerbuffer:
self.request = HTTPRequest(self.headerbuffer)
# hixie handshake
if 'Sec-WebSocket-Key1'.lower() in self.request.headers and 'Sec-WebSocket-Key2'.lower() in self.request.headers:
# check if we have the key in our buffer
index = self.headerbuffer.find('\r\n\r\n') + 4
# determine how much of the 8 byte key we have
read = len(self.headerbuffer) - index
# do we have all the 8 bytes we need?
if read < 8:
self.headertoread = 8 - read
self.readdraftkey = True
if read > 0:
self.draftkey += self.headerbuffer[index:index+read]
else:
# get the key
self.draftkey += self.headerbuffer[index:index+8]
# complete hixie handshake
self.handshake_hixie76()
# handshake rfc 6455
elif 'Sec-WebSocket-Key'.lower() in self.request.headers:
key = self.request.headers['Sec-WebSocket-Key'.lower()].encode('iso-8859-1')
hStr = self.handshakeStr % { 'acceptstr' : base64.b64encode(hashlib.sha1(key + self.GUIDStr).digest()).decode() }
self.sendBuffer(hStr)
self.handshaked = True
self.headerbuffer = ''
try:
self.handleConnected()
except:
pass
else:
raise Exception('Sec-WebSocket-Key does not exist')
# remote connection has been closed
else:
raise SocketClosed()
# else do normal data
else:
data = self.client.recv(2048)
if data:
for val in data:
if self.hixie76 is False:
self.parseMessage(ord(val))
else:
self.parseMessage_hixie76(ord(val))
else:
raise SocketClosed()
def handshake_hixie76(self):
k1 = self.request.headers['Sec-WebSocket-Key1'.lower()]
k2 = self.request.headers['Sec-WebSocket-Key2'.lower()]
spaces1 = k1.count(" ")
spaces2 = k2.count(" ")
num1 = int("".join([c for c in k1 if c.isdigit()])) / spaces1
num2 = int("".join([c for c in k2 if c.isdigit()])) / spaces2
key = ''
key += struct.pack('>I', num1)
key += struct.pack('>I', num2)
key += self.draftkey
typestr = 'ws'
if self.usingssl is True:
typestr = 'wss'
response = self.hixiehandshakedStr % { 'type' : typestr, 'origin' : self.request.headers['Origin'.lower()], 'host' : self.request.headers['Host'.lower()], 'location' : self.request.path }
self.sendBuffer(response)
self.sendBuffer(hashlib.md5(key).digest())
self.handshaked = True
self.hixie76 = True
self.headerbuffer = ''
try:
self.handleConnected()
except:
pass
def sendClose(self):
msg = bytearray()
if self.hixie76 is False:
msg.append(0x88)
msg.append(0x00)
self.sendBuffer(msg)
else:
pass
def sendBuffer(self, buff):
size = len(buff)
tosend = size
index = 0
while tosend > 0:
try:
# i should be able to send a bytearray
if isinstance(buff, bytearray):
sent = self.client.send(buff[index:size])
else:
sent = self.client.send(bytes(buff[index:size]))
if sent == 0:
raise RuntimeError("socket connection broken")
index += sent
tosend -= sent
except socket.error as e:
# if we have full buffers then wait for them to drain and try again
if e.errno == errno.EAGAIN:
time.sleep(0.001)
else:
raise e
#if s is a string then websocket TEXT is sent else BINARY
def sendMessage(self, s):
if self.hixie76 is False:
header = bytearray()
isString = isinstance(s, (str if py3 else basestring))
if isString is True:
header.append(0x81)
else:
header.append(0x82)
b2 = 0
length = len(s)
if length <= 125:
b2 |= length
header.append(b2)
elif length >= 126 and length <= 65535:
b2 |= 126
header.append(b2)
header.extend(struct.pack("!H", length))
else:
b2 |= 127
header.append(b2)
header.extend(struct.pack("!Q", length))
if length > 0:
self.sendBuffer(header + bytes(s))
else:
self.sendBuffer(header)
header = None
else:
msg = bytearray()
msg.append(0)
if len(s) > 0:
msg.extend(str(s).encode("UTF8"))
msg.append(0xFF)
self.sendBuffer(msg)
msg = None
def parseMessage_hixie76(self, byte):
if self.state == self.HEADERB1:
if byte == 0:
self.state = self.PAYLOAD
self.data = bytearray()
elif self.state == self.PAYLOAD:
if byte == 0xFF:
self.opcode = 1
self.length = len(self.data)
try:
self.handlePacket()
finally:
self.data = None
self.state = self.HEADERB1
else :
self.data.append(byte)
# if length exceeds allowable size then we except and remove the connection
if len(self.data) >= self.maxpayload:
raise Exception('payload exceeded allowable size')
def parseMessage(self, byte):
# read in the header
if self.state == self.HEADERB1:
# fin
self.fin = (byte & 0x80)
# get opcode
self.opcode = (byte & 0x0F)
self.state = self.HEADERB2
elif self.state == self.HEADERB2:
mask = byte & 0x80
length = byte & 0x7F
if mask == 128:
self.hasmask = True
else:
self.hasmask = False
if length <= 125:
self.length = length
# if we have a mask we must read it
if self.hasmask is True:
self.maskarray = bytearray()
self.state = self.MASK
else:
# if there is no mask and no payload we are done
if self.length <= 0:
try:
self.handlePacket()
finally:
self.state = self.HEADERB1
self.data = None
# we have no mask and some payload
else:
self.index = 0
self.data = bytearray()
self.state = self.PAYLOAD
elif length == 126:
self.lengtharray = bytearray()
self.state = self.LENGTHSHORT
elif length == 127:
self.lengtharray = bytearray()
self.state = self.LENGTHLONG
elif self.state == self.LENGTHSHORT:
self.lengtharray.append(byte)
if len(self.lengtharray) > 2:
raise Exception('short length exceeded allowable size')
if len(self.lengtharray) == 2:
self.length = struct.unpack_from(b'!H', self.lengtharray)[0]
if self.hasmask is True:
self.maskarray = bytearray()
self.state = self.MASK
else:
# if there is no mask and no payload we are done
if self.length <= 0:
try:
self.handlePacket()
finally:
self.state = self.HEADERB1
self.data = None
# we have no mask and some payload
else:
self.index = 0
self.data = bytearray()
self.state = self.PAYLOAD
elif self.state == self.LENGTHLONG:
self.lengtharray.append(byte)
if len(self.lengtharray) > 8:
raise Exception('long length exceeded allowable size')
if len(self.lengtharray) == 8:
                self.length = struct.unpack_from(b'!Q', self.lengtharray)[0]
if self.hasmask is True:
self.maskarray = bytearray()
self.state = self.MASK
else:
# if there is no mask and no payload we are done
if self.length <= 0:
try:
self.handlePacket()
finally:
self.state = self.HEADERB1
self.data = None
# we have no mask and some payload
else:
self.index = 0
self.data = bytearray()
self.state = self.PAYLOAD
# MASK STATE
elif self.state == self.MASK:
self.maskarray.append(byte)
if len(self.maskarray) > 4:
raise Exception('mask exceeded allowable size')
if len(self.maskarray) == 4:
# if there is no mask and no payload we are done
if self.length <= 0:
try:
self.handlePacket()
finally:
self.state = self.HEADERB1
self.data = None
# we have no mask and some payload
else:
self.index = 0
self.data = bytearray()
self.state = self.PAYLOAD
# PAYLOAD STATE
elif self.state == self.PAYLOAD:
if self.hasmask is True:
self.data.append( byte ^ self.maskarray[self.index % 4] )
else:
self.data.append( byte )
# if length exceeds allowable size then we except and remove the connection
if len(self.data) >= self.maxpayload:
raise Exception('payload exceeded allowable size')
# check if we have processed length bytes; if so we are done
if (self.index+1) == self.length:
try:
self.handlePacket()
finally:
self.state = self.HEADERB1
self.data = None
else:
self.index += 1
class SimpleWebSocketServer(object):
def __init__(self, host, port, websocketclass):
self.websocketclass = websocketclass
self.serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.serversocket.bind((host, port))
self.serversocket.listen(5)
self.connections = {}
self.listeners = [self.serversocket]
def decorateSocket(self, sock):
return sock
def constructWebSocket(self, sock, address):
return self.websocketclass(self, sock, address)
def close(self):
self.serversocket.close()
for conn in list(self.connections.values()):
try:
conn.handleClose()
except:
pass
conn.close()
def serveforever(self):
while True:
rList, wList, xList = select(self.listeners, [], self.listeners, 1)
for ready in rList:
                if ready == self.serversocket:
                    sock = None
                    address = None
                    try:
sock, address = self.serversocket.accept()
newsock = self.decorateSocket(sock)
newsock.setblocking(0)
fileno = newsock.fileno()
self.listeners.append(fileno)
self.connections[fileno] = self.constructWebSocket(newsock, address)
except Exception as n:
logging.debug(str(address) + ' ' + str(n))
if sock is not None:
sock.close()
else:
client = self.connections[ready]
fileno = client.client.fileno()
try:
client.handleData()
except Exception as n:
if(not isinstance(n, SocketClosed)):
traceback.print_exc()
try:
client.handleClose()
except:
pass
client.close()
del self.connections[fileno]
self.listeners.remove(ready)
for failed in xList:
if failed == self.serversocket:
self.close()
raise Exception("server socket failed")
else:
client = self.connections[failed]
fileno = client.client.fileno()
try:
client.handleClose()
except:
pass
client.close()
del self.connections[fileno]
self.listeners.remove(failed)
class SimpleSSLWebSocketServer(SimpleWebSocketServer):
def __init__(self, host, port, websocketclass, certfile, keyfile, version = ssl.PROTOCOL_TLSv1):
SimpleWebSocketServer.__init__(self, host, port, websocketclass)
self.cerfile = certfile
self.keyfile = keyfile
self.version = version
def close(self):
super(SimpleSSLWebSocketServer, self).close()
def decorateSocket(self, sock):
sslsock = ssl.wrap_socket(sock,
server_side=True,
certfile=self.cerfile,
keyfile=self.keyfile,
ssl_version=self.version)
return sslsock
def constructWebSocket(self, sock, address):
ws = self.websocketclass(self, sock, address)
ws.usingssl = True
return ws
def serveforever(self):
        super(SimpleSSLWebSocketServer, self).serveforever()
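
# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# F5NoMore module).  It wires the classes above into a minimal echo server.
# The port number is an arbitrary assumption, and text frames are assumed to
# be UTF-8 so the bytearray payload in self.data can be decoded before being
# echoed back through sendMessage().
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    class EchoHandler(WebSocket):
        def handleMessage(self):
            if self.data is None:
                return
            # self.data is the bytearray assembled by parseMessage()
            self.sendMessage(self.data.decode('utf-8'))

        def handleConnected(self):
            print('connected:', self.address)

        def handleClose(self):
            print('closed:', self.address)

    server = SimpleWebSocketServer('', 8000, EchoHandler)
    server.serveforever()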

/BxModels-0.4.0-py3-none-any.whl/bxmodels/bayes/utils.py
import math
import torch
def log_normal(x, means, covars):
"""
Computes the log-probability of the given data for multiple multivariate normal distributions
defined by their means and covariances.
Parameters:
-----------
- x: torch.Tensor [N, D]
The data to compute the log-probability for.
- means: torch.Tensor [K, D]
The means of the distributions.
- covars: torch.Tensor ([K, D] or [D] or [K])
The covariances of the distributions, depending on the covariance type. In the first case,
each distribution is assumed to have its own diagonal covariance matrix, in the second case,
the covariance matrix is shared among all distributions, and in the last one, the
covariance matrix is spherical. The type of covariance is therefore inferred by the size of
the input. If the dimension does not fit any of the documented sizes, no error will be
thrown but the result is undefined.
Returns:
--------
- torch.Tensor [N, K]
The log-probabilities for every input and distribution.
"""
num_features = x.size(1)
precisions = 1 / covars
if covars.size(0) == num_features: # shared diagonal covariance
num_means = means.size(0)
precisions = precisions.view(1, num_features).expand(
num_means, num_features
)
if precisions.dim() == 2: # diagonal covariance
cov_det = (-precisions.log()).sum(1)
x_prob = torch.matmul(x * x, precisions.t())
m_prob = torch.einsum('ij,ij,ij->i', means, means, precisions)
xm_prob = torch.matmul(x, (means * precisions).t())
else: # spherical covariance
cov_det = -precisions.log() * num_features
x_prob = torch.ger(torch.einsum('ij,ij->i', x, x), precisions)
m_prob = torch.einsum('ij,ij->i', means, means) * precisions
xm_prob = torch.matmul(x, means.t() * precisions)
constant = math.log(2 * math.pi) * num_features
log_prob = x_prob - 2 * xm_prob + m_prob
return -0.5 * (constant + cov_det + log_prob)
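# Example (added sketch, not from the original module): for N=128 points in
# D=5 dimensions and K=3 components with per-component diagonal covariances,
# the shapes line up as follows.
#
#   x = torch.randn(128, 5)
#   means = torch.randn(3, 5)
#   covars = torch.rand(3, 5) + 0.1           # [K, D] -> diagonal covariances
#   log_probs = log_normal(x, means, covars)  # -> shape [128, 3]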
def log_responsibilities(log_probs, comp_priors, return_log_likelihood=False):
"""
Computes the log-responsibilities of some data based on their log-
probabilities for all components of a gaussian mixture model and the
components' weights.
Parameters:
-----------
- log_probs: torch.Tensor [N, K]
The log-probabilities of N datapoints for each of K distributions.
- comp_priors: torch.Tensor [K]
The prior probabilities for each of the K distributions.
- return_log_likelihood: bool, default: False
Whether to return the log-likelihood of observing the data given the log-probabilities and
the component priors.
Returns:
--------
- torch.Tensor [N, K]
The log-responsibilities for all datapoints and distributions.
- torch.Tensor [N]
If `return_log_likelihood` is `True`, the log-likelihood of observing each of the
datapoints ("evidence"). To compute the log-likelihood of the entire data, sum these
log-likelihoods up.
"""
posterior = log_probs + comp_priors.log()
evidence = torch.logsumexp(posterior, 1, keepdim=True)
log_resp = posterior - evidence
if return_log_likelihood:
return log_resp, evidence.sum()
return log_resp
def max_likeli_means(data, responsibilities, comp_sums=None):
"""
Maximizes the likelihood of the data with respect to the means for a gaussian mixture model.
Parameters:
-----------
- data: torch.Tensor [N, D]
The data for which to maximize the likelihood.
- responsibilities: torch.Tensor [N, K]
The responsibilities for each datapoint and component.
    - comp_sums: torch.Tensor [K], default: None
        The cumulative probabilities for all components. If not given, the returned means are left
        unnormalized, which is useful e.g. for implementing mini-batch GMM.
Returns:
--------
- torch.Tensor [K, D]
The likelihood-maximizing means.
"""
means = torch.matmul(responsibilities.t(), data)
if comp_sums is not None:
return means / comp_sums.unsqueeze(1)
return means
def max_likeli_covars(data, responsibilities, comp_sums, means, covar_type, reg=1e-6):
"""
Maximizes the likelihood of the data with respect to the covariances for a gaussian mixture
model.
Parameters:
-----------
- data: torch.Tensor [N, D]
The data for which to maximize the likelihood.
- responsibilities: torch.Tensor [N, K]
The responsibilities for each datapoint and component.
- comp_sums: torch.Tensor [K]
The cumulative probabilities for all components.
- means: torch.Tensor [K, D]
The means of all components.
- covar_type: str
The type of the covariance to maximize the likelihood for. Must be one of ('diag',
'diag-shared', 'spherical').
- reg: float, default: 1e-6
Regularization term added to the covariance to ensure that it is positive.
Returns:
--------
- torch.Tensor ([K, D] or [D] or [K])
The likelihood-maximizing covariances where the shape depends on the given covariance type.
Note:
-----
Implementation is adapted from scikit-learn.
"""
if covar_type == 'diag':
return _max_likeli_diag_covars(data, responsibilities, comp_sums, means, reg)
if covar_type == 'diag-shared':
return _max_likeli_shared_diag_covars(data, comp_sums, means, reg)
return _max_likeli_diag_covars(data, responsibilities, comp_sums, means, reg).mean(1)
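# Sketch of how the helpers above compose into one EM iteration of a diagonal-
# covariance GMM (added for illustration; the variable names are assumptions,
# not part of this module's API):
#
#   log_probs = log_normal(x, means, covars)                       # [N, K]
#   log_resp, log_likelihood = log_responsibilities(
#       log_probs, priors, return_log_likelihood=True)
#   resp = log_resp.exp()                                          # [N, K]
#   comp_sums = resp.sum(0)                                        # [K]
#   means = max_likeli_means(x, resp, comp_sums)                   # [K, D]
#   covars = max_likeli_covars(x, resp, comp_sums, means, 'diag')  # [K, D]
#   priors = comp_sums / comp_sums.sum()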
def _max_likeli_diag_covars(data, responsibilities, comp_sums, means, reg):
denom = comp_sums.unsqueeze(1)
x_sq = torch.matmul(responsibilities.t(), data * data) / denom
m_sq = means * means
xm = means * torch.matmul(responsibilities.t(), data) / denom
return x_sq - 2 * xm + m_sq + reg
def _max_likeli_shared_diag_covars(data, comp_sums, means, reg):
x_sq = torch.matmul(data.t(), data)
m_sq = torch.matmul(comp_sums * means.t(), means)
return (x_sq - m_sq) / comp_sums.sum() + reg
def power_iteration(A, eps=1e-7, max_iterations=100):
"""
Computes the eigenvector corresponding to the largest eigenvalue of the given square matrix.
Parameters:
-----------
- A: torch.Tensor [N, N]
The square matrix for which to compute the eigenvector corresponding to the largest
eigenvalue.
- eps: float, default: 1e-7
Convergence criterion. When the change in the vector norm is less than the given value,
iteration is stopped.
- max_iterations: int, default: 100
The maximum number of iterations to do when the epsilon-based convergence criterion does
not kick in.
Returns:
--------
- torch.Tensor [N]
The eigenvector corresponding to the largest eigenvalue of the given square matrix.
"""
v = torch.rand(A.size(0), device=A.device)
for _ in range(max_iterations):
v_old = v
v = A.mv(v)
v = v / v.norm()
if (v - v_old).norm() < eps:
break
    return v
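
# Illustrative smoke test (added sketch; not part of the original module).
# It checks that power_iteration() recovers the dominant eigenvector of a
# symmetric positive semi-definite matrix built from random data.
if __name__ == '__main__':
    b = torch.randn(10, 10)
    a = b.t().matmul(b)  # symmetric PSD matrix
    v = power_iteration(a)
    # The Rayleigh quotient approximates the largest eigenvalue of `a`.
    print((v.dot(a.mv(v)) / v.dot(v)).item())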

/CloudHummingbird-1.1.0-py3-none-any.whl/Hummingbird/scheduler.py
import json
import logging
import os
import subprocess
import sys
import time
from datetime import datetime, timedelta
from typing import List
from retry import retry
from .errors import SchedulerException
from .hummingbird_utils import PLATFORM
class Scheduler(object):
"""Dsub scheduler construction and execution."""
def __init__(self, tool, conf):
self.tool = tool
if self.tool == 'dsub':
self.cmd = 'dsub'
self.add_argument('--provider', 'google-v2')
self.add_argument('--project', conf['Platform']['project'])
if 'zones' in conf['Platform']:
self.add_argument('--zones', conf['Platform']['zones'])
else:
self.add_argument('--regions', conf['Platform']['regions'])
def add_argument(self, argname, value=None):
"""Add one argument to cmd string."""
if value:
self.cmd += ' ' + argname + ' ' + value
else:
self.cmd += ' ' + argname
def run(self):
"""Run cmd as subprocess and return the Popen object."""
logging.debug(self.cmd)
return subprocess.Popen(self.cmd, shell=True)
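# Example (added sketch, not Hummingbird's own invocation): building and
# launching a dsub command with the wrapper above.  The '--image', '--script'
# and '--logging' values are placeholders, and the conf dict only contains the
# keys that __init__ actually reads.
#
#   conf = {'Platform': {'project': 'my-gcp-project', 'regions': 'us-central1'}}
#   sched = Scheduler('dsub', conf)
#   sched.add_argument('--image', 'ubuntu:18.04')
#   sched.add_argument('--script', 'scripts/task.sh')
#   sched.add_argument('--logging', 'gs://my-bucket/logs')
#   proc = sched.run()   # subprocess.Popen running the assembled command
#   proc.wait()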
class BaseBatchSchduler(object):
job_def_name = 'hummingbird-job'
compute_env_prefix = 'hummingbird-env-'
class AWSBatchScheduler(BaseBatchSchduler):
def __init__(self, conf, machine, disk_size, script, **kwargs):
self.conf = conf
self.machine = machine
self.disk_size = disk_size
self.script = script
self.image = kwargs.get('image')
self.region = conf[PLATFORM]['regions']
import boto3
self.batch_client = boto3.client('batch', region_name=self.region)
self.ec2_client = boto3.client('ec2', region_name=self.region)
self.s3_bucket = boto3.resource('s3').Bucket(self.conf[PLATFORM]['bucket'])
self.cf_client = boto3.client('cloudformation', region_name=self.region)
self.cf_stack_name = conf[PLATFORM]['cloudformation_stack_name']
super(AWSBatchScheduler, self).__init__()
def create_or_update_launch_template(self):
with open('AWS/launch-template-data.json') as f:
data = json.load(f)
data['LaunchTemplateData']['BlockDeviceMappings'][-1]['Ebs']['VolumeSize'] = int(self.disk_size)
from botocore.exceptions import ClientError
try:
response = self.ec2_client.describe_launch_templates(LaunchTemplateNames=[data['LaunchTemplateName']])
except ClientError:
response = {}
if not response.get('LaunchTemplates'):
logging.info('Creating launch template %s as it does not exist', data['LaunchTemplateName'])
self.ec2_client.create_launch_template(**data)
else:
logging.info('Creating a new version for launch template %s', data['LaunchTemplateName'])
self.ec2_client.create_launch_template_version(**data)
def create_or_update_compute_environment(self, cf_output):
with open('AWS/compute_environment.json') as f:
data = json.load(f)
compute_env_name = self.cf_stack_name + '-' + self.machine.name.replace('.', '_') + '-' + str(self.disk_size)
desc_json = self.batch_client.describe_compute_environments(computeEnvironments=[compute_env_name])
if desc_json['computeEnvironments']:
logging.info('Skipping creation of AWS Batch Compute environment %s as it already exists', compute_env_name)
return compute_env_name
compute_resources = data['computeResources']
data['computeEnvironmentName'] = compute_env_name
compute_resources['instanceTypes'].append(self.machine.name)
if 'EC2KeyPair' in cf_output and cf_output['EC2KeyPair']:
            compute_resources['ec2KeyPair'] = cf_output['EC2KeyPair']
data['serviceRole'] = cf_output['BatchServiceRoleARN']
compute_resources['subnets'] = [cf_output['PrivateSubnet1'], cf_output['PrivateSubnet2']]
compute_resources['securityGroupIds'] = [cf_output['BatchEC2SecurityGroup']]
compute_resources['instanceRole'] = cf_output['ECSInstanceProfileRoleARN']
data['tags'] = {'Name': compute_env_name}
logging.info('Attempting to create AWS Batch Compute environment: %s', compute_env_name)
self.batch_client.create_compute_environment(**data)
import botocore.waiter
try:
logging.info('Waiting for AWS Batch Compute environment %s to provision...', compute_env_name)
waiter = self.get_compute_environment_waiter(compute_env_name)
waiter.wait(computeEnvironments=[compute_env_name])
except botocore.waiter.WaiterError as e:
msg = f"There was an error with the AWS Batch Compute Environment: {compute_env_name}"
logging.exception(msg)
raise SchedulerException(msg)
logging.info('Successfully created AWS Batch Compute environment: %s', compute_env_name)
return compute_env_name
def get_compute_environment_waiter(self, waiter_id):
from botocore.waiter import WaiterModel
model = WaiterModel({
'version': 2,
'waiters': {
waiter_id: {
'delay': 1,
'operation': 'DescribeComputeEnvironments',
'maxAttempts': 20,
'acceptors': [
{
'expected': 'VALID',
'matcher': 'pathAll',
'state': 'success',
'argument': 'computeEnvironments[].status'
},
{
'expected': 'INVALID',
'matcher': 'pathAny',
'state': 'failure',
'argument': 'computeEnvironments[].status'
}
]
}
}
})
from botocore import waiter
return waiter.create_waiter_with_client(waiter_id, model, self.batch_client)
def create_or_update_job_queue(self, env_name):
job_queue_name = env_name + '-queue'
desc_json = self.batch_client.describe_job_queues(jobQueues=[job_queue_name])
env = {"order": 1, "computeEnvironment": env_name}
data = {'computeEnvironmentOrder': [env]}
if desc_json['jobQueues']: # Create if not exist
data["jobQueue"] = job_queue_name
logging.info('Attempting to update AWS Batch Job Queue: %s', job_queue_name)
self.batch_client.update_job_queue(**data)
else:
data['jobQueueName'] = job_queue_name
data['state'] = 'ENABLED'
data['priority'] = 100
data['tags'] = {'Name': job_queue_name, 'ComputeEnvironment': env_name}
logging.info('Attempting to create AWS Batch Job Queue: %s', job_queue_name)
self.batch_client.create_job_queue(**data)
from botocore.waiter import WaiterError
try:
logging.info('Ensuring AWS Batch Job Queue %s is valid...', job_queue_name)
job_queue_waiter = self.get_compute_job_queue_waiter(job_queue_name)
job_queue_waiter.wait(jobQueues=[job_queue_name])
logging.info('AWS Batch Job Queue %s is valid', job_queue_name)
except WaiterError as e:
msg = f"There was an error with the AWS Batch Job Queue: {job_queue_name}"
logging.exception(msg)
raise SchedulerException(msg)
return job_queue_name
def register_job_definition(self, cf_output, compute_env_name, job_queue_name):
with open('AWS/job-definition.json') as f:
data = json.load(f)
data['containerProperties']['vcpus'] = self.machine.cpu
data['containerProperties']['memory'] = int(self.machine.mem) * 1024
data['containerProperties']['jobRoleArn'] = cf_output['ECSTaskExecutionRoleARN']
if self.image:
data['containerProperties']['image'] = self.image
job_definition_name = data.get('jobDefinitionName', self.job_def_name)
data.setdefault('tags', {})
data['tags'].update({'Name': job_definition_name, 'ComputeEnvironment': compute_env_name, 'JobQueue': job_queue_name})
self.batch_client.register_job_definition(**data)
logging.info('Successfully registered AWS Batch Job Definition: %s', job_definition_name)
return job_definition_name
def get_cf_stack_output(self):
logging.info('Attempting to query Cloudformation Stack: %s', self.cf_stack_name)
response = self.cf_client.describe_stacks(StackName=self.cf_stack_name)
stacks = response['Stacks']
if not stacks or 'Outputs' not in stacks[0] or not stacks[0]['Outputs']:
msg = f"Unable to query Cloudformation Stack {self.cf_stack_name}"
logging.exception(msg)
raise SchedulerException(msg)
cf_output = {}
for key in ['PrivateSubnet1', 'PrivateSubnet2', 'BatchEC2SecurityGroup', 'ECSInstanceProfileRoleARN', 'ECSTaskExecutionRoleARN', 'BatchServiceRoleARN']:
for kv in stacks[0]['Outputs']:
if kv['OutputKey'] == key:
cf_output[key] = kv['OutputValue']
if key not in cf_output:
msg = f"Cloudformation stack {self.cf_stack_name} is missing required output: {key}"
logging.exception(msg)
raise SchedulerException(msg)
logging.info('Successfully queried Cloudformation Stack: %s', self.cf_stack_name)
return cf_output
def submit_job(self, tries=1):
cf_output = self.get_cf_stack_output()
self.create_or_update_launch_template()
compute_env_name = self.create_or_update_compute_environment(cf_output)
job_queue_name = self.create_or_update_job_queue(compute_env_name)
job_definition_name = self.register_job_definition(cf_output, compute_env_name, job_queue_name)
jobname = os.path.basename(self.script)
s3_path = 'script/' + jobname + '.sh'
self.s3_bucket.upload_file(self.script, s3_path)
data = dict()
data['vcpus'] = self.machine.cpu
data['memory'] = int(self.machine.mem * 1024 * 0.9)
data['command'] = [jobname + '.sh']
data['environment'] = [
{"name": "BATCH_FILE_TYPE", "value": "script"},
{"name": "BATCH_FILE_S3_URL", "value": "s3://{}/{}".format(self.conf[PLATFORM]['bucket'], s3_path)}
]
arguments = {
'jobName': jobname,
'jobQueue': job_queue_name,
'jobDefinition': job_definition_name,
'containerOverrides': data,
'tags': {
'Name': jobname,
'ComputeEnvironment': compute_env_name,
'JobQueue': job_queue_name,
'JobDefinitionName': job_definition_name
},
'propagateTags': True
}
if tries > 1:
arguments['arrayProperties'] = {'size': tries}
desc_json = self.batch_client.submit_job(**arguments)
job_id = desc_json['jobId']
logging.info('You can observe the job status via AWS Console: '
'https://console.aws.amazon.com/batch/home?region=%s#jobs/%s/%s',
self.region, 'array-job' if tries > 1 else 'detail', job_id)
return job_id
def wait_jobs(self, jobs_list):
from botocore.waiter import WaiterError
waiter_id = '_'.join(jobs_list)
logging.info('Waiting for AWS Batch Jobs %s to finish...', jobs_list)
try:
job_waiter = self.get_compute_job_waiter(waiter_id)
job_waiter.wait(jobs=jobs_list)
except WaiterError as e:
msg = f"There was an error with AWS Batch Jobs {jobs_list}"
logging.exception(msg)
raise SchedulerException(msg)
logging.info('AWS Batch Jobs %s have completed', jobs_list)
def get_compute_job_waiter(self, waiter_id):
from botocore.waiter import WaiterModel, create_waiter_with_client
model = WaiterModel({
'version': 2,
'waiters': {
waiter_id: {
'delay': 60,
'operation': 'DescribeJobs',
'maxAttempts': 24 * 60 * 2, # timeout of 2 days
'acceptors': [
{
'expected': 'SUCCEEDED',
'matcher': 'pathAll',
'state': 'success',
'argument': 'jobs[].status'
},
{
'expected': 'FAILED',
'matcher': 'pathAny',
'state': 'failure',
'argument': 'jobs[].status'
}
]
}
}
})
return create_waiter_with_client(waiter_id, model, self.batch_client)
def get_compute_job_queue_waiter(self, waiter_id):
from botocore.waiter import WaiterModel
model = WaiterModel({
'version': 2,
'waiters': {
waiter_id: {
'delay': 10,
'operation': 'DescribeJobQueues',
'maxAttempts': 20,
'acceptors': [
{
'expected': 'VALID',
'matcher': 'pathAll',
'state': 'success',
'argument': 'jobQueues[].status'
},
{
'expected': 'INVALID',
'matcher': 'pathAny',
'state': 'failure',
'argument': 'jobQueues[].status'
}
]
}
}
})
from botocore import waiter
return waiter.create_waiter_with_client(waiter_id, model, self.batch_client)
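# Example (added sketch): driving the AWS flow end to end.  `machine` stands in
# for Hummingbird's machine descriptor; only the attributes the class reads
# (name, cpu, mem) are assumed, and the bucket/region/stack values are
# placeholders rather than real configuration.
#
#   from types import SimpleNamespace
#   conf = {PLATFORM: {'regions': 'us-east-1', 'bucket': 'my-bucket',
#                      'cloudformation_stack_name': 'hummingbird'}}
#   machine = SimpleNamespace(name='m5.xlarge', cpu=4, mem=16)
#   scheduler = AWSBatchScheduler(conf, machine, disk_size=100,
#                                 script='/tmp/profile.sh')
#   job_id = scheduler.submit_job(tries=1)
#   scheduler.wait_jobs([job_id])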
class AzureBatchScheduler(BaseBatchSchduler):
def __init__(self, conf, machine, disk_size, script, **kwargs):
self.conf = conf
self.machine = machine
self.disk_size = disk_size
self.script = script
self.script_target_name = os.path.basename(self.script) + '.sh' if script else None
self.task_definition = self._get_task_definition()
self.image = kwargs.get('image', self.task_definition['image'])
self.batch_client = self._get_azure_batch_client(conf)
self.container_client = self._get_azure_container_client(conf)
super(AzureBatchScheduler, self).__init__()
@staticmethod
def _get_azure_batch_client(conf):
from azure.batch import batch_auth, BatchServiceClient
creds = batch_auth.SharedKeyCredentials(conf[PLATFORM]['batch_account'], conf[PLATFORM]['batch_key'])
batch_url = f"https://{conf[PLATFORM]['batch_account']}.{conf[PLATFORM]['location']}.batch.azure.com"
return BatchServiceClient(creds, batch_url=batch_url)
@staticmethod
def _get_azure_container_client(conf):
from azure.storage.blob import BlobServiceClient
client = BlobServiceClient.from_connection_string(conf[PLATFORM]['storage_connection_string'])
container_client = client.get_container_client(container=conf[PLATFORM]['storage_container'])
return container_client
@staticmethod
def _get_task_definition():
path = os.path.join(os.path.dirname(__file__), 'Azure/task.json')
with open(path, 'r') as task:
return json.load(task)[0]
@retry(tries=3, delay=1)
def create_pool(self):
from azure.batch import models as batchmodels
pool_id = self.compute_env_prefix + self.machine.name + '-' + str(self.disk_size)
pool = self.get_pool(pool_id)
if pool is not None:
return pool_id
sku_to_use, image_ref_to_use = self.select_latest_verified_vm_image_with_node_agent_sku()
container_configuration = batchmodels.ContainerConfiguration(container_image_names=[self.image])
config = batchmodels.VirtualMachineConfiguration(
image_reference=image_ref_to_use,
node_agent_sku_id=sku_to_use,
data_disks=[batchmodels.DataDisk(disk_size_gb=self.disk_size, lun=1)],
container_configuration=container_configuration,
)
pool = batchmodels.PoolAddParameter(
id=pool_id,
display_name=pool_id,
virtual_machine_configuration=config,
vm_size=self.machine.name,
)
if self.conf[PLATFORM].get('low_priority', False):
pool.target_low_priority_nodes = 1
else:
pool.target_dedicated_nodes = 1
self.batch_client.pool.add(pool)
while self.get_pool(pool_id) is None:
time.sleep(1)
return pool_id
@retry(tries=3, delay=1)
def get_pool(self, name: str):
from azure.batch.models import BatchErrorException
try:
pool = self.batch_client.pool.get(name)
if pool and getattr(pool, 'id') == name:
return pool
except BatchErrorException:
pool = None
return pool
@retry(tries=3, delay=1)
def select_latest_verified_vm_image_with_node_agent_sku(
self, publisher='microsoft-azure-batch', offer='ubuntu-server-container', sku_starts_with='16-04'):
"""Select the latest verified image that Azure Batch supports given
a publisher, offer and sku (starts with filter).
:param str publisher: vm image publisher
:param str offer: vm image offer
:param str sku_starts_with: vm sku starts with filter
:rtype: tuple
:return: (node agent sku id to use, vm image ref to use)
"""
# get verified vm image list and node agent sku ids from service
from azure.batch import models as batchmodels
options = batchmodels.AccountListSupportedImagesOptions(filter="verificationType eq 'verified'")
images = self.batch_client.account.list_supported_images(account_list_supported_images_options=options)
# pick the latest supported sku
skus_to_use = []
for image in images:
if image.image_reference.publisher.lower() == publisher.lower() \
and image.image_reference.offer.lower() == offer.lower() \
and image.image_reference.sku.startswith(sku_starts_with):
skus_to_use.append((image.node_agent_sku_id, image.image_reference))
# pick first
agent_sku_id, image_ref_to_use = skus_to_use[0]
return agent_sku_id, image_ref_to_use
@retry(tries=3, delay=1)
def create_job(self, pool_id: str):
from azure.batch import models as batchmodels
job_queue_name = pool_id + '-queue'
job = batchmodels.JobAddParameter(
id=job_queue_name,
display_name=job_queue_name,
pool_info=batchmodels.PoolInformation(pool_id=pool_id)
)
try:
self.batch_client.job.add(job)
except batchmodels.BatchErrorException as err:
if err.error.code != "JobExists":
raise SchedulerException(f"Unable to create job {job_queue_name}")
else:
logging.info("Job {!r} already exists".format(job_queue_name))
return job
@retry(tries=3, delay=1)
def add_task(self, job_id: str, default_max_tries=None):
"""
        Adds a single task, built from the task definition, to the specified job.
        :param str job_id: The ID of the job to which to add the task.
:param int default_max_tries: Fallback max tries.
:output task: Azure Batch task
"""
from azure.batch import models as batchmodels
if 'id' in self.task_definition:
task_id = self.task_definition.get('id')
else:
task_id = os.path.basename(self.script)
display_name = self.task_definition.get('displayName', task_id)
logging.info('Adding {} tasks to job [{}]...'.format(task_id, job_id))
container_settings = batchmodels.TaskContainerSettings(
image_name=self.image,
container_run_options='--rm'
)
platform = self.conf[PLATFORM]
environment_settings = [
batchmodels.EnvironmentSetting(name='AZURE_SUBSCRIPTION_ID', value=platform['subscription']),
batchmodels.EnvironmentSetting(name='AZURE_STORAGE_ACCOUNT', value=platform['storage_account']),
batchmodels.EnvironmentSetting(name='AZURE_STORAGE_CONTAINER', value=platform['storage_container']),
batchmodels.EnvironmentSetting(name='AZURE_STORAGE_CONNECTION_STRING',
value=platform['storage_connection_string']),
batchmodels.EnvironmentSetting(name='BLOB_NAME', value=self.script_target_name),
]
if 'environmentSettings' in self.task_definition and self.task_definition['environmentSettings'] is not None:
environment_settings.extend([
batchmodels.EnvironmentSetting(**setting) for setting in self.task_definition['environmentSettings']
])
constraints = None
if 'constraints' in self.task_definition and self.task_definition['constraints']:
constraints = batchmodels.TaskConstraints(
max_wall_clock_time=self.task_definition['constraints'].get('maxWallClockTime', "P1D"),
max_task_retry_count=self.task_definition['constraints'].get('maxTaskRetryCount', default_max_tries),
retention_time=self.task_definition['constraints'].get('retentionTime', "P1D"),
            )
user_identity = batchmodels.UserIdentity(
auto_user=batchmodels.AutoUserSpecification(
scope=batchmodels.AutoUserScope.pool,
elevation_level=batchmodels.ElevationLevel.admin
)
)
task = batchmodels.TaskAddParameter(
id=task_id,
display_name=display_name,
command_line=self.task_definition['commandLine'],
            constraints=constraints,
container_settings=container_settings,
environment_settings=environment_settings,
user_identity=user_identity,
)
for validation in task.validate():
logging.info(validation)
self.batch_client.task.add(job_id=job_id, task=task)
return task
@retry(tries=10, delay=1, backoff=2, max_delay=10)
def wait_for_tasks_to_complete(self, job_ids: List[str], timeout=timedelta(hours=24)):
"""
Returns when all tasks in the specified job reach the Completed state.
        :param List[str] job_ids: The ids of the jobs whose tasks should be monitored.
:param timedelta timeout: The duration to wait for task completion. If all
tasks in the specified job do not reach Completed state within this time
period, an exception will be raised.
"""
from azure.batch import models as batchmodels
timeout_expiration = datetime.now() + timeout
print("Monitoring all tasks for 'Completed' state, timeout in {}...".format(timeout), end='')
while datetime.now() < timeout_expiration:
completed_jobs = 0
for job_id in job_ids:
print('.', end='')
sys.stdout.flush()
tasks = self.batch_client.task.list(job_id)
incomplete_tasks = [task for task in tasks if task.state != batchmodels.TaskState.completed]
if not incomplete_tasks:
completed_jobs += 1
if len(job_ids) == completed_jobs:
print()
return True
else:
time.sleep(5)
print()
raise SchedulerException("ERROR: Tasks did not reach 'Completed' state within timeout period of " + str(timeout))
@retry(tries=3, delay=1)
def upload_script(self):
if not self.script:
return
with open(self.script, 'rb') as data:
self.container_client.upload_blob(
name=self.script_target_name,
data=data,
)
def submit_job(self, tries=1):
pool_id = self.create_pool()
job = self.create_job(pool_id)
self.upload_script()
task = self.add_task(job.id, default_max_tries=tries)
return {
'pool_id': pool_id,
'task_id': task.id,
'job_id': job.id,
        }
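
# Example (added sketch): the Azure flow mirrors the AWS one.  submit_job()
# provisions the pool, job and task and returns their ids, which can then be
# passed to wait_for_tasks_to_complete().  The conf keys are the ones read by
# the helpers above (batch_account, batch_key, location, subscription,
# storage_account, storage_container, storage_connection_string); their values
# would come from the user's Hummingbird configuration.
#
#   scheduler = AzureBatchScheduler(conf, machine, disk_size=100,
#                                   script='/tmp/profile.sh')
#   handles = scheduler.submit_job(tries=1)
#   scheduler.wait_for_tasks_to_complete([handles['job_id']])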

/Elpotrero-1.6.2.tar.gz/Elpotrero-1.6.2/elpotrero/_files/tree/scripts/tools/config/installconfigfiles.py
import os
import sys
import re
import logging
import elpotrero.lib.files as libfiles
import elpotrero.lib.util as elutil
import configurationvalues as confvals
_logger = None
def getconfigurationpath(filelayoutkey, filename):
""" Util to get the full path of conf file"""
pathfile = os.path.join(
confvals.getprojectpath(),
confvals.getlayoutvalue(filelayoutkey),
'_build',
filename)
_checkpathexists(pathfile)
return pathfile
def _getpath(pathkey):
""" Another simple util to cut down on all the code.
All it does is pull out the configvalue based on the path key and does
some error checking to make sure the path exists.
"""
global _logger
pathfile = confvals.getconfigvalue(pathkey)
_logger.debug("key = {0}, path = {1}".format(pathkey, pathfile))
_checkpathexists(pathfile)
return pathfile
def _checkpathexists(pathfile):
""" This util is just a helper that checks if the path/file exists. If
it does not, then exit and give a message
"""
global _logger
ipsrcfile = os.path.exists(pathfile)
_logger.debug("{0} exist - {1}".format(pathfile, ipsrcfile))
if not ipsrcfile:
_logger.info("exiting because {0} does not exist".format(pathfile))
sys.exit(1)
def getdestinations():
# these are all the paths to the files we intend to modify
# all the configuration options are located in basic_configuration.conf
pdst = dict()
pdst['bindnamed'] = _getpath('paths.bind.named')
pdst['bindzones'] = _getpath('paths.bind.zones')
pdst['nginxavail'] = _getpath('paths.nginx.available')
pdst['nginxenabl'] = _getpath('paths.nginx.enabled')
pdst['supervisor'] = _getpath('paths.supervisor\.conf')
project = confvals.getconfigvalue('project')
projectpath = confvals.getprojectpath()
pdst['django'] = "{0}/{1}".format(projectpath, project)
return pdst
def getsources():
""" path to the conf versions we have that are meant to be installed
these files are all installed in scripts/conf/ name of conf / _build
we're going to use this file name a few times
"""
global _logger
confname = getconfnames()
psrc = dict()
psrc['bindnamed'] = getconfigurationpath('build.paths.bind',
'named.conf.local')
psrc['bindzones'] = getconfigurationpath('build.paths.bind',
confname['bind'])
psrc['nginxavail'] = getconfigurationpath('build.paths.nginx',
confname['nginx'])
psrc['supervisor'] = getconfigurationpath('build.paths.supervisor',
confname['supervisor'])
_logger.debug("psrc['bindnamed'] = {0}".format(psrc['bindnamed']))
_logger.debug("psrc['bindzones'] = {0}".format(psrc['bindzones']))
_logger.debug("psrc['nginxavail'] = {0}".format(psrc['nginxavail']))
_logger.debug("psrc['supervisor'] = {0}".format(psrc['supervisor']))
return psrc
def getconfnames():
"""
returns a dictionary of the correct names of the configuration files
    we are using. This does NOT return full path names!
"""
project = confvals.getconfigvalue('project')
domain = confvals.getconfigvalue('domain')
confnames = dict()
confnames['bind'] = 'db.{0}.{1}'.format(project, domain)
confnames['nginx'] = '{0}.{1}'.format(project, domain)
confnames['supervisor'] = '{0}.conf'.format(project)
return confnames
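# Example (added note): with project "myproj" and domain "example.com" the
# mapping returned above would be
#   {'bind': 'db.myproj.example.com',
#    'nginx': 'myproj.example.com',
#    'supervisor': 'myproj.conf'}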
def installconfigurationfiles(dests, source):
"""
Contains all the code to install the configuration files found in
scripts/conf to /etc/bind, /etc/nginx, etc.
    Keyword Arguments
dests - destination paths to where we intend to copy and modify
configuration files
source - source paths to the configuration files we have in the
scripts/conf directory
"""
# bind installation
# bindnamed is contents of the file found in /etc/bind/named.conf
# bindconf is the addition we wish to make to /etc/bind/named.conf
fbind = open(dests['bindnamed'], 'r')
bindnamed = fbind.read()
fbind.close()
fbind = open(source['bindnamed'], 'r')
bindconf = fbind.read()
fbind.close()
# if the addition is not already there, then add it and write it
m = re.search(bindconf, bindnamed)
if m is None:
fbind = open(dests['bindnamed'], 'w')
fbind.write("{0}\n{1}".format(bindnamed, bindconf))
# now copy the zone file we have for this project
# to the /etc/bind/zones directory
libfiles.copyanything(source['bindzones'],
dests['bindzones'])
# nginx installation
# first place our nginx conf file into /etc/nginx/sites-available
# then symlink it to nginx/sites-enabled
libfiles.copyanything(source['nginxavail'],
dests['nginxavail'])
confname = getconfnames()
src = os.path.join(dests['nginxavail'], confname['nginx'])
dst = os.path.join(dests['nginxenabl'], confname['nginx'])
libfiles.symcheck(src, dst)
# supervisor installation
libfiles.copyanything(source['supervisor'],
dests['supervisor'])
def uninstallconfigurationfiles(targetpaths, sources):
"""
Contains all the code to delete installed configuration files found in
/etc/bind, /etc/nginx, etc.
    Keyword Arguments
dests - destination paths to where we installed the configuration files
that are meant to be deleted
source - source paths to the configuration files we have in the
scripts/conf directory
"""
# bind un-installation
# we have to check /etc/bind/named.conf.local for anything we put
# in there and delete it
fbind = open(targetpaths['bindnamed'], 'r')
bindtarget = fbind.read()
fbind.close()
fbind = open(sources['bindnamed'], 'r')
bindconf = fbind.read()
fbind.close()
# if the addition to named.conf.local there, then
# delete it and rewrite the file
m = re.search(bindconf, bindtarget)
if m:
bindtarget = re.sub(bindconf, "", bindtarget)
fbind = open(targetpaths['bindnamed'], 'w')
fbind.write(bindtarget)
confname = getconfnames()
# remove the zone file from /etc/bind/zones
dst = os.path.join(targetpaths['bindzones'], confname['bind'])
if os.path.exists(dst):
os.remove(dst)
# nginx uninstall
# remove the file from /etc/nginx/sites-available
available = os.path.join(targetpaths['nginxavail'], confname['nginx'])
if os.path.exists(available):
os.remove(available)
# remove the symlink from /etc/nginx/sites-enabled
enabled = os.path.join(targetpaths['nginxenabl'], confname['nginx'])
if os.path.exists(enabled):
os.remove(enabled)
# remove conf file from /etc/supervisor/conf.d
supervisor = os.path.join(targetpaths['supervisor'],
confname['supervisor'])
if os.path.exists(supervisor):
os.remove(supervisor)
def help():
return """
This script is used to install configuration files from scripts/conf directory
to various locations in /etc.
Such as /etc/bind/zones /etc/nginx/sites-available, ...
There are two basic modes, install and uninstall
Note that the script will exit during an install if it sees that the required
directories for installation do not exist. However, it doesn't do a check on
the script/conf files
Usage: sudo $VIRTUAL_ENV/bin/python confinstall.py [FLAG]
flags:
-h, --help this help file
-d, --debug turn debug logging information on
-i, --install run the installation script
-u, --uninstall run the uninstallation script
-is, --installsettings install settings files as settings.modd
"""
def main(args):
global _logger
formatter = elutil.create_logformatter("%(filename)s %(message)s", "")
elutil.setup_logging('/tmp/logs', scrnlog=True,
logname='scripts.bootstrap.installconfs.debug',
screen_formatter=formatter,
debug=elutil.findflags('-d', '--debug', args))
_logger = logging.getLogger('standard.core')
destinationpaths = getdestinations()
sourcepaths = getsources()
if elutil.findflags('-h', '--help', args):
print help()
sys.exit(0)
if elutil.findflags('-i', '--install', args):
installconfigurationfiles(destinationpaths, sourcepaths)
sys.exit(0)
if elutil.findflags('-u', '--uninstall', args):
uninstallconfigurationfiles(destinationpaths, sourcepaths)
sys.exit(0)
if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))

/AllyInvestPy-1.0.16.tar.gz/AllyInvestPy-1.0.16/ally/responses/quote.py
class Quote():
def __init__(self):
pass
def from_xml(self, xml):
pass
def from_json(self, json):
if 'adp_100' in json:
self.adp_100 = json['adp_100']
if 'adp_200' in json:
self.adp_200 = json['adp_200']
if 'adp_50' in json:
self.adp_50 = json['adp_50']
if 'adv_21' in json:
self.adv_21 = json['adv_21']
if 'adv_30' in json:
self.adv_30 = json['adv_30']
if 'adv_90' in json:
self.adv_90 = json['adv_90']
if 'ask' in json:
self.ask = json['ask']
if 'ask_time' in json:
self.ask_time = json['ask_time']
if 'asksz' in json:
self.asksz = json['asksz']
if 'basis' in json:
self.basis = json['basis']
if 'beta' in json:
self.beta = json['beta']
if 'bid' in json:
self.bid = json['bid']
if 'bid_time' in json:
self.bid_time = json['bid_time']
if 'bidsz' in json:
self.bidsz = json['bidsz']
if 'bidtick' in json:
self.bidtick = json['bidtick']
if 'chg' in json:
self.chg = json['chg']
if 'chg_sign' in json:
self.chg_sign = json['chg_sign']
if 'chg_t' in json:
self.chg_t = json['chg_t']
if 'cl' in json:
self.cl = json['cl']
if 'contract_size' in json:
self.contract_size = json['contract_size']
if 'cusip' in json:
self.cusip = json['cusip']
if 'date' in json:
self.date = json['date']
if 'datetime' in json:
self.datetime = json['datetime']
if 'days_to_expiration' in json:
self.days_to_expiration = json['days_to_expiration']
if 'div' in json:
self.div = json['div']
if 'divexdate' in json:
self.divexdate = json['divexdate']
if 'divfreq' in json:
self.divfreq = json['divfreq']
if 'divpaydt' in json:
self.divpaydt = json['divpaydt']
if 'dollar_value' in json:
self.dollar_value = json['dollar_value']
if 'eps' in json:
self.eps = json['eps']
if 'exch' in json:
self.exch = json['exch']
if 'exch_desc' in json:
self.exch_desc = json['exch_desc']
if 'hi' in json:
self.hi = json['hi']
if 'iad' in json:
self.iad = json['iad']
if 'idelta' in json:
self.idelta = json['idelta']
if 'igamma' in json:
self.igamma = json['igamma']
if 'imp_volatility' in json:
self.imp_volatility = json['imp_volatility']
if 'incr_vl' in json:
self.incr_vl = json['incr_vl']
if 'irho' in json:
self.irho = json['irho']
if 'issue_desc' in json:
self.issue_desc = json['issue_desc']
if 'itheta' in json:
self.itheta = json['itheta']
if 'ivega' in json:
self.ivega = json['ivega']
if 'last' in json:
self.last = json['last']
if 'lo' in json:
self.lo = json['lo']
if 'name' in json:
self.name = json['name']
if 'op_delivery' in json:
self.op_delivery = json['op_delivery']
if 'op_flag' in json:
self.op_flag = json['op_flag']
if 'op_style' in json:
self.op_style = json['op_style']
if 'op_subclass' in json:
self.op_subclass = json['op_subclass']
if 'openinterest' in json:
self.openinterest = json['openinterest']
if 'opn' in json:
self.opn = json['opn']
if 'opt_val' in json:
self.opt_val = json['opt_val']
if 'pchg' in json:
self.pchg = json['pchg']
if 'pchg_sign' in json:
self.pchg_sign = json['pchg_sign']
if 'pcls' in json:
self.pcls = json['pcls']
if 'pe' in json:
self.pe = json['pe']
if 'phi' in json:
self.phi = json['phi']
if 'plo' in json:
self.plo = json['plo']
if 'popn' in json:
self.popn = json['popn']
if 'pr_adp_100' in json:
self.pr_adp_100 = json['pr_adp_100']
if 'pr_adp_200' in json:
self.pr_adp_200 = json['pr_adp_200']
if 'pr_adp_50' in json:
self.pr_adp_50 = json['pr_adp_50']
if 'pr_date' in json:
self.pr_date = json['pr_date']
if 'pr_openinterest' in json:
self.pr_openinterest = json['pr_openinterest']
if 'prbook' in json:
self.prbook = json['prbook']
if 'prchg' in json:
self.prchg = json['prchg']
if 'prem_mult' in json:
self.prem_mult = json['prem_mult']
if 'put_call' in json:
self.put_call = json['put_call']
if 'pvol' in json:
self.pvol = json['pvol']
if 'qcond' in json:
self.qcond = json['qcond']
if 'rootsymbol' in json:
self.rootsymbol = json['rootsymbol']
if 'secclass' in json:
self.secclass = json['secclass']
if 'sesn' in json:
self.sesn = json['sesn']
if 'sho' in json:
self.sho = json['sho']
if 'strikeprice' in json:
self.strikeprice = json['strikeprice']
if 'symbol' in json:
self.symbol = json['symbol']
if 'tcond' in json:
self.tcond = json['tcond']
if 'timestamp' in json:
self.timestamp = json['timestamp']
if 'tr_num' in json:
self.tr_num = json['tr_num']
if 'tradetick' in json:
self.tradetick = json['tradetick']
if 'trend' in json:
self.trend = json['trend']
if 'under_cusip' in json:
self.under_cusip = json['under_cusip']
if 'undersymbol' in json:
self.undersymbol = json['undersymbol']
if 'vl' in json:
self.vl = json['vl']
if 'volatility12' in json:
self.volatility12 = json['volatility12']
if 'vwap' in json:
self.vwap = json['vwap']
if 'wk52hi' in json:
self.wk52hi = json['wk52hi']
if 'wk52hidate' in json:
self.wk52hidate = json['wk52hidate']
if 'wk52lo' in json:
self.wk52lo = json['wk52lo']
if 'wk52lodate' in json:
self.wk52lodate = json['wk52lodate']
if 'xdate' in json:
self.xdate = json['xdate']
if 'xday' in json:
self.xday = json['xday']
if 'xmonth' in json:
self.xmonth = json['xmonth']
if 'xyear' in json:
self.xyear = json['xyear']
if 'yield' in json:
self.yld = json['yield'] | PypiClean |
/BGT_Client-1.0.2-py3-none-any.whl/dgt_sdk/processor/core.py |
from concurrent.futures import CancelledError
import concurrent.futures
import itertools
import logging
from dgt_sdk.messaging.exceptions import ValidatorConnectionError
from dgt_sdk.messaging.future import FutureTimeoutError
from dgt_sdk.messaging.stream import RECONNECT_EVENT
from dgt_sdk.messaging.stream import Stream
from dgt_sdk.processor.context import Context
from dgt_sdk.processor.exceptions import InvalidTransaction
from dgt_sdk.processor.exceptions import InternalError
from dgt_sdk.processor.exceptions import AuthorizationException
from dgt_sdk.protobuf.processor_pb2 import TpRegisterRequest
from dgt_sdk.protobuf.processor_pb2 import TpRegisterResponse
from dgt_sdk.protobuf.processor_pb2 import TpUnregisterRequest
from dgt_sdk.protobuf.processor_pb2 import TpUnregisterResponse
from dgt_sdk.protobuf.processor_pb2 import TpProcessRequest
from dgt_sdk.protobuf.processor_pb2 import TpProcessResponse
from dgt_sdk.protobuf.network_pb2 import PingResponse
from dgt_sdk.protobuf.validator_pb2 import Message
LOGGER = logging.getLogger(__name__)
class TransactionProcessor:
"""TransactionProcessor is a generic class for communicating with a
validator and routing transaction processing requests to a registered
handler. It uses ZMQ and channels to handle requests concurrently.
"""
def __init__(self, url):
"""
Args:
url (string): The URL of the validator
"""
self._stream = Stream(url)
self._url = url
self._handlers = []
@property
def zmq_id(self):
return self._stream.zmq_id
def add_handler(self, handler):
"""Adds a transaction family handler
Args:
handler (TransactionHandler): the handler to be added
"""
self._handlers.append(handler)
def _matches(self, handler, header):
LOGGER.debug("Matches handler: %s~%s", header.family_name,handler.family_name)
return header.family_name == handler.family_name \
and header.family_version in handler.family_versions
def _find_handler(self, header):
"""Find a handler for a particular (family_name, family_versions)
:param header transaction_pb2.TransactionHeader:
:return: handler
"""
LOGGER.debug("find_handler...")
try:
return next(
handler for handler in self._handlers
if self._matches(handler, header))
except StopIteration:
LOGGER.debug("Missing handler for header: %s", header)
return None
def _register_requests(self):
"""Returns all of the TpRegisterRequests for handlers
:return (list): list of TpRegisterRequests
"""
return itertools.chain.from_iterable( # flattens the nested list
[
[TpRegisterRequest(
family=n,
version=v,
namespaces=h.namespaces)
for n, v in itertools.product(
[h.family_name],
h.family_versions,)] for h in self._handlers])
def _unregister_request(self):
"""Returns a single TP_UnregisterRequest that requests
that the validator stop sending transactions for previously
registered handlers.
:return (processor_pb2.TpUnregisterRequest):
"""
return TpUnregisterRequest()
def _process(self, msg):
if msg.message_type != Message.TP_PROCESS_REQUEST:
LOGGER.debug(
"Transaction Processor recieved invalid message type. "
"Message type should be TP_PROCESS_REQUEST,"
" but is %s", Message.MessageType.Name(msg.message_type))
return
LOGGER.debug("Transaction Processor: process %s.",Message.MessageType.Name(msg.message_type))
request = TpProcessRequest()
request.ParseFromString(msg.content)
state = Context(self._stream, request.context_id)
header = request.header
try:
if not self._stream.is_ready():
raise ValidatorConnectionError()
handler = self._find_handler(header)
if handler is None:
return
LOGGER.debug('TransactionProcessor: _process.apply context_id=%s',request.context_id)
handler.apply(request, state)
self._stream.send_back(
message_type=Message.TP_PROCESS_RESPONSE,
correlation_id=msg.correlation_id,
content=TpProcessResponse(
status=TpProcessResponse.OK
).SerializeToString())
except InvalidTransaction as it:
LOGGER.warning("Invalid Transaction %s", it)
try:
self._stream.send_back(
message_type=Message.TP_PROCESS_RESPONSE,
correlation_id=msg.correlation_id,
content=TpProcessResponse(
status=TpProcessResponse.INVALID_TRANSACTION,
message=str(it),
extended_data=it.extended_data
).SerializeToString())
except ValidatorConnectionError as vce:
# TP_PROCESS_REQUEST has made it through the
# handler.apply and an INVALID_TRANSACTION would have been
# sent back but the validator has disconnected and so it
# doesn't care about the response.
LOGGER.warning("during invalid transaction response: %s", vce)
except InternalError as ie:
LOGGER.warning("internal error: %s", ie)
try:
self._stream.send_back(
message_type=Message.TP_PROCESS_RESPONSE,
correlation_id=msg.correlation_id,
content=TpProcessResponse(
status=TpProcessResponse.INTERNAL_ERROR,
message=str(ie),
extended_data=ie.extended_data
).SerializeToString())
except ValidatorConnectionError as vce:
                # Same as the prior except block, except that here an internal
                # error has happened; because of the disconnect the validator
                # probably doesn't care about the response.
LOGGER.warning("during internal error response: %s", vce)
except ValidatorConnectionError as vce:
# Somewhere within handler.apply a future resolved with an
# error status that the validator has disconnected. There is
# nothing left to do but reconnect.
LOGGER.warning("during handler.apply a future was resolved "
"with error status: %s", vce)
except AuthorizationException as ae:
LOGGER.warning("AuthorizationException: %s", ae)
try:
self._stream.send_back(
message_type=Message.TP_PROCESS_RESPONSE,
correlation_id=msg.correlation_id,
content=TpProcessResponse(
status=TpProcessResponse.INVALID_TRANSACTION,
message=str(ae),
).SerializeToString())
except ValidatorConnectionError as vce:
# TP_PROCESS_REQUEST has made it through the
# handler.apply and an INVALID_TRANSACTION would have been
# sent back but the validator has disconnected and so it
# doesn't care about the response.
LOGGER.warning("during invalid transaction response: %s", vce)
def _process_future(self, future, timeout=None, sigint=False):
try:
LOGGER.debug('TransactionProcessor: future.result ...',)
msg = future.result(timeout)
except CancelledError:
# This error is raised when Task.cancel is called on
# disconnect from the validator in stream.py, for
# this future.
LOGGER.debug('TransactionProcessor: CancelledError')
return
LOGGER.debug('TransactionProcessor: _process_future msg=%s',str(msg))
if msg is RECONNECT_EVENT:
if sigint is False:
LOGGER.info("reregistering with validator")
self._stream.wait_for_ready()
self._register()
else:
LOGGER.debug(
'received message of type: %s',
Message.MessageType.Name(msg.message_type))
if msg.message_type == Message.PING_REQUEST:
self._stream.send_back(
message_type=Message.PING_RESPONSE,
correlation_id=msg.correlation_id,
content=PingResponse().SerializeToString())
return
self._process(msg)
def _register(self):
futures = []
for message in self._register_requests():
self._stream.wait_for_ready()
future = self._stream.send(
message_type=Message.TP_REGISTER_REQUEST,
content=message.SerializeToString())
futures.append(future)
for future in futures:
resp = TpRegisterResponse()
try:
resp.ParseFromString(future.result().content)
LOGGER.info("Register attempt: %s",
TpRegisterResponse.Status.Name(resp.status))
except ValidatorConnectionError as vce:
LOGGER.info("during waiting for response on registration: %s",
vce)
def _unregister(self):
message = self._unregister_request()
self._stream.wait_for_ready()
future = self._stream.send(
message_type=Message.TP_UNREGISTER_REQUEST,
content=message.SerializeToString())
response = TpUnregisterResponse()
try:
response.ParseFromString(future.result(1).content)
LOGGER.info("unregister attempt: %s",
TpUnregisterResponse.Status.Name(response.status))
except ValidatorConnectionError as vce:
LOGGER.info("during waiting for response on unregistration: %s",
vce)
def start(self):
"""Connects the transaction processor to a validator and starts
listening for requests and routing them to an appropriate
transaction handler.
"""
fut = None
try:
self._register()
while True:
# During long running processing this
# is where the transaction processor will
# spend most of its time
fut = self._stream.receive()
LOGGER.debug("TransactionProcessor:receive: fut=%s", str(fut))
self._process_future(fut)
except KeyboardInterrupt:
try:
# tell the validator to not send any more messages
self._unregister()
while True:
if fut is not None:
# process futures as long as the tp has them,
# if the TP_PROCESS_REQUEST doesn't come from
# zeromq->asyncio in 1 second raise a
                    # concurrent.futures.TimeoutError and be done.
self._process_future(fut, 1, sigint=True)
fut = self._stream.receive()
except concurrent.futures.TimeoutError:
# Where the tp will usually exit after
# a KeyboardInterrupt. Caused by the 1 second
# timeout in _process_future.
pass
except FutureTimeoutError:
# If the validator is not able to respond to the
# unregister request, exit.
pass
def stop(self):
"""Closes the connection between the TransactionProcessor and the
validator.
"""
self._stream.close() | PypiClean |
/LbNightlyTools-4.0.1-py3-none-any.whl/LbMsg/BuildMsg.py | from __future__ import absolute_import, print_function
__author__ = "Ben Couturier <[email protected]>"
import datetime
import json
import os
from .Common import Messenger
class NightliesMessenger(Messenger):
"""
Class used to connect to the NightlyBuilds queue
"""
def __init__(self):
"""
Initialize props
"""
Messenger.__init__(self)
self._topic_name = "topic.build_ready"
def sendBuildDone(
self,
slot,
project,
config,
buildId,
priority=None,
        date=None,
):
"""
Sends the message that a particular project has been built
"""
        if date is None:
            date = datetime.datetime.now()
        self._basicPublish(
".".join([slot, project, config]),
json.dumps(
[
{
"slot": slot,
"project": project,
"platform": config,
"build_id": buildId,
"priority": priority,
}
]
),
)
def getBuildsDone(self, queueName=None, bindingKeys=None):
"""
        Get the list of builds done, for which messages are queued
"""
def callback(ch, method, properties, body):
print("%r\t%r" % (method.routing_key, body))
buildsDone = []
with self._getConnection() as connection:
(channel, queueName) = self._setupClientChannel(
connection.channel(), queueName, bindingKeys
)
while True:
method_frame, head_frame, body = channel.basic_get(queue=queueName)
                if method_frame is None:
break
print(method_frame.routing_key, json.loads(body))
buildsDone.append(json.loads(body)[0])
channel.basic_ack(method_frame.delivery_tag)
return buildsDone
def consumeBuildsDone(self, callback, queueName=None, bindingKeys=None):
"""
Get the list of builds done, for which messages are queued
It takes a callback like so:
def callback(ch, method, properties, body):
print(" [x] %r:%r" % (method.routing_key, body))
"""
with self._getConnection() as connection:
(channel, queueName) = self._setupClientChannel(
connection.channel(), queueName, bindingKeys
)
channel.basic_consume(callback, queue=queueName, no_ack=True)
channel.start_consuming() | PypiClean |
/MySQL-python-1.2.5.zip/MySQL-python-1.2.5/_mysql_exceptions.py | try:
from exceptions import Exception, StandardError, Warning
except ImportError:
# Python 3
StandardError = Exception
class MySQLError(StandardError):
"""Exception related to operation with MySQL."""
class Warning(Warning, MySQLError):
"""Exception raised for important warnings like data truncations
while inserting, etc."""
class Error(MySQLError):
"""Exception that is the base class of all other error exceptions
(not Warning)."""
class InterfaceError(Error):
"""Exception raised for errors that are related to the database
interface rather than the database itself."""
class DatabaseError(Error):
"""Exception raised for errors that are related to the
database."""
class DataError(DatabaseError):
"""Exception raised for errors that are due to problems with the
processed data like division by zero, numeric value out of range,
etc."""
class OperationalError(DatabaseError):
"""Exception raised for errors that are related to the database's
operation and not necessarily under the control of the programmer,
e.g. an unexpected disconnect occurs, the data source name is not
found, a transaction could not be processed, a memory allocation
error occurred during processing, etc."""
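# Illustrative handling of the hierarchy above (a sketch; 'conn' is assumed to
# be an already-open MySQLdb connection):
#
#     try:
#         cur = conn.cursor()
#         cur.execute("SELECT 1")
#     except OperationalError:
#         pass  # e.g. lost connection -- reconnect or retry
#     except DatabaseError:
#         raise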
class IntegrityError(DatabaseError):
"""Exception raised when the relational integrity of the database
is affected, e.g. a foreign key check fails, duplicate key,
etc."""
class InternalError(DatabaseError):
"""Exception raised when the database encounters an internal
error, e.g. the cursor is not valid anymore, the transaction is
out of sync, etc."""
class ProgrammingError(DatabaseError):
"""Exception raised for programming errors, e.g. table not found
or already exists, syntax error in the SQL statement, wrong number
of parameters specified, etc."""
class NotSupportedError(DatabaseError):
"""Exception raised in case a method or database API was used
which is not supported by the database, e.g. requesting a
.rollback() on a connection that does not support transaction or
has transactions turned off.""" | PypiClean |
/dragonflow-4.0.0.tar.gz/dragonflow-4.0.0/dragonflow/db/drivers/redis_db_driver.py |
import re
from oslo_log import log
from redis import client as redis_client
from redis import exceptions
from dragonflow.common import exceptions as df_exceptions
from dragonflow.db import db_api
from dragonflow.db.drivers import redis_mgt
LOG = log.getLogger(__name__)
class RedisDbDriver(db_api.DbApi):
RequestRetryTimes = 5
def __init__(self):
super(RedisDbDriver, self).__init__()
self.clients = {}
self.remote_server_lists = []
self.redis_mgt = None
self.is_neutron_server = False
def initialize(self, db_ip, db_port, **args):
# get remote ip port list
self.redis_mgt = redis_mgt.RedisMgt.get_instance(db_ip, db_port)
self._update_server_list()
def _update_server_list(self):
if self.redis_mgt is not None:
self.remote_server_lists = self.redis_mgt.get_master_list()
self.clients = {}
for remote in self.remote_server_lists:
remote_ip_port = remote['ip_port']
ip_port = remote_ip_port.split(':')
self.clients[remote_ip_port] = \
redis_client.StrictRedis(host=ip_port[0], port=ip_port[1])
def create_table(self, table):
# Not needed in redis
pass
def delete_table(self, table):
local_key = self._uuid_to_key(table, '*', '*')
for host, client in self.clients.items():
local_keys = client.keys(local_key)
if len(local_keys) > 0:
for tmp_key in local_keys:
try:
self._execute_cmd("DEL", tmp_key)
except Exception:
LOG.exception("exception when delete_table: "
"%(key)s ", {'key': local_key})
def _handle_db_conn_error(self, ip_port, local_key=None):
self.redis_mgt.remove_node_from_master_list(ip_port)
self._update_server_list()
if local_key is not None:
LOG.exception("update server list, key: %(key)s",
{'key': local_key})
def _sync_master_list(self):
if self.is_neutron_server:
result = self.redis_mgt.redis_get_master_list_from_syncstring(
redis_mgt.RedisMgt.global_sharedlist.raw)
if result:
self._update_server_list()
def _gen_args(self, local_key, value):
args = []
args.append(local_key)
if value is not None:
args.append(value)
return args
def _is_oper_valid(self, oper):
if oper == 'SET' or oper == 'GET' or oper == 'DEL':
return True
return False
def _update_client(self, local_key):
self._sync_master_list()
ip_port = self.redis_mgt.get_ip_by_key(local_key)
client = self._get_client(local_key, ip_port)
return client
def _execute_cmd(self, oper, local_key, value=None):
if not self._is_oper_valid(oper):
LOG.warning("invalid oper: %(oper)s",
{'oper': oper})
return None
ip_port = self.redis_mgt.get_ip_by_key(local_key)
client = self._get_client(local_key, ip_port)
if client is None:
return None
arg = self._gen_args(local_key, value)
ttl = self.RequestRetryTimes
asking = False
alreadysync = False
while ttl > 0:
ttl -= 1
try:
if asking:
client.execute_command('ASKING')
asking = False
return client.execute_command(oper, *arg)
except exceptions.ConnectionError as e:
if not alreadysync:
client = self._update_client(local_key)
alreadysync = True
continue
self._handle_db_conn_error(ip_port, local_key)
LOG.exception("connection error while sending "
"request to db: %(e)s", {'e': e})
raise e
except exceptions.ResponseError as e:
if not alreadysync:
client = self._update_client(local_key)
alreadysync = True
continue
resp = str(e).split(' ')
if 'ASK' in resp[0]:
# one-time flag to force a node to serve a query about an
# IMPORTING slot
asking = True
if 'ASK' in resp[0] or 'MOVE' in resp[0]:
# MOVED/ASK XXX X.X.X.X:X
# do redirection
client = self._get_client(host=resp[2])
if client is None:
# maybe there is a fast failover
self._handle_db_conn_error(ip_port, local_key)
LOG.exception("no client available: "
"%(ip_port)s, %(e)s",
{'ip_port': resp[2], 'e': e})
raise e
else:
LOG.exception("error not handled: %(e)s",
{'e': e})
raise e
except Exception as e:
if not alreadysync:
client = self._update_client(local_key)
alreadysync = True
continue
self._handle_db_conn_error(ip_port, local_key)
LOG.exception("exception while sending request to "
"db: %(e)s", {'e': e})
raise e
def _find_key_without_topic(self, table, key):
local_key = self._uuid_to_key(table, key, '*')
self._sync_master_list()
for client in self.clients.values():
local_keys = client.keys(local_key)
if len(local_keys) == 1:
return local_keys[0]
def get_key(self, table, key, topic=None):
if topic:
local_key = self._uuid_to_key(table, key, topic)
else:
local_key = self._find_key_without_topic(table, key)
if local_key is None:
raise df_exceptions.DBKeyNotFound(key=key)
try:
res = self._execute_cmd("GET", local_key)
if res is not None:
return res
except Exception:
LOG.exception("exception when get_key: %(key)s",
{'key': local_key})
raise df_exceptions.DBKeyNotFound(key=key)
def set_key(self, table, key, value, topic=None):
local_key = self._uuid_to_key(table, key, topic)
try:
res = self._execute_cmd("SET", local_key, value)
if res is None:
res = 0
return res
except Exception:
LOG.exception("exception when set_key: %(key)s",
{'key': local_key})
def create_key(self, table, key, value, topic=None):
return self.set_key(table, key, value, topic)
def delete_key(self, table, key, topic=None):
if topic:
local_key = self._uuid_to_key(table, key, topic)
else:
local_key = self._find_key_without_topic(table, key)
if local_key is None:
raise df_exceptions.DBKeyNotFound(key=key)
try:
res = self._execute_cmd("DEL", local_key)
if res is None:
res = 0
return res
except Exception:
LOG.exception("exception when delete_key: %(key)s",
{'key': local_key})
def get_all_entries(self, table, topic=None):
res = []
ip_port = None
self._sync_master_list()
if not topic:
local_key = self._uuid_to_key(table, '*', '*')
try:
for host, client in self.clients.items():
local_keys = client.keys(local_key)
if len(local_keys) > 0:
for tmp_key in local_keys:
res.append(self._execute_cmd("GET", tmp_key))
return res
except Exception:
LOG.exception("exception when get_all_entries: %(key)s",
{'key': local_key})
else:
local_key = self._uuid_to_key(table, '*', topic)
try:
ip_port = self.redis_mgt.get_ip_by_key(local_key)
client = self._get_client(local_key, ip_port)
if client is None:
return res
local_keys = client.keys(local_key)
if len(local_keys) > 0:
res.extend(client.mget(local_keys))
return res
except Exception as e:
self._handle_db_conn_error(ip_port, local_key)
LOG.exception("exception when mget: %(key)s, %(e)s",
{'key': local_key, 'e': e})
def get_all_keys(self, table, topic=None):
res = []
ip_port = None
self._sync_master_list()
if not topic:
local_key = self._uuid_to_key(table, '*', '*')
try:
for host, client in self.clients.items():
ip_port = host
res.extend(client.keys(local_key))
return [self._strip_table_name_from_key(key) for key in res]
except Exception as e:
self._handle_db_conn_error(ip_port, local_key)
LOG.exception("exception when get_all_keys: %(key)s, %(e)s",
{'key': local_key, 'e': e})
else:
local_key = self._uuid_to_key(table, '*', topic)
try:
ip_port = self.redis_mgt.get_ip_by_key(local_key)
client = self._get_client(local_key, ip_port)
if client is None:
return res
res = client.keys(local_key)
return [self._strip_table_name_from_key(key) for key in res]
except Exception as e:
self._handle_db_conn_error(ip_port, local_key)
LOG.exception("exception when get_all_keys: %(key)s, %(e)s",
{'key': local_key, 'e': e})
def _strip_table_name_from_key(self, key):
regex = '^{.*}\\.(.*)$'
m = re.match(regex, key)
return m.group(1)
def _allocate_unique_key(self, table):
local_key = self._uuid_to_key('unique_key', table, None)
ip_port = None
try:
client = self._update_client(local_key)
if client is None:
return None
return client.incr(local_key)
except Exception as e:
self._handle_db_conn_error(ip_port, local_key)
LOG.exception("exception when incr: %(key)s, %(e)s",
{'key': local_key, 'e': e})
def allocate_unique_key(self, table):
try:
return self._allocate_unique_key(table)
except Exception as e:
LOG.error("allocate_unique_key exception: %(e)s",
{'e': e})
return
def _uuid_to_key(self, table, key, topic):
if not topic:
local_key = ('{' + table + '.' + '}' + '.' + key)
else:
local_key = ('{' + table + '.' + topic + '}' + '.' + key)
return local_key
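    # Key layout produced by _uuid_to_key (illustrative values):
    #   with topic:    '{lport.tenant1}.<uuid>'
    #   without topic: '{lport.}.<uuid>'
    # The '{...}' hash tag keeps all keys of a table/topic on the same Redis
    # Cluster slot (the redis_mgt helper appears to route requests by this key).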
def _get_client(self, key=None, host=None):
if host is None:
ip_port = self.redis_mgt.get_ip_by_key(key)
if ip_port is None:
return None
else:
ip_port = host
client = self.clients.get(ip_port, None)
if client is not None:
return self.clients[ip_port]
else:
return None
def process_ha(self):
if self.is_neutron_server:
self._sync_master_list()
else:
self._update_server_list()
def set_neutron_server(self, is_neutron_server):
self.is_neutron_server = is_neutron_server | PypiClean |
/MAGINE-0.1.5.tar.gz/MAGINE-0.1.5/magine/mappings/chemical_mapper.py | from bioservices import UniChem
import pandas as pd
from sortedcontainers import SortedSet, SortedDict
from magine.mappings.databases.download_libraries import HMDB
try:
import cPickle as pickle
except ImportError:  # python3 doesn't have cPickle
import pickle
try:
basestring
# Allows isinstance(foo, basestring) to work in Python 3
except NameError:
basestring = str
chem = UniChem()
class ChemicalMapper(object):
""" Convert chemical species across various ids.
    Database was created using HMDB
"""
valid_columns = ['kegg_id', 'name', 'accession', 'chebi_id', 'inchikey',
'chemspider_id', 'biocyc_id', 'synonyms', 'iupac_name',
'pubchem_compound_id', 'protein_associations',
'ontology', 'drugbank_id', 'chemical_formula',
'smiles', 'metlin_id', 'average_molecular_weight',
'secondary_accessions'
]
def __init__(self, fresh_download=False):
"""
Parameters
----------
fresh_download: bool
download new copy of database
"""
self.database = None
self._hmdb_to_chem_name = None
self._chem_name_to_hmdb = None
self._hmdb_to_kegg = None
self._kegg_to_hmdb = None
self.synonyms_to_hmdb = None
self._drugbank_to_hmdb = None
self._hmdb_to_protein = None
self._hmdb_main_to_protein = None
self._hmdb_accession_to_main = None
hmdb_database = HMDB().load_db(fresh_download=fresh_download)
self.database = hmdb_database.where((pd.notnull(hmdb_database)), None)
self.database['main_accession'] = self.database['accession']
sub_db = self.database[
self.database['secondary_accessions'].str.contains('|', na=False)]
new_df = tidy_split(sub_db, 'secondary_accessions', '|')
new_df['accession'] = new_df['secondary_accessions']
self.database = pd.concat([self.database, new_df])
self.kegg_hmdb = chem.get_mapping("kegg_ligand", "hmdb")
self.kegg_to_hmdb = self._to_dict("kegg_id", "main_accession")
self.hmdb_to_chem_name = self._to_dict("main_accession", "name")
@property
def hmdb_to_kegg(self):
if self._hmdb_to_kegg is None:
self._hmdb_to_kegg = self._to_dict("accession", "kegg_id")
return self._hmdb_to_kegg
@property
def chem_name_to_hmdb(self):
if self._chem_name_to_hmdb is None:
self._chem_name_to_hmdb = self._to_dict("name", "main_accession")
return self._chem_name_to_hmdb
@property
def drugbank_to_hmdb(self):
if self._drugbank_to_hmdb is None:
self._drugbank_to_hmdb = self._to_dict("drugbank_id",
"main_accession")
return self._drugbank_to_hmdb
@property
def hmdb_to_protein(self):
if self._hmdb_to_protein is None:
self._hmdb_to_protein = self._from_list_dict(
"accession", "protein_associations"
)
return self._hmdb_to_protein
@property
def hmdb_main_to_protein(self):
if self._hmdb_main_to_protein is None:
self._hmdb_main_to_protein = self._from_list_dict(
"main_accession", "protein_associations"
)
return self._hmdb_main_to_protein
@property
def hmdb_accession_to_main(self):
if self._hmdb_accession_to_main is None:
self._hmdb_accession_to_main = self._from_list_dict(
"accession", "main_accession"
)
return self._hmdb_accession_to_main
def _to_dict(self, key, value):
""" creates a dictionary with a list of values for each key
Parameters
----------
key : str
value : str
Returns
-------
dict
"""
d = self.database[[key, value]].copy()
d.dropna(how='any', inplace=True)
return_dict = SortedDict()
for i, j in d.values:
i = i.strip()
if i not in return_dict:
return_dict[i] = set()
return_dict[i].add(j)
return return_dict
def _from_list_dict(self, key, value):
d = self.database[[key, value]].copy()
d.dropna(how='any', inplace=True)
return_dict = SortedDict()
for i, j in d.values:
if i in return_dict:
return_dict[i].update(j.split('|'))
else:
return_dict[i] = SortedSet(j.split('|'))
return return_dict
def check_synonym_dict(self, term, format_name):
""" checks hmdb database for synonyms and returns formatted name
Parameters
----------
term : str
format_name : str
Returns
-------
dict
Examples
--------
>>> cm = ChemicalMapper()
>>> cm.check_synonym_dict(term='dodecene', format_name='main_accession')
['HMDB0000933', 'HMDB0059874']
"""
synonyms = self.database[['synonyms', format_name]].copy()
synonyms.dropna(how='any', inplace=True)
synonyms['synonyms'] = synonyms['synonyms'].str.lower()
hits = synonyms[synonyms['synonyms'].str.contains(term.lower())]
matches = sorted(set(hits[format_name].values))
return matches
def print_info(self):
""" print information about the dataframe
Returns
-------
"""
print('Number of HMDB accessions = {0}'.format(
len(self.database['accession'].unique())))
print('Number of unique KEGG ids = {0}'.format(
len(self.hmdb_to_kegg.keys())))
print('Number of HMDB to KEGG mappings = {0}'.format(
len(self.kegg_to_hmdb.values())))
def convert_kegg_nodes(self, network):
"""
        Maps KEGG compound nodes in the network to HMDB accessions and
        chemical names
        Parameters
        ----------
        network : nx.DiGraph
        Returns
        -------
        tuple of dict
            (net_cpd_to_hmdb, net_kegg_names, net_chem_names)
"""
still_unknown = []
hits = [i for i in set(network.nodes) if i.startswith('cpd:')]
net_kegg_names = dict()
net_chem_names = dict()
net_cpd_to_hmdb = dict()
for i in hits:
name_stripped = i.lstrip('cpd:')
net_kegg_names[i] = name_stripped
if name_stripped in self.kegg_to_hmdb:
mapping = self.kegg_to_hmdb[name_stripped]
if isinstance(mapping, (list, set, SortedSet)):
names = '|'.join(set(mapping))
chem_names = set()
for name in mapping:
try:
chem_names.update(self.hmdb_to_chem_name[name])
                        except KeyError:
continue
net_cpd_to_hmdb[i] = names
net_chem_names[i] = order_merge(chem_names)
elif isinstance(mapping, basestring):
                    chem_n = self.hmdb_to_chem_name[mapping]
                    net_cpd_to_hmdb[i] = mapping
                    # hmdb_to_chem_name maps to a set of names; join the set
                    # directly instead of byte-encoding it
                    net_chem_names[i] = order_merge(chem_n)
else:
print('Returned something else...', mapping)
elif i in compound_manual:
loc = compound_manual[i]
net_cpd_to_hmdb[i] = loc
if loc in self.hmdb_to_chem_name:
net_chem_names[i] = order_merge(
self.hmdb_to_chem_name[loc])
else:
still_unknown.append(i)
if len(still_unknown):
for i in still_unknown:
name_stripped = i.lstrip('cpd:')
if name_stripped in self.kegg_hmdb:
net_cpd_to_hmdb[i] = self.kegg_hmdb[name_stripped]
# else:
# print("Cannot find a HMDB mapping for %s " % i)
return net_cpd_to_hmdb, net_kegg_names, net_chem_names
# manually created based on missing in KEGG
compound_manual = {
'cpd:C07909': 'HMDB0015015',
'cpd:C16844': 'HMDB0001039',
'cpd:C00076': 'HMDB0000464',
'cpd:C00154': 'HMDB0001338',
'cpd:C01561': 'HMDB0003550',
'cpd:C04043': 'HMDB0003791',
'cpd:C01165': 'HMDB0002104',
'cpd:C00025': 'HMDB0000148',
'cpd:C00696': 'HMDB0001403',
'cpd:C00124': 'HMDB0000143',
}
def order_merge(species_set):
return '|'.join(sorted(species_set))
def tidy_split(df, column, sep='|', keep=False):
"""
Split the values of a column and expand so the new DataFrame has one split
value per row. Filters rows where the column is missing.
Params
------
df : pandas.DataFrame
dataframe with the column to split and expand
column : str
the column to split and expand
sep : str
the string used to split the column's values
keep : bool
        whether to retain the presplit value as its own row
Returns
-------
pandas.DataFrame
Returns a dataframe with the same columns as `df`.
"""
indexes = list()
new_values = list()
df = df.dropna(subset=[column])
for i, presplit in enumerate(df[column].astype(str)):
values = presplit.split(sep)
if keep and len(values) > 1:
indexes.append(i)
new_values.append(presplit)
for value in values:
indexes.append(i)
new_values.append(value)
new_df = df.iloc[indexes, :].copy()
new_df[column] = new_values
return new_df
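# Illustrative use of tidy_split (a sketch with made-up data):
#
#     df = pd.DataFrame({'accession': ['HMDB1'],
#                        'synonyms': ['water|oxidane']})
#     tidy_split(df, 'synonyms', sep='|')
#     #   accession  synonyms
#     # 0     HMDB1     water
#     # 0     HMDB1   oxidane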
if __name__ == "__main__":
cm = ChemicalMapper()
print(cm.check_synonym_dict(term='dodecene', format_name='main_accession'))
print(cm.hmdb_accession_to_main['HMDB15015'])
print(cm.hmdb_accession_to_main['HMDB0015015'])
# print(cm.hmdb_to_kegg['HMDB0015015'])
print(cm.kegg_to_hmdb.keys())
print(cm.kegg_to_hmdb['C07467']) | PypiClean |
/AsyncDex-1.1.tar.gz/AsyncDex-1.1/asyncdex/models/author.py | from typing import Any, Dict, Optional, TYPE_CHECKING
from .abc import Model
from .manga_list import MangaList
from .mixins import DatetimeMixin
from ..constants import routes
from ..utils import DefaultAttrDict, copy_key_to_attribute
if TYPE_CHECKING:
from ..client import MangadexClient
class Author(Model, DatetimeMixin):
"""A :class:`.Model` representing an individual author.
.. note::
Artists and authors are stored identically and share all properties.
.. versionadded:: 0.2
"""
name: str
"""The name of the author."""
image: Optional[str]
"""An image of the author, if available."""
biographies: DefaultAttrDict[Optional[str]]
"""A :class:`.DefaultAttrDict` holding the biographies of the author."""
mangas: MangaList
"""A list of all the mangas that belong to the author.
.. note::
In order to efficiently get all mangas in one go, use:
.. code-block:: python
await author.load_mangas()
"""
def __init__(
self,
client: "MangadexClient",
*,
id: Optional[str] = None,
version: int = 0,
data: Optional[Dict[str, Any]] = None,
):
self.mangas = MangaList(client)
self.biographies = DefaultAttrDict(default=lambda: None)
super().__init__(client, id=id, version=version, data=data)
def parse(self, data: Dict[str, Any]):
super().parse(data)
if "data" in data and "attributes" in data["data"]:
attributes = data["data"]["attributes"]
copy_key_to_attribute(attributes, "name", self)
copy_key_to_attribute(attributes, "imageUrl", self, "image")
if "biography" in attributes and attributes["biography"]:
for item in attributes["biography"]:
for key, value in item.items():
self.biographies[key] = value
self._process_times(attributes)
self._parse_relationships(data)
async def fetch(self):
"""Fetch data about the author. |permission| ``author.view``
:raises: :class:`.InvalidID` if an author with the ID does not exist.
"""
await self._fetch("author.view", "author")
async def load_mangas(self):
"""Shortcut method that calls :meth:`.MangadexClient.batch_mangas` with the mangas that belong to the author.
Roughly equivalent to:
.. code-block:: python
await client.batch_mangas(*author.mangas)
"""
await self.client.batch_mangas(*self.mangas)
async def update(self):
"""Update the author. |auth|
.. versionadded:: 0.5
"""
if not hasattr(self, "name"):
await self.fetch()
params = {"name": self.name, "version": self.version}
self.client.raise_exception_if_not_authenticated("PUT", routes["author"])
r = await self.client.request("PUT", routes["author"].format(id=self.id), json=params)
json = await r.json()
r.close()
obj = type(self)(self.client, data=json)
self.transfer(obj)
async def delete(self):
"""Delete the author. |auth|
.. versionadded:: 0.5
"""
return await self._delete("author") | PypiClean |
/DynamicForms-0.74.8-py3-none-any.whl/dynamicforms_legacy/mixins/render.py | import uuid as uuid_module
from enum import IntEnum
from typing import Hashable
from rest_framework.fields import Field as DrfField
from rest_framework.relations import ManyRelatedField, PKOnlyObject, RelatedField
from rest_framework.serializers import ListSerializer
from rest_framework.templatetags import rest_framework as drftt
from dynamicforms_legacy.settings import DYNAMICFORMS
class DisplayMode(IntEnum):
SUPPRESS = 1 # Field will be entirely suppressed. it will not render (not even to JSON) and will not parse for PUT
HIDDEN = 5 # Field will render as <input type="hidden"> or <tr data-field_name>
INVISIBLE = 8 # Field will render completely, but with display: none. Equal to setting its style = {display: none}
FULL = 10 # Field will render completely
class RenderMixin(object):
"""
Is used in fields and serializers, so every field and serializer gets its unique id. Also to specify where and how
fields should render.
In form where serializer is used, id is serializers uuid. Table with list of records has id »list-serializer.uuid«,
in dialog id is »dialog-{serializer.uuid}« and save button's id on dialog is »save-{serializer.uuid}«
Similar for fields: All inputs in HTML get id from field.uuid. Div that contains all that belongs to the field has
»container-{field.uuid}« for id, label has »label-{field.uuid}« and help text (if exists) has »help-{field.uuid}«
for id.
Used for rendering individual field to table view
"""
def __init__(
self,
*args,
uuid: uuid_module.UUID = None,
display: DisplayMode = None, # None == Leave at default
display_table: DisplayMode = None, # None == Leave at default
display_form: DisplayMode = None, # None == Leave at default
table_classes: str = "",
**kwargs
):
"""
:param args: passed on to inherited constructors
:param uuid: custom specified field UUID. if not specified, it will be assigned automatically
:param display: see DisplayMode enum. Specifies how field will render. Leave at None for default (FULL)
display_form and display_table also accepted for better granularity
:param table_classes: css classes to add to the table column
:param kwargs: passed on to inherited constructors
"""
super().__init__(*args, **kwargs)
self.uuid = uuid or uuid_module.uuid1()
# noinspection PyUnresolvedReferences
self.display_table = (
display_table
or display
or (DisplayMode.FULL if not getattr(self, "write_only", False) else DisplayMode.SUPPRESS)
)
self.display_form = display_form or display or DisplayMode.FULL
self.table_classes = table_classes
@property
def is_rendering_to_list(self):
"""
reports whether we are currently rendering to table or to single record
:return:
"""
try:
# noinspection PyUnresolvedReferences
base = self.parent
while base:
if isinstance(base, ListSerializer):
# If fields parent's parent is the ListSerializer, we're rendering to list
return True
base = base.parent
except:
pass
return False
@property
def is_rendering_to_html(self):
try:
# noinspection PyUnresolvedReferences
return self.context["format"] == "html"
except:
pass
return False
# noinspection PyUnresolvedReferences
def use_pk_only_optimization(self):
"""
Overrides DRF RelatedField's method. It True is returned then value passed for serialisation will be PK value
only, not entire relation object. we don't want that because then we can't resolve the value into something more
human-readable
:return:
"""
if self.is_rendering_to_list and self.is_rendering_to_html:
return False
return super().use_pk_only_optimization()
# noinspection PyUnresolvedReferences
def to_representation(self, value, row_data=None):
"""
Overrides DRF Field's to_representation.
Note that this is also called for the entire record as well as the serializer also is a Field descendant
:param value: value to serialize
:param row_data: instance with row data
:return: serialized value
"""
if self.is_rendering_to_list and self.is_rendering_to_html and self.display_table != DisplayMode.HIDDEN:
# if rentering to html table, let's try to resolve any lookups
# hidden fields will render to tr data-field_name attributes, so we maybe want to have ids, not text there
# we have discussed alternatives but decided that right now a more complete solution is not needed
return self.render_to_table(value, row_data)
check_for_none = value.pk if isinstance(value, PKOnlyObject) else value
if check_for_none is None:
return None
return super().to_representation(value)
def set_display(self, value):
if isinstance(value, tuple):
self.display_form, self.display_table = value
else:
self.display_form = self.display_table = value
display = property(lambda self: self.display_form, set_display)
# noinspection PyUnusedLocal, PyUnresolvedReferences
def render_to_table(self, value, row_data):
"""
Renders field value for table view
:param value: field value
:param row_data: data for entire row (for more complex renderers)
:return: rendered value for table view
"""
get_queryset = getattr(self, "get_queryset", None)
if isinstance(self, ManyRelatedField):
# Hm, not sure if this is the final thing to do: an example of this field is in
# ALC plane editor (modes of takeoff). However, value is a queryset here. There seem to still be DB queries
# However, in the example I have, the problem is solved by doing prefetch_related on the m2m relation
cr = self.child_relation
return ", ".join((cr.display_value(item) for item in value))
# return ', '.join((cr.display_value(item) for item in cr.get_queryset().filter(pk__in=value)))
elif isinstance(self, RelatedField) or get_queryset:
return self.display_value(value)
else:
choices = getattr(self, "choices", {})
# Now that we got our choices for related & choice fields, let's first get the value as it would be by DRF
check_for_none = value.pk if isinstance(value, PKOnlyObject) else value
if check_for_none is None:
value = None
else:
value = super().to_representation(value)
if isinstance(value, Hashable) and value in choices:
# choice field: let's render display names, not values
value = choices[value]
if value is None:
return DYNAMICFORMS.null_text_table
return drftt.format_value(value)
def validate_empty_values(self: DrfField, data):
# noinspection PyUnresolvedReferences
res = super().validate_empty_values(data)
# This is to fix a problem with calculated fields which was only solved in DRF 3.10.
# Forces validation and inclusion of the field into validated data. See comment in original function.
if res == (True, None) and data is None and self.source == "*":
return False, None
return res
def ordering(self):
ordering = []
if hasattr(self, "context") and "view" in getattr(self, "context"):
ordering = getattr(self.context["view"], "ordering", None)
if getattr(self, "field_name") not in getattr(ordering, "fields", []):
return ""
index = -1
direction_asc = True
for idx, o in enumerate(ordering):
if o.startswith("-"):
direction_asc = False
o = o[1:]
if o == getattr(self, "field_name"):
index = idx
if index > -1:
direction_class = ("asc" if direction_asc else "desc") + " seg-%d" % (index + 1)
else:
direction_class = "unsorted"
return "ordering " + direction_class | PypiClean |
/CompassAI-0.0.3.tar.gz/CompassAI-0.0.3/README.md | # CompassAI
CompassAI is a Python library that combines bootstrapping of the test set, feature importance, fairness evaluation, and a model card into one wrapper.
It also includes the following model ratings on a scale of 0-100:
1. Performance is model performance evaluated on the test set
2. Reliability is the width of the 95% bootstrap confidence interval
3. Feature robustness is the proportion of features that cannot be dropped without changing model performance by more than 0.02
4. Fairness is the largest difference in model performance across sensitive groups
Metrics that do not have a theoretical maximum such as mean squared error are normalized by dividing by performance of the null model.
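A minimal sketch of that normalization (illustrative only; the exact formula used inside CompassAI may differ):

```python
from sklearn.dummy import DummyRegressor
from sklearn.metrics import mean_squared_error

def normalized_mse_score(model, X_train, y_train, X_test, y_test):
    """Scale MSE onto 0-100 by comparing against a null (mean-predicting) model."""
    null_model = DummyRegressor(strategy="mean").fit(X_train, y_train)
    mse_model = mean_squared_error(y_test, model.predict(X_test))
    mse_null = mean_squared_error(y_test, null_model.predict(X_test))
    # 100 = perfect model (MSE of 0), 0 = no better than the null model
    return 100 * max(0.0, 1.0 - mse_model / mse_null)
```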
## Installation
```bash
pip install -e "git+https://github.com/optum-labs/CompassAI#egg=CompassAI"
```
## Usage
Please see [binary_demo.ipynb](https://github.com/optum-labs/Themis/blob/main/CompassAI/notebooks/binary_demo.ipynb) for a full example.
- `metrics` is dictionary of metrics
- `model` must have method `predict`. If model is sklearn pipeline, it should be composed of sklearn.compose.ColumnTransformer and the actual model.
- `X_test` is array-like object of shape (n_samples, n_features)
- `y_test` is array-like object of shape (n_samples,) or (n_samples, n_outputs)
- `sensitive_features` is array-like object of shape (n_samples,)
- `model_card_json` is a dictionary with specific schema (see below)
- `path` to specify where model card should be saved
```python
## Common metrics:
# classification_metrics = {"accuracy": accuracy_score,
# "average_precision": average_precision_score
# "f1": f1_score,
# "precision": precision_score,
# "recall": recall_score,
# "roc_auc": roc_auc_score}
# clustering_metrics = {"adj_rand": adjusted_rand_score}
# regression_metrics = {"mae": mean_absolute_error,
# "mse": mean_squared_error,
# "r2": r2_score}
# See sklearn.metrics for the full list: https://scikit-learn.org/stable/modules/model_evaluation.html
import os
from CompassAI import CompassAICard
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
metrics = {"Accuracy": accuracy_score,
"F1": f1_score,
"Precision": precision_score,
"Recall": recall_score}
# All model card dictionary keys are fixed and should not be changed (will throw an error)
model_card_json = \
{
"model_details": {
"name": "Census Dataset",
"overview": "Logistic regression to predict whether income is >=50k",
"owners": [
{
"name": "Pat Samranvedhya",
"contact": "[email protected]"
}
],
"version": {
"name": "0.1",
"date": "4/20/2022"
},
"references": [
{
"reference": "https://fairlearn.org/v0.7.0/api_reference/fairlearn.datasets.html?highlight=fetch_adult"
}
]
},
"considerations": {
"users": [
{
"description": "Data scientist"
},
{
"description": "ML researchers"
}
],
"use_cases": [
{
"description": "Demonstrate CompassAI using Adult census"
}
],
"limitations": [
{
"description": "For demo purposes only"
}
],
"ethical_considerations": [
{
"name": "Performance might not be similar between race groups",
"mitigation_strategy": "None. Parity difference is acceptable."
}
]
}
}
CompassAICard(metrics=metrics,
model=unmitigated_estimator,
X_test=X_test,
y_test=y_test,
sensitive_features=A_test,
model_card_json=model_card_json,
path = os.getcwd()
)
```
## Contributing
Pull requests are welcome. For major changes, please open an issue first to discuss what you would like to change.
Please make sure to update tests as appropriate.
## License
[MIT](https://choosealicense.com/licenses/mit/)
| PypiClean |
/Maxar_OGC_SDK-0.1.tar.gz/Maxar_OGC_SDK-0.1/Maxar_OGC_SDK/process.py | import os
import pyproj
import shapely.ops as ops
from shapely.geometry.polygon import Polygon
from functools import partial
import random
import string
def _response_handler(response):
"""
Function takes in the server response code and responds accordingly.
Returns:
requests response object of server status
"""
if response.status_code != 200:
raise Exception("Non-200 response received for {}.".format(response.url))
elif 'Exception' in response.text:
raise Exception(response.url, response.text)
else:
return response
def area_sqkm(bbox):
"""
Function takes in the bbox and calculates the area in SQKM.
Args:
bbox = String of Coordinates separated by comma
ex: "39.84387,-105.05608,39.95133,-104.94827"
Returns:
float value of area in SQKM
"""
_validate_bbox(bbox)
bboxlst = bbox.split(',')
ymin = float(bboxlst[0])
ymax = float(bboxlst[2])
xmin = float(bboxlst[1])
xmax = float(bboxlst[3])
geom = Polygon([(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin), (xmin, ymin)])
geom_area = ops.transform(
partial(
pyproj.transform,
pyproj.Proj(init='EPSG:4326'),
pyproj.Proj(
proj='aea',
lat_1=geom.bounds[1],
lat_2=geom.bounds[3]
)
),
geom)
# Print the area in sqkm^2
geomareasqkm = geom_area.area/(10**6)
return geomareasqkm
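# Illustrative call (a sketch; the bbox covers part of the Denver metro area):
#
#     area_sqkm("39.84387,-105.05608,39.95133,-104.94827")
#     # -> roughly 110 square kilometres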
def _display_image(image):
"""
Function takes in the response object and displays it.
Args:
image = response object from wms method
"""
try:
import IPython.display as disp
from IPython.display import Image, display
except:
raise Exception('Must have IPython installed to display.')
display(disp.Image(image.content))
def _validate_bbox(bbox):
"""
    Function takes in the bbox and validates that it is in the proper format
    Args:
        bbox = String of Coordinates separated by comma
        example = "39.84387, -105.05608, 39.95133, -104.94827"
"""
try:
miny, minx, maxy, maxx = bbox.split(',')
miny = float(miny)
minx = float(minx)
maxy = float(maxy)
maxx = float(maxx)
except:
raise Exception('Bbox must have exactly 4 coordinates.')
bbox_data = {'min_y': miny, 'min_x': minx, 'max_y': maxy, 'max_x': maxx}
if bbox_data['min_y'] >= bbox_data['max_y']:
raise Exception("Improper order of bbox: min_y is greater than max_y.")
if bbox_data['min_x'] >= bbox_data['max_x']:
raise Exception("Improper order of bbox: min_x is greater than max_x.")
for key in bbox_data.keys():
if 'y' in key:
if bbox_data[key] > 90 or bbox_data[key] < -90:
raise Exception("Improper bbox parameter: {} coordinate outside of range -90:90.".format(key))
elif 'x' in key:
if bbox_data[key] > 180 or bbox_data[key] < -180:
raise Exception("Improper bbox parameter: {} coordinate outside of range -180:180.".format(key))
def download_file(response, format_response=None, download_path=None):
if download_path:
filename = download_path
else:
filename = 'Download.' + format_response
if os.path.isfile(filename):
while os.path.isfile(filename):
filename = filename.split('.')[0] + '_dup' + '.' + filename.split('.')[1]
with open(filename, 'wb') as output_file:
output_file.write(response.content)
return filename
def _remove_cache(querystring):
pool_list = string.digits + string.ascii_letters
random_characters1 = ''.join(i for i in random.choices(pool_list, k=25))
random_characters2 = ''.join(i for i in random.choices(pool_list, k=25))
querystring.update({random_characters1:random_characters2})
return querystring | PypiClean |
/MGEdb-1.1.1-py3-none-any.whl/mgedb/validate.py |
import logging
import re
import sys
from collections import Counter, defaultdict
from typing import Callable, List
import cattr
import click
from .db import MGEdb, MgeType
from .io import parse_db_fasta_header
LOG = logging.getLogger(__name__)
# Sequence patterns
_SEQ_PATTERN = re.compile(r'^[AGCT]+$', flags=re.I)
_SEQ_AMBIGIOUS_PATTERN = re.compile(r'^[AGCTYRWSKMDVHBXN]+$', flags=re.I)
_SEQ_UNKNOWN_NT_PATTERN = re.compile(r'^[XN]+$', flags=re.I)
# Name patterns
_MGE_NAME_PATTERN = re.compile(r'^[a-z0-9_\-().]+$', flags=re.I)
class ValidationError(Exception):
"""Validation error catergory."""
def __init__(self, names, **kwargs):
self.names = map(str, names)
self.kwargs = kwargs
class InvalidType(ValidationError):
"""Invalid MGE type."""
pass
class InvalidName(ValidationError):
"""Invalid MGE name."""
pass
class DuplicatedEntry(ValidationError):
"""Entries are duplicated"""
pass
class AccessionMissmatch(ValidationError):
"""Invalid accession."""
pass
class SequenceLengthMissmatch(ValidationError):
"""Missmatch of sequence length between record and sequence."""
pass
class SequenceError(ValidationError):
"""Generic errors relating to sequences."""
pass
class NonStandardNucleotide(ValidationError):
"""Non standard nucleotides."""
pass
class UnknownSequence(ValidationError):
"""Non standard nucleotides."""
pass
class InvalidHeaderFormat(ValidationError):
"""Invalid MGE type."""
pass
class DuplicatedSequence(ValidationError):
"""Duplicated sequence."""
pass
class MissingSeqRecord(ValidationError):
"""Errors for missing sequence records."""
pass
class InvalidCoordinate(ValidationError):
"""Errors for missing sequence records."""
pass
def _validate_nomenclature_types_and_names(db) -> None:
"""Validate the MGE nomenclature file."""
    LOG.info('Validate nomenclature types')
# Get valid MGE type short hand names from db.py
valid_type_shorthands = [name.value for name in MgeType.__members__.values()]
for mge_type in db.nomenclature:
if not mge_type in valid_type_shorthands:
raise InvalidType(names=mge_type)
# validate nomenclature names
LOG.info('Validate nomenclature names')
for mge_names in db.nomenclature.values():
invalid_names = []
for mge_name in mge_names:
# check if name includes only valid characters
if not re.match(_MGE_NAME_PATTERN, mge_name):
invalid_names.append(mge_name)
if len(invalid_names) != 0:
msg = f'Invalid MGE name in nomenclature file'
raise InvalidName(names=invalid_names, message=msg)
def _validate_record_names(db) -> None:
"""Validate MGE record names."""
LOG.info('Validate record names')
names = db.nomenclature
# cast as mge tpye class
valid_types = [cattr.structure(type, MgeType) for type in names]
records = db.records
for _, r in records.items():
if r.type not in valid_types:
raise InvalidType(names=[r.name])
if r.name not in names[r.type.value]:
msg = f'Invalid MGE {r.type.value} name'
raise ValidationError(names=[r.name], message=msg)
def _validate_record_information(db) -> None:
"""Validate MGE record information."""
    LOG.info('Validate record information')
records = db.records
for seq in db.record_sequences:
header = parse_db_fasta_header(seq.title)
rec_seq = records[header['name']].sequences[header['allele_no'] - 1]
# validate accession
if rec_seq.accession != header['accnr']:
raise AccessionMissmatch(names=[rec_seq.accession, header['name']])
# validate lenght mge annotation matches sequence
record_chunks = []
for coords in zip(rec_seq.start, rec_seq.end):
start, end = sorted(coords)
record_chunks.append(range(start, end + 1))
record_length = sum(len(ch) for ch in record_chunks)
if record_length != len(seq.seq):
raise SequenceLengthMissmatch(names=[header['name']])
# validate cds information
for c in rec_seq.cds:
for name in ['start', 'end']:
cds_coord = getattr(c, name)
                # check if coordinate is within bounds
                if cds_coord >= record_length or cds_coord < 0:
                    raise InvalidCoordinate(names=[f'{name}: {cds_coord}'])
# verify that index name is equal to one in record
for index_mge_name, mge_record in records.items():
if index_mge_name != mge_record.name:
raise InvalidName(names=[index_mge_name, mge_record.name])
# index records with synonyms
synonyms_index = defaultdict(list)
for mge_record in db.records.values():
for synonym in mge_record.synonyms:
synonyms_index[synonym].append(mge_record)
    # verify MGE synonyms are not duplicated
for synonym, records in synonyms_index.items():
if len(records) > 1:
# Some MGEs can have the same synonymous names because they could be iso forms
names = [r.name for r in records]
msg = f'Several entries use the same synonymous names: {", ".join(names)}'
LOG.warning(msg)
def _validate_sequence_links(db) -> None:
"""Validate that each record links to correct resources."""
LOG.info('Validate links')
records = db.records
record_headers = {f'{r.name}|{seq_no}|{req_s.accession}'
for r in records.values()
for (seq_no, req_s) in enumerate(r.sequences, start=1)}
seq_file_headers = {seq.title for seq in db.record_sequences}
# get headers missing in either records annotation file or sequences
missing_in_sequences = seq_file_headers - record_headers
missing_in_records = record_headers - seq_file_headers
if len(missing_in_sequences) > 0:
raise MissingSeqRecord(names=missing_in_sequences)
elif len(missing_in_records) > 0:
raise MissingSeqRecord(names=missing_in_records)
def _validate_record_sequences(db):
"""Validate that record sequences are in valid fasta format."""
LOG.info('Validate record sequences')
entries = defaultdict(list)
for seq_entry in db.record_sequences:
entries[seq_entry.seq].append(seq_entry.title)
# Check for duplicated sequences
for seq_titles in entries.values():
if len(seq_titles) > 1:
raise DuplicatedSequence(names=seq_titles)
# Verify that sequence is valid fasta format
for seq, title in entries.items():
title = title[0] # Previously validated to be only one
try:
parse_db_fasta_header(title)
except ValueError:
raise InvalidHeaderFormat(names=[title])
if not seq:
raise SequenceError(names=[title])
if not re.match(_SEQ_PATTERN, seq):
nt_comp = {k: cnt for k, cnt in Counter(seq).most_common()}
if re.match(_SEQ_AMBIGIOUS_PATTERN, seq):
if re.match(_SEQ_UNKNOWN_NT_PATTERN, seq):
raise UnknownSequence(names=[title], **nt_comp)
nt_comp = ' '.join([f'{k}={cnt}' for k, cnt in nt_comp.items()])
                LOG.warning(f'Sequence contains ambiguous nt: {title}, nt_comp={nt_comp}')
else:
raise NonStandardNucleotide(names=[title], **nt_comp)
VALIDATOR_FNS: "list[Callable[..., None]]" = [
_validate_sequence_links,
_validate_record_information,
_validate_record_sequences,
_validate_nomenclature_types_and_names,
_validate_record_names,
]
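# Illustrative sketch (not part of the original module): the validators above
# are intended to be applied in sequence to a loaded database object, with
# each one raising its specific validation error on failure.
def _run_all_validators(db) -> None:
    """Hypothetical helper: run every registered validator against ``db``."""
    for validator in VALIDATOR_FNS:
        validator(db)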
/FP-SMC-ALS-test1-0.0.1.tar.gz/FP-SMC-ALS-test1-0.0.1/smc/core/general.py
import collections
from smc.base.model import Element
from smc.base.util import element_resolver
from smc.elements.profiles import DNSRelayProfile
from smc.base.structs import NestedDict
from smc.policy.interface import InterfacePolicy
from smc.api.exceptions import LoadPolicyFailed
class SNMP(object):
"""
SNMP configuration details for applying SNMP on an engine.
SNMP requires at minimum an assigned SNMPAgent configuration
which defines the SNMP specific settings (version, community
string, etc). You can also define specific interfaces to enable
SNMP on. By default, if no addresses are specified, SNMP will
be defined on all interfaces.
.. seealso:: :class:`smc.elements.profiles.SNMPAgent`
"""
def __init__(self, engine):
self.engine = engine
@property
def agent(self):
"""
The SNMP agent profile used for this engine.
:rtype: SNMPAgent
"""
return self.engine.from_href(getattr(self.engine, "snmp_agent_ref", None))
@property
def status(self):
"""
Status of SNMP on this engine
:rtype: bool
"""
return bool(getattr(self.engine, "snmp_agent_ref", False))
def disable(self):
"""
Disable SNMP on this engine. You must call `update` on the engine
for this to take effect.
:return: None
"""
self.engine.data.update(snmp_agent_ref=None, snmp_location="", snmp_interface=[])
def enable(self, snmp_agent, snmp_location=None, snmp_interface=None):
"""
Enable SNMP on the engine. Specify a list of interfaces
by ID to enable only on those interfaces. Only interfaces
that have NDI's are supported.
Example of adding SNMP on a port group interface::
engine = Engine('azure')
engine.snmp.enable(SNMPAgent('myagent'), snmp_interface=['SWP_0.1'])
engine.update()
:param str,Element snmp_agent: the SNMP agent reference for this engine
:param str snmp_location: the SNMP location identifier for the engine
:param list snmp_interface: list of interface IDs to enable SNMP
:raises ElementNotFound: unable to resolve snmp_agent
:raises InterfaceNotFound: specified interface by ID not found
"""
agent = element_resolver(snmp_agent)
snmp_interface = [] if not snmp_interface else snmp_interface
interfaces = self._iface_dict(snmp_interface)
self.engine.data.update(
snmp_agent_ref=agent,
snmp_location=snmp_location if snmp_location else "",
snmp_interface=interfaces,
)
def _iface_dict(self, snmp_interface):
return [
values
for interface in snmp_interface
for values in self.engine.interface.get(interface).ndi_interfaces
]
@property
def _nicids(self):
return [str(nic.get("nicid")) for nic in getattr(self.engine, "snmp_interface", [])]
def update_configuration(self, **kwargs):
"""
Update the SNMP configuration using any kwargs supported in the
`enable` constructor. Return whether a change was made. You must call
update on the engine to commit any changes.
:param dict kwargs: keyword arguments supported by enable constructor
:rtype: bool
"""
updated = False
if "snmp_agent" in kwargs:
kwargs.update(snmp_agent_ref=kwargs.pop("snmp_agent"))
snmp_interface = kwargs.pop("snmp_interface", None)
for name, value in kwargs.items():
_value = element_resolver(value)
if getattr(self.engine, name, None) != _value:
self.engine.data[name] = _value
updated = True
if snmp_interface is not None:
_snmp_interface = getattr(self.engine, "snmp_interface", [])
if not len(snmp_interface) and len(_snmp_interface):
self.engine.data.update(snmp_interface=[])
updated = True
elif len(snmp_interface):
if set(self._nicids) ^ set(map(str, snmp_interface)):
self.engine.data.update(snmp_interface=self._iface_dict(snmp_interface))
updated = True
return updated
@property
def location(self):
"""
Return the SNMP location string
:rtype: str
"""
return getattr(self.engine, "snmp_location", None)
@property
def interface(self):
"""
Return a list of physical interfaces that the SNMP
agent is bound to.
:rtype: list(PhysicalInterface)
"""
nics = set([nic.get("nicid") for nic in getattr(self.engine, "snmp_interface", [])])
return [self.engine.interface.get(nic) for nic in nics]
def __repr__(self):
return "{0}(enabled={1})".format(self.__class__.__name__, self.status)
class NTPSettings(object):
"""
This represents the definition of NTP settings.
"""
def __init__(self, engine=None):
self.engine = engine
if engine is None:
self.data = {}
@classmethod
def create(cls, ntp_enable, ntp_servers):
"""
        Create a new definition of NTPSettings
:param bool ntp_enable: is NTP enabled
:param list(NTPServer) ntp_servers: define one or more NTP Servers.
"""
list_ntp_server_href = [server.href for server in ntp_servers]
cls.data = {"ntp_settings": {
"ntp_enable": ntp_enable,
"ntp_server_ref": list_ntp_server_href
}}
return cls
@property
def ntp_enable(self):
"""
Is NTP enabled? (required)
:rtype: bool
"""
return self.engine.data["ntp_settings"].ntp_enable \
if self.engine is not None else self.data.get("ntp_enable")
@property
def ntp_servers(self):
"""
Define one or more NTP Servers.
:rtype: list(NTPServer)
"""
return [Element.from_href(ntp) for ntp in self.engine.data["ntp_settings"].ntp_server_ref] \
if self.engine is not None else \
[Element.from_href(ntp) for ntp in self.data["ntp_settings"].ntp_server_ref]
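# Usage sketch for NTPSettings.create (illustrative only; the NTPServer
# element names are assumptions):
#
#   servers = [NTPServer('ntp-primary'), NTPServer('ntp-backup')]
#   ntp = NTPSettings.create(ntp_enable=True, ntp_servers=servers)
#   # the returned object's ``data`` attribute now carries the
#   # 'ntp_settings' payload that gets attached to an engine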
class DNSRelay(object):
"""
DNS Relay allows the engine to provide DNS caching or specific
host, IP and domain replies to clients. It can also be used
to sinkhole specific DNS requests.
.. seealso:: :class:`smc.elements.profiles.DNSRelayProfile`
"""
def __init__(self, engine):
self.engine = engine
@property
def status(self):
"""
Status of DNS Relay on this engine.
:rtype: bool
"""
return getattr(self.engine, "dns_relay_profile_ref", False)
def enable(self, interface_id, dns_relay_profile=None):
"""
Enable the DNS Relay service on this engine.
:param int interface_id: interface id to enable relay
:param str,DNSRelayProfile dns_relay_profile: DNSRelayProfile element
or str href
:raises EngineCommandFailed: interface not found
:raises ElementNotFound: profile not found
:return: None
"""
if not dns_relay_profile: # Use default
href = DNSRelayProfile("Cache Only").href
else:
href = element_resolver(dns_relay_profile)
intf = self.engine.interface.get(interface_id)
self.engine.data.update(dns_relay_profile_ref=href)
self.engine.data.update(dns_relay_interface=intf.ndi_interfaces)
def disable(self):
"""
Disable DNS Relay on this engine
:return: None
"""
self.engine.data.update(dns_relay_interface=[])
self.engine.data.pop("dns_relay_profile_ref", None)
def __repr__(self):
return "{0}(enabled={1})".format(self.__class__.__name__, self.status)
class DefaultNAT(object):
"""
Default NAT on the engine is used to automatically create NAT
configurations based on internal routing. This simplifies the
need to create specific NAT rules, primarily for outbound traffic.
.. note:: You must call engine.update() to commit any changes.
"""
def __init__(self, engine):
self.engine = engine
@property
def status(self):
"""
Status of default nat on the engine.
:rtype: bool
"""
return self.engine.data["default_nat"]
def enable(self):
"""
Enable default NAT on this engine
"""
self.engine.data["default_nat"] = True
def disable(self):
"""
Disable default NAT on this engine
"""
self.engine.data["default_nat"] = False
def __repr__(self):
return "{0}(enabled={1})".format(self.__class__.__name__, self.status)
class RankedDNSAddress(object):
"""
A RankedDNSAddress represents a list of DNS entries used as a ranked list to
provide an ordered way to perform DNS queries.
DNS entries can be added as raw IP addresses, or as elements of type
:class:`smc.elements.network.Host`, :class:`smc.elements.servers.DNSServer`
or a dynamic_interface_alias (or combination of all). This is an iterable
class yielding namedtuples of type :class:`.DNSEntry`.
Normal access is done through an engine reference::
>>> list(engine.dns)
[DNSEntry(rank=0,value=8.8.8.8,ne_ref=None),
DNSEntry(rank=1,value=None,ne_ref=DNSServer(name=mydnsserver))]
>>> engine.dns.append(['8.8.8.8', '9.9.9.9'])
>>> engine.dns.prepend(['1.1.1.1'])
>>> engine.dns.remove(['8.8.8.8', DNSServer('mydnsserver')])
.. note:: You must call engine.update() to commit any changes.
"""
def __init__(self, entries):
self.entries = entries
def __iter__(self):
for entry in self.entries:
yield DNSEntry(**entry)
def __len__(self):
return len(self.entries)
def __contains__(self, entry):
for e in self:
try:
if e.ne_ref == entry.href:
return True
except AttributeError:
if e.value == entry:
return True
return False
def _rank_dns(self, entry, prepend=False):
        if (prepend and len(self)) or not len(self):
start_rank = 0
else:
start_rank = self.entries[-1].get("rank") + 1
additions = []
for e in entry:
if e not in self and e not in additions:
additions.append(e)
if not additions:
return
if prepend: # Rerank
for e in self.entries:
e.update((k, v + 1) for k, v in e.items() if k == "rank")
for num, addr in enumerate(additions, int(start_rank)):
try:
self.entries.append({"rank": float(num), "ne_ref": addr.href})
except AttributeError:
self.entries.append({"rank": float(num), "value": addr})
def add(self, values):
return self.append(values)
def append(self, values):
"""
Add DNS entries to the engine at the end of the existing list (if any).
A DNS entry can be either a raw IP Address, or an element of type
:class:`smc.elements.network.Host` or :class:`smc.elements.servers.DNSServer`.
:param list values: list of IP addresses, Host and/or DNSServer elements.
:return: None
.. note:: If the DNS entry added already exists, it will not be
added. It's not a valid configuration to enter the same DNS IP
multiple times. This is also true if the element is assigned the
same address as a raw IP address already defined.
"""
self._rank_dns(values)
def prepend(self, values):
"""
Prepend DNS entries to the engine at the beginning of the existing list
(if any). A DNS entry can be either a raw IP Address, or an element of type
:class:`smc.elements.network.Host` or :class:`smc.elements.servers.DNSServer`.
:param list values: list of IP addresses, Host and/or DNSServer elements.
:return: None
"""
self._rank_dns(values, prepend=True)
def remove(self, values):
"""
Remove DNS entries from this ranked DNS list. A DNS entry can be either
a raw IP Address, or an element of type :class:`smc.elements.network.Host`
or :class:`smc.elements.servers.DNSServer`.
:param list values: list of IP addresses, Host and/or DNSServer elements.
:return: None
"""
removables = []
for value in values:
if value in self:
removables.append(value)
if removables:
self.entries[:] = [
entry._asdict()
for entry in self
if entry.value not in removables and entry.element not in removables
]
# Rerank to maintain order
for i, entry in enumerate(self.entries):
entry.update(rank="{}".format(i))
class DNSEntry(collections.namedtuple("DNSEntry", "value rank ne_ref")):
"""
DNSEntry represents a single DNS entry within an engine
DNSAddress list.
:ivar str value: IP address value of this entry (None if type Element is used)
:ivar int rank: order rank for the entry
:ivar str ne_ref: network element href of entry. Use element property to resolve
to type Element.
:ivar Element element: If the DNS entry is an element type, this property
    will return a resolved version of the ne_ref field.
"""
__slots__ = ()
def __new__(cls, rank, value=None, ne_ref=None): # @ReservedAssignment
return super(DNSEntry, cls).__new__(cls, value, rank, ne_ref)
@property
def element(self):
return Element.from_href(self.ne_ref)
def __repr__(self):
return "DNSEntry(rank={0},value={1},ne_ref={2})".format(self.rank, self.value, self.element)
class Layer2Settings(NestedDict):
"""
Layer 2 Settings are only applicable on Layer 3 Firewall engines
that want to run specific interfaces in layer 2 mode. This
requires that a Layer 2 Interface Policy is applied to the engine.
You can also set connection tracking and bypass on overload
settings for these interfaces as well.
Set policy for the engine::
engine.l2fw_settings.enable(InterfacePolicy('mylayer2'))
:ivar bool bypass_overload_traffic: whether to bypass traffic on overload
:ivar str tracking_mode: connection tracking mode
.. note:: You must call engine.update() to commit any changes.
.. warning:: This feature requires SMC and engine version >= 6.3
"""
def __init__(self, engine):
l2 = engine.data["l2fw_settings"]
super(Layer2Settings, self).__init__(data=l2)
def connection_tracking(self, mode):
"""
Set the connection tracking mode for these layer 2 settings.
:param str mode: normal, strict, loose
:return: None
"""
if mode in ("normal", "strict", "loose"):
self.update(tracking_mode=mode)
def bypass_on_overload(self, value):
"""
Set the l2fw settings to bypass on overload.
:param bool value: boolean to indicate bypass setting
:return: None
"""
self.update(bypass_overload_traffic=value)
def disable(self):
"""
Disable the layer 2 interface policy
"""
self.pop("l2_interface_policy_ref", None)
def enable(self, policy):
"""
Set a layer 2 interface policy.
:param str,Element policy: an InterfacePolicy or str href
:raises LoadPolicyFailed: Invalid policy specified
:raises ElementNotFound: InterfacePolicy not found
:return: None
"""
if hasattr(policy, "href"):
if not isinstance(policy, InterfacePolicy):
                raise LoadPolicyFailed(
                    "Invalid policy type specified. The policy type must be InterfacePolicy"
                )
self.update(l2_interface_policy_ref=element_resolver(policy))
@property
def policy(self):
"""
Return the InterfacePolicy for this layer 3 firewall.
:rtype: InterfacePolicy
"""
return InterfacePolicy.from_href(self.get("l2_interface_policy_ref"))
def __repr__(self):
return "{0}(policy={1})".format(self.__class__.__name__, self.policy) | PypiClean |
/EggTranslations-1.2.1.tar.gz/EggTranslations-1.2.1/README.txt
========================
EggTranslations Overview
========================
EggTranslations is a flexible object-oriented resource
loader that is designed to work in projects that are
distributed as `Python eggs`_. Its main goals are to
support localization of resources (e.g. images, help
files and other documentation), and to allow localization
of text in your application by loading `GNU gettext`_
``.mo`` files. However, you don't have to be dealing with
localizable resources to use EggTranslations: You can
use it to organize non-localizable resources, too.
.. _Python eggs: http://peak.telecommunity.com/DevCenter/PythonEggs
.. _GNU gettext: http://www.gnu.org/software/gettext/
.. contents::
Source and Installation
=======================
The (read-only) subversion trunk is available at:
`http://svn.osafoundation.org/eggtranslations/trunk`_
.. _http://svn.osafoundation.org/eggtranslations/trunk: http://svn.osafoundation.org/eggtranslations/trunk#egg=eggtranslations-dev
EggTranslations is shipped as an `easy_install`_-able source
package. So, you can install it directly, or list it as a
dependency if you're using setuptools_.
How it works
============
The big idea here is that you can have a project or application
that you ship as a python egg, but whose localized resources
live in entirely separate python eggs. So, you can ship your
project's translations separately, so long as you package
resources as outlined below, and use EggTranslation APIs
to look up these resources.
EggTranslations works by combining the following:
* A set of eggs, each containing a **configuration file**,
called ``resources.ini`` by default. This file is located
in each egg's ``.egg-info`` directory.
* **Resource files**, also contained in the ``.egg-info``
directory.
* A **translations** object (an instance of ``EggTranslations``
or a subclass thereof). Each EggTranslations instance can
customize the locale set it supports, the name of the
configuration file to parse, and whether to employ locale set
fallback_ for localization and resource look up.
Let's look at each of these in turn:
The configuration (``resources.ini``) file
------------------------------------------
This file is in `INI file format`_, as parsed by the
`configobj`_ parser. This means it consists of *parameters*
(key-value pairs), divided into *sections*.
Sections
~~~~~~~~
Here's an example (empty) section::
[MyGreatProject::en]
The string before the ``::`` identifies the project you're
specifying resources for. (You'll later pass this project
name into various ``EggTranslation`` methods to read or
retrieve resources from the egg).
The string after the ``::`` specifies which locales this
section applies to. In general, you can supply a comma-separated
list of locales, e.g.::
[MyGreatProject::es, es_UY]
would specify that these resources apply to both Spanish (``es``) and
Uruguayan Spanish (``es_UY``). The localizations of ``MyGreatProject``
can be shipped in different Python eggs.
The ``all`` locale
~~~~~~~~~~~~~~~~~~
The string ``all`` as a locale name is special: It is used
to specify that the parameters that follow can be used as a
fallback_ (i.e. are used if no other value is found). Another
way of looking at this is that you can use ``all`` to specify
where to find non-localized resources.
Parameters
~~~~~~~~~~
Each key-value pair you specify in a section can be one of:
1. A translated **string value**, e.g. ::
status_message = Unable to load file.
2. A **path** relative to your egg's ``.egg-info`` directory::
help_files = resources/help
We'll examine how to use these in code below, but for now
let's note that there are several uses for the 2nd case here:
You can point at an entire directory of resources or at individual
resource files. In particular, you can also specify a gettext
``.mo`` (binary message object file), which will contain
translations for a particular locale.
Resource files
--------------
As mentioned before, all resource files are stored within
directories beneath ``.egg-info``. Note that since we are
`accessing resources`_ using the ``pkg_resources`` API, all
paths should be specified in your config file using '/' as
path separator, not an OS-dependent path separator.
While the most common cases of localizable files are
documentation and string translations, it's not uncommon
to allow localization of image resources, too (the most
infamous example is the octagonal "stop sign" icon, which
doesn't make sense in all locales).
Translation objects: The ``EggTranslations`` class
--------------------------------------------------
The ``EggTranslations`` constructor takes no arguments::
>>> from egg_translations import *
>>> translation = EggTranslations()
There is a separate initialization step where
you pass in the locale set you're interested in::
>>> translation.initialize(['fr_CA', 'fr'])
The reason for this is that frequently you'll set up
your object as a global, but will want to read the
user's preferred set of locales from the operating
system (e.g. from a 3rd-party library such as
`PyICU`_) or from some kind of persistent preference
settings.
.. _fallback:
``EggTranslations.initialize`` also takes a Boolean
``fallback`` keyword argument, which defaults to ``True``.
If you set it to ``False`` you will disable finding
resources in the ``all`` pseudo-locale, unless you
explicitly pass in ``"all"`` to the various resources
lookup/retrieval APIs.
``EggTranslations`` supports several methods for retrieving
resources. For example, if your ``resources.ini`` file contained
an entry::
[MyProject::all]
readme = docs/README.txt
you could get the contents of ``README.txt`` as a unicode string [#]_ via::
translation.getResourceAsString("MyProject", "readme")
This would allow localizers to translate ``README.txt``, so long
as they shipped it in an egg with a suitable ``resources.ini``. The
simplest way to do this is to have the translation egg match
the filesystem layout of MyProject's egg::
[MyProject::es]
readme = docs/README.txt
There's no particular requirement to do this, so long as the config
file entry points at a valid file. In other words, the Spanish
translation egg could have an entry::
[MyProject::es]
readme = docs/es/LEER
and the code using ``getResourceAsString()`` above would work
in either locale, so long as the file ``LEER`` was located
in ``docs/es`` beneath the ``.egg-info`` directory.
Depending on what type of resource you have, there are various
``EggTranslations`` methods that will help to discover or
extract resources. Besides the above, there's also a ``getText``
method that can be used to look up a string's translation in a
``.mo`` file.
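For example, if a project's config file maps a parameter named ``catalog``
to a ``.mo`` file, a lookup might look roughly like this (a sketch only;
the exact argument names are spelled out in the full API documentation)::

    # Returns the translated string if the loaded catalog has one,
    # otherwise a fallback (typically the string you passed in).
    message = translation.getText("MyProject", "catalog",
                                  u"Unable to load file.")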
For more details on accessing the contents of resource files, see
the `full documentation`_ for the ``EggTranslations`` class.
.. [#] All ``EggTranslations`` methods returning a ``unicode`` default
to assuming UTF-8 encoding, but can be overridden using the
``encoding`` keyword argument.
More on Locales
===============
EggTranslations assumes that a locale is an ASCII string
consisting of a two-letter language code, optionally followed
by an underscore and a two-letter country code, like ``"en"``
or ``"zh_TW"``. It will attempt to canonicalize locales
(i.e. truncate them if longer, and/or correct the case
of the country and language codes).
Some libraries (e.g. ICU) use locale names using a slightly
different format. If you want to use these, you should
subclass ``EggTranslations`` and override the
``normalizeLocale()``, ``isValidLocaleForm()`` and
``stripEncodingCode()`` methods.
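For example, a subclass that tolerates ICU-style names might look roughly
like this (a sketch only, not the library's own implementation)::

    class ICUEggTranslations(EggTranslations):
        def normalizeLocale(self, locale):
            # Keep just the language and (if present) 2-letter country code,
            # e.g. "zh_Hans_TW" -> "zh_TW", "fr-ca" -> "fr_CA".
            parts = locale.replace("-", "_").split("_")
            language = parts[0].lower()
            countries = [p.upper() for p in parts[1:] if len(p) == 2]
            return language + ("_" + countries[0] if countries else "")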
Putting it all Together
=======================
A common arrangement is to ship an application containing
fallback ("``all``") resources in its ``.egg-info``, and
then ship its localizations as plugin eggs. For example,
this is how Chandler_ packages its various translation
files.
Since ``EggTranslations`` `listens for egg activations`_,
this allows the application to detect new translations
automatically, so long as the ``EggTranslations`` instance
has been ``initialize()``-ed before the translation
plugins have been loaded.
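In code, that ordering usually amounts to something like the following
(``load_plugins`` is a stand-in for however your application activates its
plugin eggs)::

    translation = EggTranslations()
    translation.initialize(['fr_CA', 'fr'], fallback=True)
    load_plugins()  # translation eggs activated now are picked up automatically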
Feedback
========
Feedback, comments or questions are welcome, either via
the `chandler-dev mailing list`_, or on IRC_.
Example configuration file
==========================
For reference, here is an example resource.ini file::
# This is an example comment in a resource
# ini file
[myEggProject::all]
welcome.message=Greetings from my egg #This is the default message my
#users will see.
default.image = resource/default.png #This is the default image my
#users will see.
default.directory = resource
[myEggProject::fr_CA, fr_FR, fr] #All of these locales will
#use the entries defined below
welcome.message = Bonjour
default.image = locale/fr/resources/default.png
###
# This gettext catalog
# will automatically get
# loaded if the EggTranslations
# locale set contains one or more of the
# following 'fr_CA', 'fr_FR', 'fr'
###
gettext.catalog = locale/fr/myproject.mo
default.directory = locale/fr/resources
[myEggProject::es_UY, es]
welcome.message = Hola
default.image = locale/es/resources/default.png
###
# This gettext catalog will automatically get
# loaded if the EggTranslations
# locale set contains one or more of the
# following 'es_UY', 'es'
###
gettext.catalog = locale/es/myproject.mo
default.directory = locale/es/resources
[yourEggProject::fr]
gettext.catalog=locale/fr/yourproject.mo
.. _easy_install: http://peak.telecommunity.com/DevCenter/EasyInstall
.. _setuptools: http://peak.telecommunity.com/DevCenter/setuptools
.. _INI file format: http://en.wikipedia.org/wiki/INI_file
.. _configobj: http://www.voidspace.org.uk/python/configobj.html
.. _PyICU: http://pyicu.osafoundation.org/
.. _accessing resources: http://peak.telecommunity.com/DevCenter/PythonEggs#accessing-package-resources
.. _listens for egg activations: http://peak.telecommunity.com/DevCenter/PkgResources#receiving-change-notifications
.. _full documentation: http://packages.python.org/EggTranslations/
.. _Chandler: http://chandlerproject.org/
.. _chandler-dev mailing list: http://chandlerproject.org/mailinglists#ChandlerDevelopment
.. _IRC: http://chandlerproject.org/irc
Changelog
=========
1.2.1 (2009/06/26)
------------------
* Fix some SyntaxWarnings in Python 2.6, which arose from incorrect use of
the assert statement.
[grant]
1.2 (2009/02/11)
----------------
* Cleaned up project and docs for Cheeseshop upload.
[grant]
* Added API for locale normalization, which allows subclasses to implement,
say, ICU locale names like "zh_Hans".
[grant]
1.1 (2007/10/15)
----------------
* Added the ability to discover what locales are available for a particular
project. This info is very handy and can map directly to a UI locale picker
widget.
[bkirsch]
* Expanded the exception handling of hasTranslation.
[bkirsch]
1.0 (2007/04/25)
----------------
* Moved the version to 1.0 since the package has been in use for 6 months
in Chandler with no bugs reported.
[bkirsch]
* Add the hasTranslation method which checks if a gettext .mo file exists
and is loaded for the given locale
[bkirsch]
<1.0
----
* Created separate project from http://svn.osafoundation.org/chandler/trunk/chandler/projects/EggTranslations-plugin (rev 13152)
[bear]
| PypiClean |
/MGP_SDK-1.1.1.tar.gz/MGP_SDK-1.1.1/src/MGP_SDK/OGC_Spec/wmts.py
import requests
import MGP_SDK.process as process
from MGP_SDK.auth.auth import Auth
from pyproj import Transformer
import math
class WMTS:
def __init__(self, auth: Auth, endpoint):
self.auth = auth
self.version = auth.version
self.api_version = auth.api_version
self.endpoint = endpoint
if self.endpoint == 'streaming':
self.base_url = f'{self.auth.api_base_url}/streaming/{self.api_version}/ogc/gwc/service/wmts'
elif self.endpoint == 'basemaps':
self.base_url = f'{self.auth.api_base_url}/basemaps/{self.api_version}/seamlines/gwc/service/wmts'
# TODO handle raster / vector
self.token = self.auth.refresh_token()
self.authorization = {"Authorization": f"Bearer {self.token}"}
self.querystring = self._init_querystring()
def wmts_convert(self, zoom_level, laty, longx, crs=None):
"""
Function converts a lat long position to the tile column and row needed to return WMTS imagery over the area
Args:
zoom_level (int) = The desired zoom level
laty (int) = The desired latitude
longx (int) = The desired longitude
crs (string) = Desired projection. Defaults to None
Returns:
String values of the Tile Row and the Tile Column
"""
if crs == 'EPSG:4326' or not crs:
            # GetCapabilities call structure changed from SW2 to SW3, hardcoded tileMatrixSets instead of restructuring
# the XML parser
tileMatrixSet = {0: {'MatrixWidth': 2, 'MatrixHeight': 1}, 1: {'MatrixWidth': 4, 'MatrixHeight': 2}, 2:
{'MatrixWidth': 4, 'MatrixHeight': 2}, 3:{'MatrixWidth': 16, 'MatrixHeight': 8}, 4:
{'MatrixWidth': 32, 'MatrixHeight': 16}, 5: {'MatrixWidth': 64, 'MatrixHeight': 32}, 6:
{'MatrixWidth': 128, 'MatrixHeight': 64}, 7: {'MatrixWidth': 256, 'MatrixHeight': 128}, 8:
{'MatrixWidth': 512, 'MatrixHeight': 256}, 9: {'MatrixWidth': 1024, 'MatrixHeight': 512}, 10:
{'MatrixWidth': 2048, 'MatrixHeight': 1024}, 11: {'MatrixWidth': 4096, 'MatrixHeight': 2048}, 12:
{'MatrixWidth': 8192, 'MatrixHeight': 4096}, 13: {'MatrixWidth': 16384, 'MatrixHeight': 8192}, 14:
{'MatrixWidth': 32769, 'MatrixHeight': 16384}, 15: {'MatrixWidth': 65536, 'MatrixHeight': 32768}, 16:
{'MatrixWidth': 131072, 'MatrixHeight': 65536}, 17: {'MatrixWidth': 262144, 'MatrixHeight': 131072}, 18:
{'MatrixWidth': 524288, 'MatrixHeight': 262144}, 19:{'MatrixWidth': 1048576, 'MatrixHeight': 524288}, 20:
{'MatrixWidth': 2097152, 'MatrixHeight': 1048576}, 21: {'MatrixWidth': 4194304, 'MatrixHeight': 2097152}}
            matrixwidth = None
            matrixheight = None
            for i in tileMatrixSet:
                if i == zoom_level:
                    matrixwidth = tileMatrixSet[i]['MatrixWidth']
                    matrixheight = tileMatrixSet[i]['MatrixHeight']
            if not matrixwidth or not matrixheight:
                raise Exception('Unable to determine Matrix dimensions from input coordinates')
return str(round((float(longx) + 180) * (int(matrixwidth)/360))), str(round((90 - float(laty)) * (int(matrixheight)/180)))
else:
transformer = Transformer.from_crs(crs, "EPSG:4326")
x2, y2 = transformer.transform(longx, laty)
def deg2num(lat_deg, lon_deg, zoom):
lat_rad = math.radians(lat_deg)
n = 2.0 ** zoom
xtile = int((lon_deg + 180.0) / 360.0 * n)
ytile = int((1.0 - math.asinh(math.tan(lat_rad)) / math.pi) / 2.0 * n)
return (str(xtile), str(ytile))
tiles = deg2num(y2, x2, zoom_level)
return tiles
def wmts_get_tile(self, tilerow, tilecol, zoom_level, **kwargs):
"""
Function executes the wmts call and returns a response object of the desired tile
Args:
tilerow (string) = The desired tile row
tilecol (string) = The desired tile column
zoom_level (int) = The desired zoom level
Returns:
WMTS tiles for the input data
"""
token = self.auth.refresh_token()
authorization = {'Authorization': 'Bearer {}'.format(token)}
querystring = self.querystring
querystring['TileMatrix'] = querystring['tileMatrixSet'] + ':' + str(zoom_level)
querystring['tilerow'] = tilerow
querystring['tilecol'] = tilecol
querystring['request'] = 'GetTile'
response = requests.request("GET", self.base_url, headers=authorization, params=self.querystring, verify=self.auth.SSL)
process_response = process._response_handler(response)
return process_response
def wmts_bbox_get_tile_list(self, zoom_level, bbox, crs="EPSG:4326", **kwargs):
"""
        Function takes in a bbox and zoom level to return a list of WMTS calls that can be used to acquire all the WMTS
tiles. Projection defaults to EPSG:4326
Args:
zoom_level (int) = The desired zoom level
bbox (string) = Bounding box of AOI. Comma delimited set of coordinates. (miny,minx,maxy,maxx)
Returns:
List of WMTS calls.
"""
process._validate_bbox(bbox, srsname=crs)
bbox_list = [i for i in bbox.split(',')]
miny = float(bbox_list[0])
minx = float(bbox_list[1])
maxy = float(bbox_list[2])
maxx = float(bbox_list[3])
min_tilerow, min_tilecol = self.wmts_convert(zoom_level, miny, minx, crs)
max_tilerow, max_tilecol = self.wmts_convert(zoom_level, maxy, maxx, crs)
if max_tilerow < min_tilerow:
swap = max_tilerow
max_tilerow = min_tilerow
min_tilerow = swap
if max_tilecol < min_tilecol:
swap = max_tilecol
max_tilecol = min_tilecol
min_tilecol = swap
tiles = []
row_col = []
for i in range(int(min_tilecol), int(max_tilecol) + 1):
for j in range(int(min_tilerow), int(max_tilerow) + 1):
querystring = self.querystring
querystring['request'] = 'GetTile'
querystring['tileMatrixSet'] = crs
querystring['TileMatrix'] = querystring['tileMatrixSet'] + ':' + str(zoom_level)
querystring['TileRow'] = i
querystring['TileCol'] = j
tiles.append(self.base_url + '?' + "&".join("{}={}".format(key, value) for key, value in querystring.items()))
row_col.append((querystring['TileRow'], querystring['TileCol'], zoom_level))
combined = [tiles, row_col]
return combined
def _init_querystring(self):
typename = ''
if self.endpoint == 'streaming':
typename = 'Maxar:Imagery'
elif self.endpoint == 'basemaps':
typename = 'Maxar:seamline'
# TODO handle raster / vector
querystring = {
'service': 'WMTS',
'request': 'GetTile',
'version': '1.0.0',
'tileMatrixSet': 'EPSG:4326',
'Layer': typename,
'Format': 'image/jpeg',
'SDKversion': '{}'.format(self.version)
}
return querystring | PypiClean |
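# Usage sketch (illustrative only; the bbox and zoom level are arbitrary and a
# valid Auth/token configuration is required to actually fetch tiles):
#
#   auth = Auth()                      # configured elsewhere in MGP_SDK
#   wmts = WMTS(auth, 'streaming')
#   calls, row_cols = wmts.wmts_bbox_get_tile_list(12, '39.7,-105.1,39.9,-104.9')
#   for (tilerow, tilecol, zoom) in row_cols:
#       tile = wmts.wmts_get_tile(tilerow, tilecol, zoom)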
/Mtrax-2.2.07.zip/Mtrax-2.2.07/mtrax/hindsight.py
import numpy as num
from params import params
import ellipsesk as ell
import estconncomps as est
import kcluster
import matchidentities
import pylab as mpl
DEBUG = False
class MyDictionary(dict):
def __init__(self,emptyval=None):
self.emptyval = emptyval
dict.__init__(self)
def __getitem__(self,i):
if not self.has_key(i):
return self.emptyval
else:
return dict.__getitem__(self,i)
class Milestones:
def __init__(self,tracks,T=None):
if T is None:
T = len(tracks)
# if t is not a key to frame2births, frame2deaths, return an empty set
self.frame2births = MyDictionary(set([]))
self.frame2deaths = MyDictionary(set([]))
# if id is not a key to target2birth, target2death, return -inf, +inf
self.target2birth = MyDictionary(-num.inf)
self.target2death = MyDictionary(num.inf)
if T == 0:
return
for t in range(T):
self.update(tracks,t)
def update(self,tracks,t=None):
if DEBUG: print 'start of update: target2birth = ' + str(self.target2birth)
if t is None:
t = len(tracks)-1
if t < 0:
newborns = set([])
newdeaths = set([])
elif t == 0:
# if this is the first frame, nothing is born and nothing dies
newborns = set([])
newdeaths = set([])
else:
# otherwise, newborns are anything alive in this frame that was not alive
# in the previous frame
newborns = set(tracks[t].keys()) - set(tracks[t-1].keys())
if DEBUG: print 't = %d, tracks[t].keys = '%t + str(tracks[t].keys()) + ', tracks[t-1].keys = ' + str(tracks[t-1].keys())
# newdeaths are anything not alive in this frame that was alive in
# the previous frame
newdeaths = set(tracks[t-1].keys()) - set(tracks[t].keys())
if DEBUG: print 't = %d, newborns = '%t + str(newborns) + ', newdeaths = ' + str(newdeaths)
# add newborns to birth data structures
if len(newborns) > 0:
self.frame2births[t] = newborns
for id in newborns:
self.target2birth[id] = t
if DEBUG: print 'target2birth[%d] set to %d'%(id,self.target2birth[id])
# add newdeaths to death data structures
if len(newdeaths) > 0:
self.frame2deaths[t] = newdeaths
for id in newdeaths:
self.target2death[id] = t
if DEBUG: print 'target2death[%d] set to %d'%(id,t)
if DEBUG: print 'end of update: target2birth = ' + str(self.target2birth)
def getbirths(self,t):
return self.frame2births[t]
def getdeaths(self,t):
return self.frame2deaths[t]
def getbirthframe(self,id):
if DEBUG: print 'target2birth[%d] = %f'%(id,self.target2birth[id])
if self.target2birth.has_key(id):
return self.target2birth[id]
else:
return -num.inf
def getdeathframe(self,id):
if self.target2death.has_key(id):
return self.target2death[id]
else:
return num.inf
def deleteid(self,id):
if self.target2birth.has_key(id):
self.frame2births[self.target2birth.pop(id)].discard(id)
if self.target2death.has_key(id):
self.frame2deaths[self.target2death.pop(id)].discard(id)
def setdeath(self,id,frame):
if self.target2death.has_key(id):
self.frame2deaths[self.target2death.pop(id)].discard(id)
if not num.isinf(frame):
self.target2death[id] = frame
if len(self.frame2deaths[frame]) == 0:
self.frame2deaths[frame] = set([id,])
else:
self.frame2deaths[frame].add(id)
def setbirth(self,id,frame):
if self.target2birth.has_key(id):
            self.frame2births[self.target2birth.pop(id)].discard(id)
self.target2birth[id] = frame
if len(self.frame2births[frame]) == 0:
self.frame2births[frame] = set([id,])
else:
self.frame2births[frame].add(id)
def __str__(self):
s = ' id birth death\n'
for (id,birth) in self.target2birth.iteritems():
s += '%3d'%id
s += '%5d'%birth
if self.target2death.has_key(id):
s += '%7d'%self.target2death[id]
else:
s += ' ?'
s += '\n'
return s
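# Illustrative sketch (comments only): the milestones structure is rebuilt
# incrementally, one frame at a time, as tracks are extended:
#
#   milestones = Milestones(tracks)        # scan everything tracked so far
#   milestones.update(tracks)              # after appending a new frame to tracks
#   newborns = milestones.getbirths(t)     # identities first seen in frame t
#   deaths = milestones.getdeaths(t)       # identities last seen in frame t-1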
class Hindsight:
def __init__(self,tracks,bg):
self.tracks = tracks
self.bg = bg
self.maxdcenters = params.maxshape.major*4+params.maxshape.minor
def initialize_milestones(self):
self.milestones = Milestones(self.tracks)
def fixerrors(self,T=None):
if T is None:
T = len(self.tracks)
# update milestones for this frame
self.milestones.update(self.tracks,T-1)
if T < 3:
return
# we look for events that end in frame T-3
# we would like to use frames T-2, T-1 to predict position in T-3
# events:
#
# splitdetection:
# death of id1 in frame T-2 (i.e. id1 alive in frame T-3, not in frame T-2)
# birth of id1 in frame t1 (i.e. id1 alive in frame t1, not in frame t1-1)
# T - 3 - t1 - 1 <= params.splitdetection_length
# id2 that can be merged with id1 in all frames t1:T-3
# or
# death of id1 in frame T-2 (i.e. id1 alive in frame T-3, not in frame T-2)
# birth of id2 in frame t1 (i.e. id2 alive in frame t1, not in frame t1-1)
# T - 3 - t1 - 1 <= params.splitdetection_length
# id2 can be merged with id1 in all frames t1:T-3
# for each death in frame T-2
didfix = False
deathscurr = list(self.milestones.getdeaths(T-2))
if params.do_fix_split:
for id1 in deathscurr:
didfix |= self.fix_splitdetection(id1,T-2)
deathscurr = list(self.milestones.getdeaths(T-2))
if params.do_fix_spurious:
for id1 in deathscurr:
didfix |= self.fix_spuriousdetection(id1,T-2)
# for each birth in frame T-2
birthscurr = list(self.milestones.getbirths(T-2))
if params.do_fix_merged:
for id2 in birthscurr:
didfix |= self.fix_mergeddetection(id2,T-2)
birthscurr = list(self.milestones.getbirths(T-2))
if params.do_fix_lost:
for id2 in birthscurr:
didfix |= self.fix_lostdetection(id2,T-2)
def fix_spuriousdetection(self,id,t2):
print 'testing to see if death of id=%d in frame t2=%d is from a spurious detection'%(id,t2)
t1 = self.milestones.getbirthframe(id)
lifespan = t2 - t1
if lifespan > params.spuriousdetection_length:
print 'lifespan longer than %d, not spurious, not deleting'%params.spuriousdetection_length
return False
#elif (type(t1) is not num.dtype('int')) or \
# (type(t2) is not num.dtype('int')):
# print 'track birth: ' + str(t1) + ', track death: ' + str(t2) + ' are not both integers.'
# print 'type(t1) = ' + str(type(t1))
# print 'type(t2) = ' + str(type(t2))
# return False
# delete this track
for t in range(int(t1),int(t2)):
tmp = self.tracks[t].pop(id)
self.milestones.deleteid(id)
if DEBUG: print 'deleted track for id=%d with life span=%d'%(id,lifespan)
return True
def fix_lostdetection(self,id2,t2):
if DEBUG: print 'testing to see if birth of id2=%d in frame t2=%d is from a lost detection'%(id2,t2)
# look for death of some target id1 between t2-1 and t2-params.lostdetection_length+1
# where the distance between the predicted position of id1 in frame t2 is near the
# actual position of id2
T = len(self.tracks)
# initialize previous and current positions
curr = ell.TargetList()
prev = ell.TargetList()
curr[id2] = self.tracks[t2][id2]
if (t2 < T-1) and (self.tracks[t2+1].hasItem(id2)):
prev[id2] = self.tracks[t2+1][id2]
else:
prev[id2] = self.tracks[t2][id2]
# initialize the distances
mind = []
ids = []
# loop through all frames death can take place on
t3 = int(round(max(t2-params.lostdetection_length,0)))
t2 = int(t2)
for t1 in range(t2-1,t3,-1):
# update predicted location
pred = matchidentities.cvpred(prev,curr)
# compute distance to all targets from predicted location
for id1 in list(self.milestones.getdeaths(t1)):
mind.append(pred[id2].dist(self.tracks[t1-1][id1]))
ids.append(id1)
# update prev and curr positions
prev = curr
curr = pred
if len(mind) == 0:
if DEBUG: print 'no deaths within %d frames of birth of id2=%d in frame t2=%d'%(params.lostdetection_length,id2,t2)
return False
# compute closest newborn to predicted location
i = num.argmin(num.array(mind))
mind = mind[i]
id1 = ids[i]
# if this is too far, then we can't fix
if mind > params.lostdetection_distance:
if DEBUG: print 'id1=%d dies in frame %d, but distance between predicted positions = %.2f > %.2f'%(id1,self.milestones.getdeathframe(id1),mind,params.lostdetection_length)
return False
# get death frame of id1
t1 = self.milestones.getdeathframe(id1)
# add in tracks in frames t1 thru t2-1
# by interpolating
start = self.tracks[t1-1][id1]
end = self.tracks[t2][id2]
for t in range(t1,t2):
self.tracks[t][id1] = ellipseinterpolate(start,end,t-t1+1,t2-t)
# replace identity id2 in frames t2 thru death of id2 with id1
for t in range(t2,len(self.tracks)):
if not self.tracks[t].hasItem(id2):
if DEBUG: print 'no id2=%d in frame t=%d'%(id2,t)
break
tmp = self.tracks[t].pop(id2)
tmp.identity = id1
self.tracks[t][id1] = tmp
# update death, birth frame data structures
d2 = self.milestones.getdeathframe(id2)
# remove id2 from all data structures
self.milestones.deleteid(id2)
# reset death of id1 as d2
self.milestones.setdeath(id1,d2)
if DEBUG: print 'fixing lost detection: id1=%d lost in frame t1=%d, found again in frame t2=%d with id2=%d'%(id1,t1,t2,id2)
return True
def fix_mergeddetection(self,id3,t2):
if DEBUG: print 'testing to see if birth of id3=%d in frame t2=%d can be fixed by splitting'%(id3,t2)
if DEBUG: print 'tracks[%d][%d] = '%(t2,id3) + str(self.tracks[t2][id3])
if t2 < 2:
if DEBUG: print 't2=%d too small'%t2
return False
# requires birth of id3 in frame t2
# requires death of id1 in frame t1
# requires target id2 that can be split into id1 and id2 from frames t1 to t2-1
# with low cost
# for each id2 in frame t2-1:
# whose center is within distance maxdcenter
# of predicted center position of target id3 in frame t2-1
# for each t1 in t2-length:t2-1:
# for each id1 that dies in frame t1:
# such that the distance between the predicted position of id1 in frame t1
# and the center of id2 in frame t1 is smaller than maxdcenter
# try splitting id2 into two targets
# compute the optimal matching for frames t1:t2 between the newly split targets,
# id1, and id3
T = len(self.tracks)
# predicted position of id3 in t2-1 using position of id3 in t2, ...
prev = self.tracks[min(T-1,t2+1)]
curr = self.tracks[t2]
pred3 = self.cvpred(prev,curr,id3)
if DEBUG: print 'predicted position of id3=%d in t2-1=%d using position in t2=%d: '%(id3,t2-1,t2) + str(pred3)
# initialize list of potential (id1,id2) pairs
# this only approximates the distance from the split id2 to id3, split id2 to id1.
# it does not actually do the splitting
possible = self.initialize_possibleid2id1pairs(id3,t2,pred3)
if DEBUG: print 'possible (id2,id1) pairs: ' + str(possible)
# if no targets are sufficiently close, return
if len(possible) == 0:
if DEBUG: print 'no target centers are within distance %.2f of predicted position of target id3=%d in frame t2-1=%d'%(self.maxdcenters,id3,t2-1)
return False
if DEBUG: print 'based only on position of centers in frame t2-1=%d and deathframe(id1), possible id1,id2 pairs: '%(t2-1) + str(possible)
# compute the predicted positions of id2 in t2-1 from t2,...
pred2_t2 = self.pred_id2_t2(prev,curr,possible)
if DEBUG: print 'predicted positions of id2 in t2-1=%d: '%(t2-1) + str(pred2_t2)
# actually compute the clusterings of id2 at t2-1
clusterings_t2 = self.cluster_id2_t2(t2-1,possible,pred3,pred2_t2)
if DEBUG: print 'clusterings of id2 at t2-1=%d: '%(t2-1) + str(clusterings_t2)
# compute the cost, optimal assignment for each clustering
next = self.tracks[t2-1]
(cost_t2,assignments_t2) = self.compute_cost_and_assignment(clusterings_t2,prev,curr,next,
pred3,pred2_t2)
# update possible based on true costs
self.update_possible_t2(possible,cost_t2)
if len(possible) == 0:
if DEBUG: print 'performed clustering of all id2 in frame t2-1=%d, no resulting centers within a distance %.2f of predicted position of id3=%d'%(t2-1,params.mergeddetection_distance,id3)
return False
if DEBUG: print 'based only on clustering of %d in frame t2-1=%d, possible id1,id2 pairs: '%(id3,t2-1) + str(possible)
# predict position of targets id2, id1 in frame t1
(pred2_t1,pred1) = self.pred_t1(possible)
if DEBUG: print 'pred1 = ' + str(pred1)
if DEBUG: print 'pred2_t1 = ' + str(pred2_t1)
# actually compute the clusterings of id2 at t1 for each t1
clusterings_t1 = self.cluster_id2_t1(possible,pred2_t1,pred1)
# compute cost, optimal assignment for each clustering
(cost_t1,assignments_t1) = self.compute_cost_and_assignment_t1(clusterings_t1,possible,
pred2_t1,pred1)
# update possible based on true costs
self.update_possible_t1(possible,cost_t1)
if len(possible) == 0:
if DEBUG: print 'performed clustering of all id2 in frame deathframe(id1), no resulting centers within a distance %.2f of predicted position of id1'%params.mergeddetection_distance
return False
if DEBUG: print 'based on clustering of id2 in frames t2-1=%d and t1=deathframe(id1) possible id1,id2 pairs: '%(t2-1) + str(possible)
# choose the best (id2,id3) pair
costs = cost_t1.values()
pairs = cost_t1.keys()
pair = pairs[num.argmin(num.array(costs))]
id2 = pair[0]
id1 = pair[1]
t1 = self.milestones.getdeathframe(id1)
clustering_t1 = clusterings_t1[(t1,id2)]
assignment_t1 = assignments_t1[(id2,id1)]
clustering_t2 = clusterings_t2[id2]
assignment_t2 = assignments_t2[id2]
# fix
if DEBUG: print 'splitting id2=%d into id1=%d and id2=%d from frames t1=%d to t2-1=%d, replacing id3=%d'%(id2,id1,id2,t1,t2-1,id3)
# store old tracks in case we need them
oldtracks = {}
for t in range(t1,t2):
oldtracks[t] = self.tracks[t].copy()
# in frame t1, replace id2 with clustering[assignment[0]]
if clustering_t1 is None:
if DEBUG: print 'first clustering is None, not actually doing a fix'
return False
tmp = clustering_t1[assignment_t1[0]]
tmp.identity = id2
self.tracks[t1].append(tmp)
# in frame t1, add id1 = clustering[assignment[1]]
tmp = clustering_t1[assignment_t1[1]]
tmp.identity = id1
self.tracks[t1].append(tmp)
# for frames t1+1 through t2-1, cluster id2 and choose the best assignment
for t in range(t1+1,t2):
(cc,dfore) = self.cc(t)
prev = self.tracks[max(0,t-2)]
curr = self.tracks[max(0,t-1)]
pred1 = self.cvpred(prev,curr,id1)
pred2 = self.cvpred(prev,curr,id2)
clustering_t = splitobservation(cc==(id2+1),dfore,2,[pred1,pred2])
# can't split? then go back to old tracks and return
if clustering_t is None:
if DEBUG: print 'clustering is bad, reverting'
for tt in range(t1,t2):
self.tracks[tt] = oldtracks[tt]
return False
d1 = pred1.dist(clustering_t[0]) + pred2.dist(clustering_t[1])
d2 = pred1.dist(clustering_t[1]) + pred2.dist(clustering_t[0])
if d1 < d2:
assignment_t = [0,1]
else:
assignment_t = [1,0]
tmp = clustering_t[assignment_t[0]]
tmp.identity = id1
self.tracks[t].append(tmp)
if DEBUG: print 'adding ' + str(tmp) + ' to tracks'
tmp = clustering_t[assignment_t[1]]
tmp.identity = id2
self.tracks[t].append(tmp)
if DEBUG: print 'adding ' + str(tmp) + ' to tracks'
if DEBUG: print 'tracks[%d] is now: '%t + str(self.tracks[t])
# choose between: assigning (id3 <- id2, id2 <- id1) and
# (id3 <- id1,id2 <- id2)
prev = self.tracks[max(0,t2-2)]
curr = self.tracks[max(0,t2-1)]
pred1 = self.cvpred(prev,curr,id1)
pred2 = self.cvpred(prev,curr,id2)
d1 = pred1.dist(self.tracks[t2][id2]) + pred2.dist(self.tracks[t2][id3])
d2 = pred1.dist(self.tracks[t2][id3]) + pred2.dist(self.tracks[t2][id2])
if d1 < d2:
# from t2 to end
for t in range(t2,len(self.tracks)):
if (not self.tracks[t].hasItem(id2)) and \
(not self.tracks[t].hasItem(id3)):
break
# replace id2 with id1
if self.tracks[t].hasItem(id2):
tmp = self.tracks[t].pop(id2)
tmp.identity = id1
self.tracks[t].append(tmp)
# replace id3 with id2
if self.tracks[t].hasItem(id3):
tmp = self.tracks[t].pop(id3)
tmp.identity = id2
self.tracks[t].append(tmp)
# death frames for id2, id3
d2 = self.milestones.getdeathframe(id2)
d3 = self.milestones.getdeathframe(id3)
# delete id3
self.milestones.deleteid(id3)
# set id1 to die when id2 died
self.milestones.setdeath(id1,d2)
# set id2 to die when id3 died
self.milestones.setdeath(id2,d3)
else:
# from t2 to end
for t in range(t2,len(self.tracks)):
if not self.tracks[t].hasItem(id3):
break
# replace id3 with id1
tmp = self.tracks[t].pop(id3)
tmp.identity = id1
self.tracks[t].append(tmp)
# get death frame for id3
d3 = self.milestones.getdeathframe(id3)
# delete id3
self.milestones.deleteid(id3)
# id1 now dies when id3 died
self.milestones.setdeath(id1,d3)
return True
def fix_splitdetection(self,id1,t2):
if DEBUG: print 'trying to fix death of id1=%d in frame t2=%d by merging'%(id1,t2)
# case 1:
# 2, 2, 1/2, 1/2, 1/2, 2, 2, 2
# t1, , t2
# birth of id1 in frame t1
# death of id1 in frame t2
# target id2 alive from t1 to t2-1 that can be merged with id1 with low cost
# t2 - t1 small
# case 2:
# 1, 1, 1/2, 1/2, 1/2, 2, 2, 2
# t1, , t2
# birth of id2 in frame t1
# death of id1 in frame t2
# id1 and id2 can be merged with low cost from t1 to t2-1
# t2 - t1 small
# check if id1 is born late enough
isbornlate = t2 - self.milestones.getbirthframe(id1) <= params.splitdetection_length
# get a list of possible id2s with possible t1s
# if isbornlate, include all targets alive from (t1=target2birth[id1])-1 to t2
# always include all targets id2 alive in frame t2 such that
# t2-target2birth[id2] <= params.splitdetection_length
# with t1 = target2birth[id2]
t1 = self.milestones.getbirthframe(id1)
possible = set([])
# loop through all targets alive in frame t2
for id2 in self.tracks[t2].iterkeys():
if id1 == id2:
continue
# check to see if the birthdate is late enough
if t2 - self.milestones.getbirthframe(id2) <= params.splitdetection_length:
if (self.milestones.getbirthframe(id2)>t1) and (self.milestones.getbirthframe(id2)<t2):
possible.add((id2,self.milestones.getbirthframe(id2)))
# check to see if id2 alive in frame (t1=target2birth[id1])-1
if isbornlate:
if self.milestones.getbirthframe(id2)<t1:
possible.add((id2,t1))
if len(possible) == 0:
if DEBUG: print 'no targets id2 born within %d frames of t2=%d'%(params.splitdetection_length,t2)
return False
if DEBUG: print 'based just on birth frames, possible (id2,birthframe(id2))s: '+str(possible)
# limit to those centers that are close enough in frames t1:t2-1
self.update_close_centers(id1,t2,possible)
if len(possible) == 0:
if DEBUG: print 'none of the id2s centers are within distance %.2f of id1=%d in all frames between birthframe(id2) and t2=%d'%(self.maxdcenters,id1,t2)
return False
if DEBUG: print '(id2,birth(id2)) whose centers are within distance %.2f of id1=%d in all frames between birthframe(id2) and t2=%d: '%(self.maxdcenters,id1,t2) + str(possible)
# compute the penalty for merging
(mergecosts,merged_targets) = self.compute_merge_cost(id1,t2,possible)
# choose the minimum mergecost
costs = mergecosts.values()
pairs = mergecosts.keys()
pair = pairs[num.argmin(costs)]
mergecost = mergecosts[pair]
# see if this is small enough
if mergecost > params.splitdetection_cost:
if DEBUG: print 'cost of merging for all id2 is too high'
return False
id2 = pair[0]
t1 = pair[1]
merged_target = merged_targets[pair]
if DEBUG: print 'choosing to merge with id2=%d from frame t1=%d to t2=%d, cost is %.2f'%(id2,t1,t2,mergecost)
# which target is born last? we will delete that target
if self.milestones.getbirthframe(id1) < self.milestones.getbirthframe(id2):
firstborn = id1
lastborn = id2
else:
firstborn = id2
lastborn = id1
# perform the merge
for t in range(t1,t2):
# delete lastborn
tmp = self.tracks[t].pop(lastborn)
# replace id2 with merged_target
merged_target[t-t1].identity = firstborn
if DEBUG: print 'deleting target %d from frame %d: '%(lastborn,t) + str(self.tracks[t][lastborn])
if DEBUG: print 'replacing target %d in frame %d: '%(firstborn,t) + str(self.tracks[t][firstborn])
if DEBUG: print 'with: ' + str(merged_target[t-t1])
self.tracks[t].append(merged_target[t-t1])
# replace the lastborn after t2 with the firstborn
for t in range(t2,len(self.tracks)):
if not self.tracks[t].hasItem(lastborn):
break
tmp = self.tracks[t].pop(lastborn)
tmp.identity = firstborn
self.tracks[t].append(tmp)
# update milestones
# set death date of first born
self.milestones.setdeath(firstborn,max(self.milestones.getdeathframe(firstborn),
self.milestones.getdeathframe(lastborn)))
# delete lastborn
self.milestones.deleteid(lastborn)
return True
def update_close_centers(self,id1,t2,possible):
tmp = list(possible)
for pair in tmp:
id2 = pair[0]
t1 = pair[1]
for t in range(t1,t2):
d = num.sqrt((self.tracks[t][id1].x-self.tracks[t][id2].x)**2. + \
(self.tracks[t][id1].y-self.tracks[t][id2].y)**2.)
if d > self.maxdcenters:
possible.remove(pair)
break
def compute_merge_cost(self,id1,t2,possible):
costs = {}
merged_targets = {}
for pair in possible:
id2 = pair[0]
t1 = pair[1]
merged_targets[pair] = []
costs[pair] = -num.inf
for t in range(t1,t2):
if DEBUG: print 'computing merge cost for frame %d'%t
# get the connected component image
(cc,dfore) = self.cc(t)
ccelements = num.unique(cc)
if DEBUG:
for ccelement in ccelements:
(tmp1,tmp2) = num.where(cc==ccelement)
print 'count(%d) = %d'%(ccelement,len(tmp1))
if DEBUG: print 'id1=%d,id2=%d'%(id1,id2)
if DEBUG: print 'tracks[%d][%d] = '%(t,id1) + str(self.tracks[t][id1])
if DEBUG: print 'tracks[%d][%d] = '%(t,id2) + str(self.tracks[t][id2])
(cost,targ) = est.hindsight_computemergepenalty(self.tracks[t],id1,id2,cc,dfore)
costs[pair] = max(costs[pair],cost)
# if the cost is too high, then just return
if costs[pair] > params.splitdetection_cost:
break
targ.identity = id2
merged_targets[pair].append(targ)
if DEBUG: print 'result of merging ' + str(self.tracks[t][id1])
if DEBUG: print 'and ' + str(self.tracks[t][id2])
if DEBUG: print '-> ' + str(merged_targets[pair][-1])
return (costs,merged_targets)
def cc(self,t):
# perform background subtraction
(dfore,bw) = self.bg.sub_bg(t+params.start_frame)
# for each pixel, find the target it most likely belongs to
(y,x) = num.where(bw)
mind = num.zeros(y.shape)
mind[:] = num.inf
closest = num.zeros(y.shape)
for targ in self.tracks[t].itervalues():
S = est.ell2cov(targ.major,targ.minor,targ.angle)
Sinv = num.linalg.inv(S)
xx = x.astype(float) - targ.x
yy = y.astype(float) - targ.y
d = xx**2*Sinv[0,0] + 2*Sinv[0,1]*xx*yy + yy**2*Sinv[1,1]
issmall = d <= mind
mind[issmall] = d[issmall]
closest[issmall] = targ.identity
# set each pixel in L to belong to the closest target
L = num.zeros(bw.shape)
L[bw] = closest+1
#mpl.imshow(L)
#mpl.show()
return (L,dfore)
def cvpred(self,prev,curr,id):
if not prev.hasItem(id):
if curr.hasItem(id):
prev = curr
else:
return -1
if not curr.hasItem(id):
curr = prev
currlist = ell.TargetList()
prevlist = ell.TargetList()
prevlist[id] = prev
currlist[id] = curr
pred = matchidentities.cvpred(prev,curr)[id]
return pred
def initialize_possibleid2id1pairs(self,id3,t2,pred3):
# initialize list of potential id2s
# this approximates the distance from id3 to the split id2, and returns those
# id2s that are close enough to id3 in t2-1
possibleid2s = self.initialize_possibleid2s(id3,t2,pred3)
if DEBUG: print 'id2s that are close enough to id3=%d in frame t2-1=%d: '%(id3,t2-1) + str(possibleid2s)
possible = set([])
# loop through possible frames t1 that id1 dies
t3 = max(t2-int(params.mergeddetection_length)-1,-1)
if DEBUG: print 't3 = ' + str(t3) + 't2 = ' + str(t2)
t3 = int(t3)
t2 = int(t2)
for t1 in range(t2-1,t3,-1):
# if id2 is not alive in frame t1-1, then remove it as a possibility for any
if DEBUG: print 't1 = %d'%t1
# id2 dying at this frame or before
possibleid2s -= self.milestones.getbirths(t1)
if DEBUG: print 'targets born in frame t1=%d: '%t1
if DEBUG: print self.milestones.getbirths(t1)
if DEBUG: print 'possibleid2s is now: ' + str(possibleid2s)
if DEBUG: print 'targets died in frame t1=%d: '%t1 + str(self.milestones.getdeaths(t1))
# loop through all deaths in this frame
for id1 in list(self.milestones.getdeaths(t1)):
if DEBUG: print 'trying id1 = %d'%id1
if DEBUG: print 'birth frame of id1 = ' + str(self.milestones.getbirthframe(id1))
# compute predicted position of id1 in frame t1
prev = self.tracks[max(0,t1-2)]
curr = self.tracks[t1-1]
if DEBUG: print 'prev[id1=%d] = '%id1 + str(prev[id1])
if DEBUG: print 'curr[id1=%d] = '%id1 + str(curr[id1])
pred1 = self.cvpred(prev,curr,id1)
if DEBUG: print 'pred1 = ' + str(pred1)
for id2 in possibleid2s:
# check to see if id2 is reasonably close to id1
                    d = num.sqrt((self.tracks[t1][id2].x-pred1.x)**2. + \
                                 (self.tracks[t1][id2].y-pred1.y)**2.)
if d < self.maxdcenters:
possible.add((id2,id1))
if DEBUG: print 'adding (id2=%d,id1=%d)'%(id2,id1)
if DEBUG: print 'id2=%d born in frame '%id2 + str(self.milestones.getbirthframe(id2)) + ', died in frame ' + str(self.milestones.getdeathframe(id2))
if DEBUG: print 'id1=%d born in frame '%id1 + str(self.milestones.getbirthframe(id1)) + ', died in frame ' + str(self.milestones.getdeathframe(id1))
return possible
def initialize_possibleid2s(self,id3,t2,pred3):
# compute targets that are close to predicted location of id3 in t2-1
# and are alive in t2
possible = set([])
if DEBUG: print 'initialize_possibleid2s, pred3 = ' + str(pred3)
for id2 in self.tracks[t2-1].iterkeys():
# alive in t2?
if not self.tracks[t2].hasItem(id2):
continue
# before splitting using clustering, check to see if id2 is
# reasonably close
d = num.sqrt((pred3.x-self.tracks[t2-1][id2].x)**2. + \
(pred3.y-self.tracks[t2-1][id2].y)**2.)
if d < self.maxdcenters:
possible.add(id2)
if DEBUG: print 'distance to id2 = ' + str(self.tracks[t2-1][id2]) + ' = %f'%d
return possible
def pred_id2_t2(self,prev,curr,possible):
pred2_t2 = {}
for pair in possible:
id2 = pair[0]
if not pred2_t2.has_key(id2):
pred2_t2[id2] = self.cvpred(prev,curr,id2)
return pred2_t2
def cluster_id2_t2(self,t,possible,pred3,pred2):
(cc,dfore) = self.cc(t)
# set containing all possible id2s
possibleid2s = set([])
for pair in possible:
possibleid2s.add(pair[0])
clusterings = {}
for id2 in possibleid2s:
pred = [pred3,pred2[id2]]
clusterings[id2] = splitobservation(cc==(id2+1),dfore,2,pred)
return clusterings
def compute_cost_and_assignment(self,clusterings,prev,curr,next,
pred,pred2s):
cost = {}
assignment = {}
for (id2,clustering) in clusterings.iteritems():
# if no pixels to cluster, clustering will be None
# set cost to be large in this case
if clustering is None:
cost[id2] = num.inf
assignment[id2] = [0,1]
continue
if DEBUG: print 'clustering = ' + str(clustering)
# predict position of id2 in frame
pred2 = pred2s[id2]
d1 = pred.dist(clustering[0]) + pred2.dist(clustering[1])
d2 = pred.dist(clustering[1]) + pred2.dist(clustering[0])
if d1 < d2:
cost[id2] = d1
assignment[id2] = [0,1]
else:
cost[id2] = d2
assignment[id2] = [1,0]
cost[id2] -= pred2.dist(next[id2])
return (cost,assignment)
def update_possible_t2(self,possible,cost):
for (j,pair) in enumerate(list(possible)):
if cost[pair[0]] > params.mergeddetection_distance:
possible.remove(pair)
def pred_t1(self,possible):
pred2 = {}
pred1 = {}
for pair in possible:
id2 = pair[0]
id1 = pair[1]
t1 = self.milestones.getdeathframe(id1)
if DEBUG: print 't1 = ' + str(t1)
if t1 == 1:
pred2[id2] = self.tracks[t1-1][id2]
pred1[id1] = self.tracks[t1-1][id1]
else:
prev = self.tracks[t1-2]
curr = self.tracks[t1-1]
if DEBUG: print 'prev = ' + str(prev)
if DEBUG: print 'curr = ' + str(curr)
if DEBUG: print 'tracks from t1-10=%d to end=%d'%(t1-10,len(self.tracks)-1)
if DEBUG:
for tmp in range(max(t1-10,0),len(self.tracks)):
print 'tracks[%d] = '%tmp + str(self.tracks[tmp])
if not pred2.has_key(id2):
pred2[id2] = self.cvpred(prev,curr,id2)
if not pred1.has_key(id1):
pred1[id1] = self.cvpred(prev,curr,id1)
return (pred2,pred1)
def cluster_id2_t1(self,possible,pred2,pred1):
clusterings_t1 = {}
for pair in possible:
id2 = pair[0]
id1 = pair[1]
t1 = self.milestones.getdeathframe(id1)
if not clusterings_t1.has_key((t1,id2)):
(cc,dfore) = self.cc(t1)
pred = [pred2[id2],pred1[id1]]
clusterings_t1[(t1,id2)] = splitobservation(cc==(id2+1),dfore,2,pred)
return clusterings_t1
def compute_cost_and_assignment_t1(self,clusterings_t1,possible,
pred2s,pred1s):
cost = {}
assignment = {}
for pair in possible:
id2 = pair[0]
id1 = pair[1]
t1 = self.milestones.getdeathframe(id1)
clustering = clusterings_t1[(t1,id2)]
if clustering is None:
cost[pair] = num.inf
assignment[pair] = [0,1]
continue
pred2 = pred2s[id2]
pred1 = pred1s[id1]
d1 = pred2.dist(clustering[0]) + pred1.dist(clustering[1])
d2 = pred2.dist(clustering[1]) + pred1.dist(clustering[0])
if d1 < d2:
cost[pair] = d1
assignment[pair] = [0,1]
else:
cost[pair] = d2
assignment[pair] = [1,0]
cost[pair] -= pred2.dist(self.tracks[t1][id2])
return (cost,assignment)
def update_possible_t1(self,possible,cost):
for (j,pair) in enumerate(list(possible)):
if cost[pair] > params.mergeddetection_distance:
tmp = possible.remove(pair)
def splitobservation(bw,dfore,k,init):
(r,c) = num.where(bw)
if DEBUG: print 'number of pixels in component being split: %d'%len(r)
x = num.hstack((c.reshape(c.size,1),r.reshape(r.size,1)))
w = dfore[bw]
if DEBUG: print 'data being clustered: '
if DEBUG: print x
if DEBUG: print 'with weights: '
if DEBUG: print w
# create means and covariance matrices to initialize
mu0 = num.zeros((k,2))
S0 = num.zeros((2,2,k)) # one 2x2 covariance per component, matching the S0[:,:,i] indexing below
priors0 = num.zeros(k)
for i in range(k):
if DEBUG: print 'predicted ellipse %d: '%i + str(init[i])
mu0[i,0] = init[i].x
mu0[i,1] = init[i].y
S0[:,:,i] = est.ell2cov(init[i].major,init[i].minor,init[i].angle)
priors0[i] = init[i].area
(tmpmajor,tmpminor,tmpangle) = est.cov2ell(S0[:,:,i])
priors0 = priors0 / num.sum(priors0)
if DEBUG: print 'initializing with '
if DEBUG: print 'mu0 = '
if DEBUG: print mu0
if DEBUG: print 'S0 = '
if DEBUG:
for i in range(k):
print S0[:,:,i]
if DEBUG: print 'priors0 = '
if DEBUG: print priors0
# are there no data points?
if len(r) == 0:
return None
(mu,S,priors,gamma,negloglik) = kcluster.gmmem(x,mu0,S0,priors0,weights=w,thresh=.1,mincov=.015625)
obs = []
for i in range(k):
(major,minor,angle) = est.cov2ell(S[:,:,i])
obs.append(ell.Ellipse(mu[i,0],mu[i,1],minor,major,angle))
obs[-1].compute_area()
return obs
def ellipseinterpolate(ell1,ell2,dt1,dt2):
# weight of each term in the average
z = float(dt1 + dt2)
w1 = float(dt2) / z
w2 = float(dt1) / z
ell = ell1.copy()
ell.x = ell1.x*w1 + ell2.x*w2
ell.y = ell1.y*w1 + ell2.y*w2
ell.major = ell1.major*w1 + ell2.major*w2
ell.minor = ell1.minor*w1 + ell2.minor*w2
ell.compute_area()
# find signed distance from angle1 to angle2
# this will be between -pi/2 and pi/2
dangle = ((ell2.angle-ell1.angle+num.pi/2.) % (num.pi)) - (num.pi/2.)
theta1 = ell1.angle
theta2 = ell1.angle + dangle
ell.angle = theta1*w1 + theta2*w2
return ell
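# Worked example of the angle handling in ellipseinterpolate (illustrative numbers
# only): orientations are defined modulo pi, so for theta1 = 0.1 and theta2 = 3.0
#   dangle = ((3.0 - 0.1 + pi/2) % pi) - pi/2 ~ -0.24,
# i.e. theta2 is treated as 3.0 - pi ~ -0.14, and with equal weights the
# interpolated angle is ~ 0.5*(0.1 - 0.14) ~ -0.02 instead of the naive ~ 1.55.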
def computemergepenalty(ellipses,i,j,L,dfore):
# compute parameters of merged component
BWmerge = num.logical_or(L == i+1,L == j+1)
if not BWmerge.any():
return (0.,ellipses[i])
ellipsemerge = weightedregionpropsi(BWmerge,dfore[BWmerge])
print 'in computemergepenalty, ellipsemerge is: ' + str(ellipsemerge)
# find pixels that should be foreground according to the ellipse parameters
(r1,r2,c1,c2) = getboundingboxtight(ellipsemerge,L.shape)
isforepredmerge = ellipsepixels(ellipsemerge,num.array([r1,r2,c1,c2]))
# pixels that were foreground
isforepredi = ellipsepixels(ellipses[i],num.array([r1,r2,c1,c2]))
isforepredj = ellipsepixels(ellipses[j],num.array([r1,r2,c1,c2]))
isforepredi = num.logical_or(isforepredi, (L[r1:r2,c1:c2]==i+1))
# pixels that are now foreground that weren't before
newforemerge = num.logical_and(isforepredmerge,num.logical_or(isforepredi,isforepredj)==False)
# compute the total background score for this new region that must be foreground
dforemerge = dfore[r1:r2,c1:c2].copy()
dforemerge = 1 - dforemerge[newforemerge]
dforemerge[dforemerge<0] = 0
mergepenalty = num.sum(dforemerge)
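# i.e. mergepenalty = sum over newly-foreground pixels p of max(0, 1 - dfore[p]):
# the more background-looking pixels the merged ellipse would have to claim,
# the larger the penalty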
print 'mergepenalty = ' + str(mergepenalty)
#print 'in computemergepenalty, ellipsemerge is: ' + str(ellipsemerge)
return (mergepenalty,ellipsemerge)
# /BANGal-0.0.2.tar.gz/BANGal-0.0.2/src/BANG/configuration.py
import yaml
from BANG.parameter_search import GalaxyModel
import BANG.data as d
import numpy as np
import cpnest
from BANG.data import data
from BANG.result_analysis import Result_Analysis
from BANG.model_creation import model
import matplotlib.pyplot as plt
from astropy.cosmology import FlatLambdaCDM
import os
def read_yaml(file_path):
'''
Function
----------------------------------------------------------------------
----------------------------------------------------------------------
read yaml (i.e. configuration) file
Parameters:
file_path: str
absolute path to read
Returns:
'''
with open(file_path, "r") as f:
return yaml.safe_load(f)
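# Minimal usage sketch (the file name and YAML keys below are placeholders, shown
# only to make the expected structure concrete):
#
#   config = read_yaml('galaxy_config.yaml')
#   print(config["galaxy"]["Name"], config["Settings"]["input_path"])
#
# where galaxy_config.yaml would contain, e.g.:
#
#   galaxy:
#     Name: example_galaxy
#     redshift: 0.02
#   Settings:
#     input_path: /path/to/dataset.npy
#     output_path: /path/to/output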
def read_user(dataset_path,use_ML,velocity_offset=False,
              SNR_threshold_velocity=0.,SNR_threshold_dispersion=0.,SNR_threshold_ML=0.):
'''
Function
----------------------------------------------------------------------
----------------------------------------------------------------------
read data (i.e. configuration) file. The user should adapt it to its
own dataset
Parameters:
dataset_path: str
absolute path of the data file
use_ML: bool
    If False then ML *= np.nan such that it is not
    used in the analysis.
velocity_offset, SNR_threshold_velocity, SNR_threshold_dispersion, SNR_threshold_ML:
    accepted for compatibility with the call made in Config_reader; this
    example reader does not apply them, adapt it to your own dataset.
Returns:
x_arcsec: 1D arr float32
x positions in arcsecond
y_arcsec: 1D arr float32
y positions in arcsecond
x_kpc: 1D arr float32
x positions in kpc
y_kpc: 1D arr float32
y positions in kpc
ibrightness_data: 1D arr float32
log10 brightness (i-band in this case).
Use np.nan for masking pixels.
v_data: 1D arr float32
line of sight velocity data.
Use np.nan for masking pixels.
sigma_data: 1D arr float32
line of sight velocity dispersion data.
Use np.nan for masking pixels.
ML_data: 1D arr float32
Mass to light ratio data.
Use np.nan for masking pixels.
ibrightness_error: 1D arr float32
    log10 brightness error (i-band in this case).
    Use np.nan for masking pixels.
v_error: 1D arr float32
    line of sight velocity data error.
    Use np.nan for masking pixels.
sigma_error: 1D arr float32
    line of sight velocity dispersion data error.
    Use np.nan for masking pixels.
ML_error: 1D arr float32
    Mass to light ratio data error.
    Use np.nan for masking pixels.
goodpix: 1D arr float32
If goodpix[i] == 0. then pixel "i" is not considered.
'''
a = np.load(dataset_path)
x_arcsec = a[:,0]
y_arcsec = a[:,1]
x_kpc = a[:,2]
y_kpc = a[:,3]
ibrightness_data = a[:,4]
v_data = a[:,5]
sigma_data = a[:,6]
ML_data = a[:,7]
ibrightness_error = a[:,8]
v_error = a[:,9]
sigma_error = a[:,10]
ML_error = a[:,11]
goodpix = a[:,12]
SN = a[:,13]  # per-pixel S/N, available for user-defined masking (not applied here)
if use_ML:
pass
else:
ML_data *= np.nan
ML_error *= np.nan
return x_arcsec,y_arcsec,x_kpc,y_kpc,ibrightness_data,v_data,sigma_data,ML_data,ibrightness_error,v_error,sigma_error,ML_error,goodpix
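# The dataset is expected to be a single 2D float array with one row per pixel and
# (at least) 14 columns in the order documented above.  A minimal sketch for writing
# such a file (dummy values, hypothetical path):
#
#   import numpy as np
#   N_pix = 100
#   a = np.full((N_pix, 14), np.nan, dtype=np.float32)
#   # fill columns 0-13: x/y in arcsec and kpc, data, errors, goodpix, S/N ...
#   np.save('/path/to/dataset.npy', a)
#   out = read_user('/path/to/dataset.npy', use_ML=True)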
class Config_reader(data,Result_Analysis,model):
def __init__(self,config_path,gpu_device=0):
'''
A class for reading configuration files and performing parameter estimation and all diagnostic plots.
----------------------------------------------------------------------
----------------------------------------------------------------------
Attributes:
config_path: str
name/path of the configuration file
gpu_device: int16
integer referring to the gpu machine. Useful only
in case of multi-gpu usage
Methods:
parameter_estimation(gpu_device=0):
Compute parameter estimation.
diagnostic(savename='corner.png',show=True,drop_sys_rho=False,drop_halo=False)
Plot corner plot of the whole parameter space
diagnostic_iteration(savename='iteration.png',show=True,drop_sys_rho=False,drop_halo=False)
Plot each parameter as a function of iterations
maps_2D(savename='2D_maps.png',plot_ML=True,show=True,percentile=[50,16,84],write=True,lim='data',savedata='2D_informations.npy',vmin=None,vmax=None,close=False)
Plot best fit model (2D maps)
maps_1D(savename='1D_profiles.png',plot_ML=True,show=True,percentile=[50,16,84],write=True,close=False)
Plot best fit model (1D maps, averaged profiles)
maps_1D_all_outputs(savename='1D_profiles.png',plot_ML=True,show=True,percentile=[50,16,84],savedata='1D_informations.npy',write=True,close=False)
Plot best fit model with all the components(1D maps, averaged profiles)
maps_2D_specify_params(Best_fit_params={},savename='2D_maps.png',plot_ML=True,show=True,write=True,lim='data',vmin=None,vmax=None,close=False)
Plot model with using Best_fit_params as parameters (2D maps)
maps_1D_specify_params(Best_fit_params={},savename='1D_profiles.png',plot_ML=True,show=True,write=True,close=False)
Plot model with using Best_fit_params as parameters (1D maps, averaged profiles)
best_params_all_fits(write=True,filename='all_params.txt')
Write best fit params to file while fitting multiples objects
mass_plot(min_r=0.1,max_r=10,Nbins=20,figname='mass_profile.png')
Plot mass profile, actually available only with Bulge + inner disc+ outer disc model
'''
#super().__init__(config_path)
#super(Config_reader,self).__init__()
config = read_yaml(config_path)
if "SNR_threshold_velocity" in config["Settings"]:
self.SNR_threshold_velocity = config["Settings"]["SNR_threshold_velocity"]
if "SNR_threshold_dispersion" in config["Settings"]:
self.SNR_threshold_dispersion = config["Settings"]["SNR_threshold_dispersion"]
if "SNR_threshold_ML" in config["Settings"]:
self.SNR_threshold_ML = config["Settings"]["SNR_threshold_ML"]
else:
self.SNR_threshold = 0.
if "velocity_offset" in config["Settings"]:
self.vel_offset = config["Settings"]["velocity_offset"]
else:
self.vel_offset = False
if "use_ML" in config["Settings"]:
self.use_ML = config["Settings"]["use_ML"]
else:
self.use_ML = True
self.redshift = config["galaxy"]["redshift"]
self.galaxy_name = config["galaxy"]["Name"]
# READ device and fastmath option
self.device = config["device"]["name"]
self.fast_math = config["device"]["fastmath"]
# READ input file directory and output directory
self.input_path = config["Settings"]["input_path"]
self.output_path = config["Settings"]["output_path"]
self.output_path += '/'
# READ psf sigma in kpc
self.kin_sigma_psf = config["Settings"]['kin_psf_sigma_kpc']
self.pho_sigma_psf = config["Settings"]['pho_psf_sigma_kpc']
self.N_px = config["Settings"]['N_pixels']
self.resolution = config["Settings"]['resolution']
galactic_components = config["galactic_components"]
reference_frame = config["Reference_frame"]
systematic_errors = config["systematic_errors"]
# download all parameter names
names,LaTex_names,bounds,value,vary,self.include = [],[],[],[],[],[]
self.bulge_type = galactic_components['Bulge']['name']
self.halo_type = galactic_components['Halo']['name']
for k in galactic_components:
# Read which component to include
self.include.append(galactic_components[k]['include'])
if galactic_components[k]['include'] == True:
# choose parameters
for j in galactic_components[k]["parameters"]:
names.append(galactic_components[k]["parameters"][j]['name'])
LaTex_names.append(galactic_components[k]["parameters"][j]['LaTex_name'])
vary.append(galactic_components[k]["parameters"][j]['vary'])
bounds.append(galactic_components[k]["parameters"][j]['bounds'])
value.append(galactic_components[k]["parameters"][j]['value'])
else:
# exclude component
pass
for k in reference_frame:
names.append(reference_frame[k]["name"])
LaTex_names.append(reference_frame[k]["LaTex_name"])
bounds.append(reference_frame[k]["bounds"])
vary.append(reference_frame[k]["vary"])
value.append(reference_frame[k]["value"])
for k in systematic_errors:
names.append(systematic_errors[k]["name"])
LaTex_names.append(systematic_errors[k]["LaTex_name"])
bounds.append(systematic_errors[k]["bounds"])
vary.append(systematic_errors[k]["vary"])
value.append(systematic_errors[k]["value"])
if config["Settings"]["sys_rho"] == True:
pass
else:
print("Error likelihood without systematic error is not implemented")
print("Put fixed=True and value=-10 in systematic_errors")
exit()
# Build all the relevant dictionaries
self.variable_quantities = {names[i]:bounds[i] for i in range(0,len(names)) if vary[i]==True}
self.fixed_quantities = {names[i]:value[i] for i in range(0,len(names)) if vary[i]==False}
self.variable_LaTex = {names[i]:LaTex_names[i] for i in range(0,len(names)) if vary[i]==True}
self.fixed_LaTex = {names[i]:LaTex_names[i] for i in range(0,len(names)) if vary[i]==False}
# READ dataset
self.x_arcsec,self.y_arcsec,self.x,self.y,self.ibrightness_data,self.v_data,self.sigma_data,self.ML_data,\
self.ibrightness_error,self.v_error,self.sigma_error,self.ML_error,self.goodpix = read_user(self.input_path,velocity_offset=self.vel_offset,\
use_ML=self.use_ML,SNR_threshold_velocity=self.SNR_threshold_velocity,\
SNR_threshold_dispersion=self.SNR_threshold_dispersion,\
SNR_threshold_ML=self.SNR_threshold_ML)
#####################################################################
#####################################################################
# CREATE data_object useful for later plots
#####################################################################
#####################################################################
self.data_obj = data(self.x,self.y)
# 0.009 assumes FWHM of 0.3 arcsec
#self.x_grid,self.y_grid,self.psf = self.data_obj.refined_grid(size=int(np.sqrt(self.K)),sigma_psf=self.sigma_psf,sampling='linear',N_psf=4)
self.x_grid,self.y_grid,self.kin_psf,self.K,self.indexing = self.data_obj.refined_grid_indexing(sigma_psf=self.kin_sigma_psf,N_px=self.N_px,resolution=self.resolution)
_,_,self.pho_psf,_,_ = self.data_obj.refined_grid_indexing(sigma_psf=self.pho_sigma_psf,N_px=self.N_px,resolution=self.resolution)
self.arcsec_to_kpc = abs(self.x[0]-self.x[1])/abs(self.x_arcsec[0]-self.x_arcsec[1])
#####################################################################
#####################################################################
# CREATE model_object useful for later plots
#####################################################################
#####################################################################
cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
H_z = cosmo.H(self.redshift).value # this is in km/Mpc/s
H_z = H_z*1e5/(1e6*3.086e18) # this is in 1/s
G = 4.299e-6 # kpc*km^2/M_sun/s^2
G_1 = G * (1e5*1e-3*3.24078e-19)**2 # kpc^3/M_sun/s^2
self.rho_crit = 3*H_z*H_z/(8*np.pi*G_1)
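# sanity check on the units above: for z ~ 0 and H0 = 70 km/s/Mpc this evaluates to
# rho_crit ~ 1.4e2 M_sun/kpc^3 (~ 9.2e-27 kg/m^3), the standard critical density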
self.model_obj = model(self.device,
gpu_device,
self.fast_math,
self.include,
self.bulge_type,
self.halo_type,
self.data_obj.N,
self.data_obj.J,
self.K,
self.x_grid,
self.y_grid,
self.ibrightness_data,
self.v_data,
self.sigma_data,
self.ML_data,
self.ibrightness_error,
self.v_error,
self.sigma_error,
self.ML_error,
self.kin_psf,
self.pho_psf,
self.indexing,
self.goodpix,
self.rho_crit)
#####################################################################
#####################################################################
# CREATE result_object useful for later plots
#####################################################################
#####################################################################
self.result_obj = Result_Analysis(self.output_path,
self.rho_crit,
self.halo_type)
#####################################################################
#####################################################################
# READ SPECIFIC OF NESTED SAMPLING
#####################################################################
#####################################################################
self.verbose = config["parameter_estimator"]["Nested_sampling"]["verbose"]
self.poolsize = config["parameter_estimator"]["Nested_sampling"]["poolsize"]
self.nthreads = config["parameter_estimator"]["Nested_sampling"]["nthreads"]
self.nlive = config["parameter_estimator"]["Nested_sampling"]["nlive"]
self.maxmcmc = config["parameter_estimator"]["Nested_sampling"]["maxmcmc"]
self.nhamiltonian = config["parameter_estimator"]["Nested_sampling"]["nhamiltonian"]
self.nslice = config["parameter_estimator"]["Nested_sampling"]["nslice"]
self.resume = config["parameter_estimator"]["Nested_sampling"]["resume"]
def parameter_estimation(self,gpu_device=0):
'''
Class Method
----------------------------------------------------------------------
----------------------------------------------------------------------
Start parameter estimation assuming model specified in configuration file
Parameters:
gpu_device: int16
ID of the GPU. If multi-gpu is not needed, set it to zero.
Returns:
'''
M = GalaxyModel(self.device,
gpu_device,
self.fast_math,
self.include,
self.bulge_type,
self.halo_type,
self.data_obj.N,
self.data_obj.J,
self.K,
self.x_grid,
self.y_grid,
self.ibrightness_data,
self.v_data,
self.sigma_data,
self.ML_data,
self.ibrightness_error,
self.v_error,
self.sigma_error,
self.ML_error,
self.kin_psf,
self.pho_psf,
self.indexing,
self.goodpix,
self.variable_quantities,
self.fixed_quantities,
self.rho_crit)
work=cpnest.CPNest(M,
verbose = self.verbose,
poolsize = self.poolsize,
nthreads = self.nthreads ,
nlive = self.nlive,
maxmcmc = self.maxmcmc,
output = self.output_path,
nhamiltonian = self.nhamiltonian,
nslice = self.nslice,
resume = self.resume,
periodic_checkpoint_interval = 600)
work.run()
work.get_posterior_samples()
work.get_nested_samples()
def diagnostic(self,savename='corner.png',show=True,drop_sys_rho=False,drop_halo=False):
'''
Class Method
----------------------------------------------------------------------
----------------------------------------------------------------------
Plot posterior probabilities of all the parameters, assume existence of
"posterior.dat" file.
Parameters:
savename: str
Name for saving the plot
show: Bool
If plot should be shown on screen
drop_sys_rho: Bool
    If True, drop the systematic error of the brightness from the
    corner plot
drop_halo: Bool
    If True, do not show the halo posteriors
Returns:
fig: figure
corner plot
'''
fig = self.result_obj.corner_plot(self.variable_LaTex)
fig.savefig(self.output_path+savename)
if show==True:
fig.show()
plt.show()
return fig
def diagnostic_iteration(self,savename='iteration.png',show=True,drop_sys_rho=False,drop_halo=False):
'''
Class Method
----------------------------------------------------------------------
----------------------------------------------------------------------
Plot parameters as a function of iteration in the nested sampling, assume existence of
"nested_samples.dat" file.
Parameters:
savename: str
Name for saving the plot
show: Bool
If plot should be shown on screen
drop_sys_rho: Bool
    If True, drop the systematic error of the brightness from the
    corner plot
drop_halo: Bool
    If True, do not show the halo posteriors
Returns:
    fig: figure
        iteration plot
'''
fig = self.result_obj.iteration_plot(self.variable_LaTex)
fig.savefig(self.output_path+savename)
if show==True:
fig.show()
plt.show()
return fig
def maps_2D(self,savename='2D_maps.png',plot_ML=True,show=True,percentile=[50,16,84],write=True,lim='data',savedata='2D_informations.npy',vmin=None,vmax=None,close=False):
'''
Class Method
----------------------------------------------------------------------
----------------------------------------------------------------------
Plot best fit model assuming existence of "posterior.dat" file. Plot is
3 X 4 or 3 X 3.
The first row is the best fit model, the second row shows the data and the third row the residuals.
From left to right: brightness, velocity, dispersion and (optionally) mass to light ratio.
Parameters:
savename: str
Name for saving the plot
plot_ML: Bool
If True plot also Mass to light ratio
show: Bool
If plot should be shown on screen
percentile: 1D arr float32
Which "percentiles" to use for computing best fit params and error.
Default is:
- 50 (median) for best fit params
- 16 for lower error
- 84 for upper error
write: Bool
Whether or not to save the best fit params and errors to file.
lim: str
Whether to use "data", "model" or "user" for the limits of the plot
vmin: 1D arr float32
In case lim is set to "user" specify min value of the colormap
vmax: 1D arr float32
In case lim is set to "user" specify max value of the colormap
savedata: str
name of the file for saving best fit model values. Data are saved in the
following format:
-tot[:,0] : x in arcsec
-tot[:,1] : y in arcsec
-tot[:,2] : x in kpc
-tot[:,3] : y in kpc
-tot[:,4] : brightness data (log10)
-tot[:,5] : brightness model (log10)
-tot[:,6] : v data
-tot[:,7] : v model
-tot[:,8] : sigma data
-tot[:,9] : sigma model
-tot[:,10] : ML data
-tot[:,11] : ML model
-tot[:,12] : brightness log10 error
-tot[:,13] : v data error
-tot[:,14] : sigma data error
-tot[:,15] : ML data error
Returns:
ibrightness_model: 1D arr float32
best fit brightness
v_model: 1D arr float32
best fit velocity
sigma_model: 1D arr float32
best fit dispersion
ML_model: 1D arr float32
best fit mass to light ratio
fig: figure
'''
Best_fit_params,_,_ = self.result_obj.best_params(percentile=percentile,write=write)
all_params = {**Best_fit_params, **self.fixed_quantities}
ibrightness_model,_,_,_,v_model,_,_,_,sigma_model,_,_,_,LM_ratio,_,_,_ = self.model_obj.model(all_params)
ML_model = 1.0/LM_ratio
#ML_model = np.log10(1.0/LM_ratio)
fig = self.data_obj.all_maps_kpc(self.ibrightness_data,
np.log10(ibrightness_model),
self.v_data,
v_model,
self.sigma_data,
sigma_model,
self.ML_data,
ML_model,
10**self.ibrightness_data*self.ibrightness_error*np.log(10),
self.v_error,
self.sigma_error,
self.ML_error,
plot_ML=plot_ML,
lim = lim,
vmin=vmin,
vmax=vmax)
fig.savefig(self.output_path+savename)
if show==True:
fig.show()
plt.show()
tot = np.concatenate((self.x_arcsec.reshape(-1,1),
self.y_arcsec.reshape(-1,1),
self.x.reshape(-1,1),
self.y.reshape(-1,1),
self.ibrightness_data.reshape(-1,1),
np.log10(ibrightness_model).reshape(-1,1),
self.v_data.reshape(-1,1),
v_model.reshape(-1,1),
self.sigma_data.reshape(-1,1),
sigma_model.reshape(-1,1),
self.ML_data.reshape(-1,1),
ML_model.reshape(-1,1),
self.ibrightness_error.reshape(-1,1),  # log10 brightness error, tot[:,12] in the docstring above
self.v_error.reshape(-1,1),
self.sigma_error.reshape(-1,1),
self.ML_error.reshape(-1,1),
),axis=1)
np.save(self.output_path+savedata,tot)
return ibrightness_model,v_model,sigma_model,ML_model,fig
def maps_1D(self,savename='1D_profiles.png',plot_ML=True,show=True,percentile=[50,16,84],write=True,close=False):
'''
Class Method
----------------------------------------------------------------------
----------------------------------------------------------------------
Plot best fit model assuming existence of "posterior.dat" file. Plot is
1 X 4 or 1 X 3.
From left to right: brightness,velocity,dispersion and (possibly) Mass to light ratio.
Profiles are azimuthally averaged.
Parameters:
savename: str
Name for saving the plot
plot_ML: Bool
If True plot also Mass to light ratio
show: Bool
If plot should be shown on screen
percentile: 1D arr float32
Which "percentiles" to use for computing best fit params and error.
Default is:
- 50 (median) for best fit params
- 16 for lower error
- 84 for upper error
write: Bool
Whether or not to save the best fit params and errors to file.
Returns:
fig: figure
'''
Best_fit_params,_,_ = self.result_obj.best_params(percentile=percentile,write=write)
all_params = {**Best_fit_params , **self.fixed_quantities}
ibrightness_model,_,_,_,v_model,_,_,_,sigma_model,_,_,_,LM_ratio,_,_,_ = self.model_obj.model(all_params)
ML_model = 1.0/LM_ratio
#ML_model = np.log10(1.0/LM_ratio)
minr = 0.4 # dimension of pixel is 0.5 arcsec
maxr_kin = np.nanmax((self.x_arcsec**2+self.y_arcsec**2)**0.5*self.v_data/self.v_data) # v_data/v_data propagates the NaNs of masked pixels
maxr_phot = 1.2*maxr_kin
maxr_kin -= 1.5
Nbin_phot = int((maxr_phot-minr)/1.0)
Nbin_kin = int((maxr_kin-minr)/1.0)
Nbin_phot = max(Nbin_phot,7)
Nbin_kin = max(Nbin_kin,7)
r_avg_B,B_avg_model = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,ibrightness_model,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_phot,Nbins=Nbin_phot,azimuth_division=True)
_,B_avg_data = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,10**self.ibrightness_data,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_phot,Nbins=Nbin_phot,azimuth_division=True)
_,B_avg_err = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,10**self.ibrightness_data*self.ibrightness_error*np.log(10),quantity=None,estimator='mean',
min_r=minr,max_r=maxr_phot,Nbins=Nbin_phot,azimuth_division=True)
r_avg_v,v_avg_model = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,v_model,quantity="velocity",estimator='mean',
min_r=minr,max_r=maxr_kin,Nbins=Nbin_kin,azimuth_division=False)
_,v_avg_data = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,self.v_data,quantity="velocity",estimator='mean',
min_r=minr,max_r=maxr_kin,Nbins=Nbin_kin,azimuth_division=False)
_,v_avg_err = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,self.v_error,quantity="velocity",estimator='mean',
min_r=minr,max_r=maxr_kin,Nbins=Nbin_kin,azimuth_division=False)
r_avg_sigma,sigma_avg_model = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,sigma_model,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_kin,Nbins=Nbin_kin,azimuth_division=True)
_,sigma_avg_data = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,self.sigma_data,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_kin,Nbins=Nbin_kin,azimuth_division=True)
_,sigma_avg_err = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,self.sigma_error,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_kin,Nbins=Nbin_kin,azimuth_division=True)
r_avg_ML,ML_avg_model = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,ML_model,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_phot,Nbins=Nbin_phot,azimuth_division=True)
_,ML_avg_data = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,self.ML_data,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_phot,Nbins=Nbin_phot,azimuth_division=True)
_,ML_avg_err = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,self.ML_error,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_phot,Nbins=Nbin_phot,azimuth_division=True)
#B_avg_err = np.nanmean(10**self.ibrightness_data*self.ibrightness_error*np.log(10))
#v_avg_err = np.nanmean(self.v_error)
#sigma_avg_err = np.nanmean(self.sigma_error)
#ML_avg_err = np.nanmean(10**self.ML_data*self.ML_error*np.log(10))
#ML_avg_err = np.nanmean(self.ML_error)
fig = self.data_obj.all_1D_profile(
self.arcsec_to_kpc,
r_avg_B,
B_avg_data,
B_avg_model,
r_avg_v,
v_avg_data,
v_avg_model,
r_avg_sigma,
sigma_avg_data,
sigma_avg_model,
r_avg_ML,
ML_avg_data,
ML_avg_model,
B_avg_err,
v_avg_err,
sigma_avg_err,
ML_avg_err,
plot_ML=plot_ML)
fig.savefig(self.output_path+savename)
if show==True:
fig.show()
plt.show()
return fig
def maps_1D_all_outputs(self,savename='1D_profiles.png',plot_ML=True,show=True,percentile=[50,16,84],savedata='1D_informations.npy',write=True,close=False):
'''
Class Method
----------------------------------------------------------------------
----------------------------------------------------------------------
Plot best fit model assuming existence of "posterior.dat" file. Plot is
1 X 4 or 1 X 3.
From left to right: brightness,velocity,dispersion and (possibly) Mass to light ratio.
Profiles are azimuthally averaged. All components are present.
Parameters:
savename: str
Name for saving the plot
plot_ML: Bool
If True plot also Mass to light ratio
show: Bool
If plot should be shown on screen
percentile: 1D arr float32
Which "percentiles" to use for computing best fit params and error.
Default is:
- 50 (median) for best fit params
- 16 for lower error
- 84 for upper error
write: Bool
Whether or not to save the best fit params and errors to file.
savedata: str
name of the file for saving the best fit model values; the column
layout follows the tot array filled below.
Returns:
ibrightness_model: 1D arr float32
best fit brightness
v_model: 1D arr float32
best fit velocity
sigma_model: 1D arr float32
best fit dispersion
ML_model: 1D arr float32
best fit mass to light ratio
fig: figure
Note: this is probably working only with bulge+ inner disc+ outer disc model
'''
Best_fit_params,_,_ = self.result_obj.best_params(percentile=percentile,write=write)
all_params = {**Best_fit_params , **self.fixed_quantities}
ibrightness_model,B_B,B_D1,B_D2,v_model,v_B,v_D1,v_D2,\
sigma_model,sigma_B,sigma_D1,sigma_D2,LM_ratio,LM_B,LM_D1,LM_D2 = self.model_obj.model(all_params)
ML_model,ML_B,ML_D1,ML_D2 = 1.0/LM_ratio,1.0/LM_B,1.0/LM_D1,1.0/LM_D2
#ML_model = np.log10(1.0/LM_ratio)
minr = 0.4 # dimension of pixel is 0.5 arcsec
maxr_kin = np.nanmax((self.x_arcsec**2+self.y_arcsec**2)**0.5*self.v_data/self.v_data)
maxr_phot = 1.2*maxr_kin
maxr_kin -= 1.5
Nbin_phot = int((maxr_phot-minr)/1.0)
Nbin_kin = int((maxr_kin-minr)/1.0)
Nbin_phot = max(Nbin_phot,7)
Nbin_kin = max(Nbin_kin,7)
r_avg_B,B_avg_model = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,np.log10(ibrightness_model),quantity=None,estimator='mean',
min_r=minr,max_r=maxr_phot,Nbins=Nbin_phot,azimuth_division=True)
_,B_B_avg = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,np.log10(B_B),quantity=None,estimator='mean',
min_r=minr,max_r=maxr_phot,Nbins=Nbin_phot,azimuth_division=True)
_,B_D1_avg = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,np.log10(B_D1),quantity=None,estimator='mean',
min_r=minr,max_r=maxr_phot,Nbins=Nbin_phot,azimuth_division=True)
_,B_D2_avg = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,np.log10(B_D2),quantity=None,estimator='mean',
min_r=minr,max_r=maxr_phot,Nbins=Nbin_phot,azimuth_division=True)
_,B_avg_data = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,self.ibrightness_data,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_phot,Nbins=Nbin_phot,azimuth_division=True)
_,B_avg_err = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,self.ibrightness_error,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_phot,Nbins=Nbin_phot,azimuth_division=True)
r_avg_v,v_avg_model = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,v_model,quantity="velocity",estimator='mean',
min_r=minr,max_r=maxr_kin,Nbins=Nbin_kin,azimuth_division=False)
_,v_B_avg = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,v_B,quantity="velocity",estimator='mean',
min_r=minr,max_r=maxr_kin,Nbins=Nbin_kin,azimuth_division=False)
_,v_D1_avg = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,v_D1,quantity="velocity",estimator='mean',
min_r=minr,max_r=maxr_kin,Nbins=Nbin_kin,azimuth_division=False)
_,v_D2_avg = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,v_D2,quantity="velocity",estimator='mean',
min_r=minr,max_r=maxr_kin,Nbins=Nbin_kin,azimuth_division=False)
_,v_avg_data = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,self.v_data,quantity="velocity",estimator='mean',
min_r=minr,max_r=maxr_kin,Nbins=Nbin_kin,azimuth_division=False)
_,v_avg_err = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,self.v_error,quantity="velocity",estimator='mean',
min_r=minr,max_r=maxr_kin,Nbins=Nbin_kin,azimuth_division=False)
r_avg_sigma,sigma_avg_model = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,sigma_model,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_kin,Nbins=Nbin_kin,azimuth_division=True)
_,sigma_B_avg = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,sigma_B,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_kin,Nbins=Nbin_kin,azimuth_division=True)
_,sigma_D1_avg = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,sigma_D1,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_kin,Nbins=Nbin_kin,azimuth_division=True)
_,sigma_D2_avg = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,sigma_D2,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_kin,Nbins=Nbin_kin,azimuth_division=True)
_,sigma_avg_data = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,self.sigma_data,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_kin,Nbins=Nbin_kin,azimuth_division=True)
_,sigma_avg_err = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,self.sigma_error,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_kin,Nbins=Nbin_kin,azimuth_division=True)
r_avg_ML,ML_avg_model = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,ML_model,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_phot,Nbins=Nbin_phot,azimuth_division=True)
_,ML_B_avg = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,ML_B,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_phot,Nbins=Nbin_phot,azimuth_division=True)
_,ML_D1_avg = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,ML_D1,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_phot,Nbins=Nbin_phot,azimuth_division=True)
_,ML_D2_avg = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,ML_D2,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_phot,Nbins=Nbin_phot,azimuth_division=True)
_,ML_avg_data = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,self.ML_data,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_phot,Nbins=Nbin_phot,azimuth_division=True)
_,ML_avg_err = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,self.ML_error,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_phot,Nbins=Nbin_phot,azimuth_division=True)
#B_avg_err = np.nanmean(10**self.ibrightness_data*self.ibrightness_error*np.log(10))
#v_avg_err = np.nanmean(self.v_error)
#sigma_avg_err = np.nanmean(self.sigma_error)
#ML_avg_err = np.nanmean(10**self.ML_data*self.ML_error*np.log(10))
#ML_avg_err = np.nanmean(self.ML_error)
fig = self.data_obj.all_1D_profile_all_outputs(
self.arcsec_to_kpc,
r_avg_B,
B_avg_data,
B_avg_model,
B_B_avg,
B_D1_avg,
B_D2_avg,
r_avg_v,
v_avg_data,
v_avg_model,
v_B_avg,
v_D1_avg,
v_D2_avg,
r_avg_sigma,
sigma_avg_data,
sigma_avg_model,
sigma_B_avg,
sigma_D1_avg,
sigma_D2_avg,
r_avg_ML,
ML_avg_data,
ML_avg_model,
ML_B_avg,
ML_D1_avg,
ML_D2_avg,
B_avg_err,
v_avg_err,
sigma_avg_err,
ML_avg_err,
plot_ML=plot_ML)
fig.savefig(self.output_path+savename)
tot = np.ones((int(max(Nbin_phot,Nbin_kin)),28))*np.nan
tot[:np.size(r_avg_B),0] = r_avg_B
tot[:np.size(B_avg_model),1] = B_avg_model
tot[:np.size(B_B_avg),2] = B_B_avg
tot[:np.size(B_D1_avg),3] = B_D1_avg
tot[:np.size(B_D2_avg),4] = B_D2_avg
tot[:np.size(B_avg_data),5] = B_avg_data
tot[:np.size(B_avg_err),6] = B_avg_err
tot[:np.size(r_avg_v),7] = r_avg_v
tot[:np.size(v_avg_model),8] = v_avg_model
tot[:np.size(v_B_avg),9] = v_B_avg
tot[:np.size(v_D1_avg),10] = v_D1_avg
tot[:np.size(v_D2_avg),11] = v_D2_avg
tot[:np.size(v_avg_data),12] = v_avg_data
tot[:np.size(v_avg_err),13] = v_avg_err
tot[:np.size(r_avg_sigma),14] = r_avg_sigma
tot[:np.size(sigma_avg_model),15] = sigma_avg_model
tot[:np.size(sigma_B_avg),16] = sigma_B_avg
tot[:np.size(sigma_D1_avg),17] = sigma_D1_avg
tot[:np.size(sigma_D2_avg),18] = sigma_D2_avg
tot[:np.size(sigma_avg_data),19] = sigma_avg_data
tot[:np.size(sigma_avg_err),20] = sigma_avg_err
tot[:np.size(r_avg_ML),21] = r_avg_ML
tot[:np.size(ML_avg_model),22] = ML_avg_model
tot[:np.size(ML_B_avg),23] = ML_B_avg
tot[:np.size(ML_D1_avg),24] = ML_D1_avg
tot[:np.size(ML_D2_avg),25] = ML_D2_avg
tot[:np.size(ML_avg_data),26] = ML_avg_data
tot[:np.size(ML_avg_err),27] = ML_avg_err
np.save(self.output_path+savedata,tot)
if show==True:
fig.show()
plt.show()
return fig
def maps_2D_specify_params(self,Best_fit_params={},savename='2D_maps.png',plot_ML=True,show=True,write=True,lim='data',vmin=None,vmax=None,close=False):
all_params = {**Best_fit_params, **self.fixed_quantities}
ibrightness_model,_,_,_,v_model,_,_,_,sigma_model,_,_,_,LM_ratio,_,_,_ = self.model_obj.model(all_params)
ML_model = 1.0/LM_ratio
#ML_model = np.log10(1.0/LM_ratio)
fig = self.data_obj.all_maps_kpc(self.ibrightness_data,
np.log10(ibrightness_model),
self.v_data,
v_model,
self.sigma_data,
sigma_model,
self.ML_data,
ML_model,
10**self.ibrightness_data*self.ibrightness_error*np.log(10),
self.v_error,
self.sigma_error,
self.ML_error,
plot_ML=plot_ML,
lim = lim,
vmin=vmin,
vmax=vmax)
fig.savefig(self.output_path+savename)
if show==True:
fig.show()
plt.show()
return ibrightness_model,v_model,sigma_model,ML_model,fig
def maps_1D_specify_params(self,Best_fit_params={},savename='1D_profiles.png',plot_ML=True,show=True,write=True,close=False):
all_params = {**Best_fit_params , **self.fixed_quantities}
ibrightness_model,B_B,B_D1,B_D2,v_model,v_B,v_D1,v_D2,\
sigma_model,sigma_B,sigma_D1,sigma_D2,LM_ratio,LM_B,LM_D1,LM_D2 = self.model_obj.model(all_params)
ML_model,ML_B,ML_D1,ML_D2 = 1.0/LM_ratio,1.0/LM_B,1.0/LM_D1,1.0/LM_D2
#ML_model = np.log10(1.0/LM_ratio)
minr = 0.4 # dimension of pixel is 0.5 arcsec
maxr_kin = np.nanmax((self.x_arcsec**2+self.y_arcsec**2)**0.5*self.v_data/self.v_data)
maxr_phot = 2*maxr_kin
maxr_kin -= 1.5
Nbin_phot = int((maxr_phot-minr)/1.0)
Nbin_kin = int((maxr_kin-minr)/1.0)
Nbin_phot = max(Nbin_phot,7)
Nbin_kin = max(Nbin_kin,7)
r_avg_B,B_avg_model = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,np.log10(ibrightness_model),quantity=None,estimator='mean',
min_r=minr,max_r=maxr_phot,Nbins=Nbin_phot,azimuth_division=True)
_,B_B_avg = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,np.log10(B_B),quantity=None,estimator='mean',
min_r=minr,max_r=maxr_phot,Nbins=Nbin_phot,azimuth_division=True)
_,B_D1_avg = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,np.log10(B_D1),quantity=None,estimator='mean',
min_r=minr,max_r=maxr_phot,Nbins=Nbin_phot,azimuth_division=True)
_,B_D2_avg = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,np.log10(B_D2),quantity=None,estimator='mean',
min_r=minr,max_r=maxr_phot,Nbins=Nbin_phot,azimuth_division=True)
_,B_avg_data = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,self.ibrightness_data,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_phot,Nbins=Nbin_phot,azimuth_division=True)
_,B_avg_err = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,self.ibrightness_error,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_phot,Nbins=Nbin_phot,azimuth_division=True)
r_avg_v,v_avg_model = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,v_model,quantity="velocity",estimator='mean',
min_r=minr,max_r=maxr_kin,Nbins=Nbin_kin,azimuth_division=False)
_,v_B_avg = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,v_B,quantity="velocity",estimator='mean',
min_r=minr,max_r=maxr_kin,Nbins=Nbin_kin,azimuth_division=False)
_,v_D1_avg = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,v_D1,quantity="velocity",estimator='mean',
min_r=minr,max_r=maxr_kin,Nbins=Nbin_kin,azimuth_division=False)
_,v_D2_avg = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,v_D2,quantity="velocity",estimator='mean',
min_r=minr,max_r=maxr_kin,Nbins=Nbin_kin,azimuth_division=False)
_,v_avg_data = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,self.v_data,quantity="velocity",estimator='mean',
min_r=minr,max_r=maxr_kin,Nbins=Nbin_kin,azimuth_division=False)
_,v_avg_err = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,self.v_error,quantity="velocity",estimator='mean',
min_r=minr,max_r=maxr_kin,Nbins=Nbin_kin,azimuth_division=False)
r_avg_sigma,sigma_avg_model = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,sigma_model,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_kin,Nbins=Nbin_kin,azimuth_division=True)
_,sigma_B_avg = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,sigma_B,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_kin,Nbins=Nbin_kin,azimuth_division=True)
_,sigma_D1_avg = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,sigma_D1,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_kin,Nbins=Nbin_kin,azimuth_division=True)
_,sigma_D2_avg = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,sigma_D2,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_kin,Nbins=Nbin_kin,azimuth_division=True)
_,sigma_avg_data = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,self.sigma_data,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_kin,Nbins=Nbin_kin,azimuth_division=True)
_,sigma_avg_err = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,self.sigma_error,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_kin,Nbins=Nbin_kin,azimuth_division=True)
r_avg_ML,ML_avg_model = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,ML_model,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_phot,Nbins=Nbin_phot,azimuth_division=True)
_,ML_B_avg = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,ML_B,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_phot,Nbins=Nbin_phot,azimuth_division=True)
_,ML_D1_avg = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,ML_D1,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_phot,Nbins=Nbin_phot,azimuth_division=True)
_,ML_D2_avg = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,ML_D2,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_phot,Nbins=Nbin_phot,azimuth_division=True)
_,ML_avg_data = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,self.ML_data,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_phot,Nbins=Nbin_phot,azimuth_division=True)
_,ML_avg_err = self.result_obj.radial_profile(self.x_arcsec,self.y_arcsec,self.ML_error,quantity=None,estimator='mean',
min_r=minr,max_r=maxr_phot,Nbins=Nbin_phot,azimuth_division=True)
#B_avg_err = np.nanmean(10**self.ibrightness_data*self.ibrightness_error*np.log(10))
#v_avg_err = np.nanmean(self.v_error)
#sigma_avg_err = np.nanmean(self.sigma_error)
#ML_avg_err = np.nanmean(10**self.ML_data*self.ML_error*np.log(10))
#ML_avg_err = np.nanmean(self.ML_error)
fig = self.data_obj.all_1D_profile_all_outputs(
self.arcsec_to_kpc,
r_avg_B,
B_avg_data,
B_avg_model,
B_B_avg,
B_D1_avg,
B_D2_avg,
r_avg_v,
v_avg_data,
v_avg_model,
v_B_avg,
v_D1_avg,
v_D2_avg,
r_avg_sigma,
sigma_avg_data,
sigma_avg_model,
sigma_B_avg,
sigma_D1_avg,
sigma_D2_avg,
r_avg_ML,
ML_avg_data,
ML_avg_model,
ML_B_avg,
ML_D1_avg,
ML_D2_avg,
B_avg_err,
v_avg_err,
sigma_avg_err,
ML_avg_err,
plot_ML=plot_ML)
fig.savefig(self.output_path+savename)
if show==True:
fig.show()
plt.show()
return fig
def best_params_all_fits(self,write=True,filename='all_params.txt'):
Best_Fit,LE_16,UE_84 = self.result_obj.best_params()
_,LE_05,UE_95 = self.result_obj.best_params(percentile=[50,5,95])
if os.path.exists(filename):
f = open(filename,'a')
f.write(self.galaxy_name+'\t')
for nn in list(Best_Fit.keys()):
f.write(str(np.around(Best_Fit[nn],decimals=4))+'\t')
for nn in list(Best_Fit.keys()):
f.write(str(np.around(LE_16[nn],decimals=4))+ '\t')
for nn in list(Best_Fit.keys()):
f.write(str(np.around(UE_84[nn],decimals=4))+'\t')
for nn in list(Best_Fit.keys()):
f.write(str(np.around(LE_05[nn],decimals=4))+'\t')
for nn in list(Best_Fit.keys()):
f.write(str(np.around(UE_95[nn],decimals=4))+'\t')
f.write('\n')
else:
f = open(filename,'w')
f.write('# plate-IFU\t')
for nn in list(Best_Fit.keys()):
f.write(nn+'\t')
for nn in list(Best_Fit.keys()):
f.write(nn+'_LE16 \t')
for nn in list(Best_Fit.keys()):
f.write(nn+'_UE84 \t')
for nn in list(Best_Fit.keys()):
f.write(nn+'_LE05 \t')
for nn in list(Best_Fit.keys()):
f.write(nn+'_UE95 \t')
f.write('\n')
f.write(self.galaxy_name+'\t')
for nn in list(Best_Fit.keys()):
f.write(str(np.around(Best_Fit[nn],decimals=4))+'\t')
for nn in list(Best_Fit.keys()):
f.write(str(np.around(LE_16[nn],decimals=4))+ '\t')
for nn in list(Best_Fit.keys()):
f.write(str(np.around(UE_84[nn],decimals=4))+'\t')
for nn in list(Best_Fit.keys()):
f.write(str(np.around(LE_05[nn],decimals=4))+'\t')
for nn in list(Best_Fit.keys()):
f.write(str(np.around(UE_95[nn],decimals=4))+'\t')
f.write('\n')
f.close()
def mass_plot(self,min_r=0.1,max_r=10,Nbins=20,figname='mass_profile.png'):
'''
Class Method
----------------------------------------------------------------------
----------------------------------------------------------------------
Plot mass profile for best fit model
Note: this is probably working only with bulge+ inner disc+ outer disc model
'''
r,Mbulge,Mdisc1,Mdisc2,Mhalo = self.result_obj.mass_profile(min_r=min_r,max_r=max_r,Nbins=Nbins)
fig = plt.figure()
ax = fig.add_subplot()
ax.plot(r,Mbulge,'r',lw=3,label='Bulge')
ax.plot(r,Mdisc1,'y',lw=3,label='Inner Disc')
ax.plot(r,Mdisc2,'b',lw=3,label='Outer Disc')
ax.plot(r,Mhalo,'k',lw=3,label='Halo')
ax.set_yscale('log')
ax.set_xlabel('r [kpc]')
ax.set_ylabel(r'$M [M_{\odot}]$')
fig.savefig(self.output_path+figname)
if __name__=='__main__':
pass
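# Typical end-to-end usage (a sketch; the configuration path and GPU index are
# placeholders, and cpnest/CUDA have to be set up as required by this package):
#
#   cfg = Config_reader('galaxy_config.yaml', gpu_device=0)
#   cfg.parameter_estimation(gpu_device=0)    # run the nested sampling
#   cfg.diagnostic(savename='corner.png')     # corner plot of the posteriors
#   cfg.maps_2D(savename='2D_maps.png')       # best fit maps vs. data
#   cfg.maps_1D(savename='1D_profiles.png')   # azimuthally averaged profiles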
# /ANYstructure-4.10.tar.gz/ANYstructure-4.10/any_files/pl_stf_window.py
import pathlib
import tkinter as tk
from _tkinter import TclError
from tkinter.ttk import Combobox
import os
try:
import any_files.example_data as test
import any_files.helper as hlp
except ModuleNotFoundError:
import ANYstructure.any_files.example_data as test
import ANYstructure.any_files.helper as hlp
from matplotlib.backends.backend_tkagg import (
FigureCanvasTkAgg, NavigationToolbar2Tk)
# Implement the default Matplotlib key bindings.
from matplotlib.backend_bases import key_press_handler
from matplotlib.figure import Figure
class CreateStructureWindow():
'''
This is the tkinter GUI for defining plate/stiffener properties.
'''
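# Note: "master" is assumed to be a tk.Toplevel and "app" the main ANYstructure
# application object; the code below relies on app providing _clicked_section_create,
# _line_to_struc, _active_line and _sections.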
def __init__(self, master, app):
super(CreateStructureWindow, self).__init__()
self._frame = master
self._frame.wm_title("Define structure properties")
self._frame.geometry('1800x900')
self._frame.grab_set()
self._root_dir = os.path.dirname(os.path.abspath(__file__))
if __name__ == '__main__':
self._initial_structure_obj = test.get_structure_calc_object()
self._initial_calc_obj = test.get_structure_calc_object()
self._section_list = []
self._section_objects = []
for section in hlp.helper_read_section_file('bulb_anglebar_tbar_flatbar.csv'):
SecObj = Section(section)
self._section_list = hlp.add_new_section(self._section_list, SecObj)
self._section_objects.append(SecObj)
# m = self._ent_section_list.children['menu']
# m.add_command(label=SecObj.__str__(), command=self.section_choose)
self._clicked_button = ["long stf", "ring stf", "ring frame", "flat long stf", 'flat stf', 'flat girder'][0]
else:
self.app = app
self._clicked_button = app._clicked_section_create  # if app._line_is_active else None
try:
if self._clicked_button in ['flat stf', "flat long stf"]:
self._initial_structure_obj = self.app._line_to_struc[app._active_line][0].Stiffener
elif self._clicked_button == 'flat girder':
self._initial_structure_obj = self.app._line_to_struc[app._active_line][5].Girder
elif self._clicked_button in ["long stf"]:
self._initial_structure_obj = self.app._line_to_struc[app._active_line][5].LongStfObj
elif self._clicked_button == "ring stf":
self._initial_structure_obj = self.app._line_to_struc[app._active_line][5].RingStfObj
elif self._clicked_button == "ring frame":
self._initial_structure_obj = self.app._line_to_struc[app._active_line][0].RingFrameObj
else:
self._initial_structure_obj = None
except (KeyError, AttributeError) as error:
self._initial_structure_obj = None
self._section_list = [section.__str__() for section in app._sections]
self._section_objects = app._sections
image_dir = os.path.dirname(__file__) + '\\images\\'
self._opt_runned = False
self._opt_resutls = ()
self._draw_scale = 0.5
self._canvas_dim = (500, 450)
ent_w = 10
start_x, start_y, dx, dy = 20, 70, 60, 33
self._canvas_struc = tk.Canvas(self._frame, width=self._canvas_dim[0], height=self._canvas_dim[1],
background='azure', relief='groove', borderwidth=2)
self.structure_types = ['T','L', 'L-bulb','FB']
self._canvas_struc.place(x=10, y=440)
tk.Label(self._frame, text='-- Define structure properties here --', font='Verdana 15 bold').place(x=10, y=10)
#
# ### Adding matplotlib
# fig, ax = run_section_properties()# Figure(figsize=(4, 4), dpi=100)
# t = np.arange(0, 3, .01)
# #fig.add_subplot(111).plot(t, 2 * np.sin(2 * np.pi * t))
#
# canvas = FigureCanvasTkAgg(fig, master=master) # A tk.DrawingArea.
# canvas.draw()
# canvas.get_tk_widget().place(x=start_x+17*dx, y=start_y+dy )
#
# toolbar = NavigationToolbar2Tk(canvas, master)
# toolbar.update()
# canvas.get_tk_widget().place(x=start_x+17*dx, y=start_y+10*dy )
#
# def on_key_press(event):
# print("you pressed {}".format(event.key))
# key_press_handler(event, canvas, toolbar)
#
# canvas.mpl_connect("key_press_event", on_key_press)
self._new_spacing = tk.DoubleVar()
self._new_pl_thk = tk.DoubleVar()
self._new_web_h = tk.DoubleVar()
self._new_web_thk = tk.DoubleVar()
self._new_fl_w = tk.DoubleVar()
self._new_fl_thk = tk.DoubleVar()
self._new_stiffener_type = tk.StringVar()
self._new_stiffener_filter = tk.StringVar()
self._new_stiffener_filter.set('No filter applied')
self._new_girder_length = tk.DoubleVar()
self._new_section = tk.StringVar()
self._ent_section_list = Combobox(self._frame, values = self._section_list, textvariable = self._new_section,
width = 40)
self._ent_section_list.bind("<<ComboboxSelected>>", self.section_choose)
# self._ent_section_list = tk.OptionMenu(self._frame, self._new_section, command=self.section_choose,
# *['',] if self._section_list == [] else self._section_list)
self._ent_structure_options = tk.OptionMenu(self._frame,self._new_stiffener_type,
command=self.option_choose,*self.structure_types)
self._ent_filter_stf = tk.OptionMenu(self._frame,self._new_stiffener_filter,
command=self.regen_option_menu,*['No filter applied','L-bulb', 'L', 'FB', 'T'])
self._ent_spacing = tk.Entry(self._frame, textvariable=self._new_spacing, width=ent_w)
self._ent_pl_thk = tk.Entry(self._frame, textvariable=self._new_pl_thk, width=ent_w)
self._ent_web_h = tk.Entry(self._frame, textvariable=self._new_web_h, width=ent_w)
self._ent_web_thk = tk.Entry(self._frame, textvariable=self._new_web_thk, width=ent_w)
self._ent_fl_w = tk.Entry(self._frame, textvariable=self._new_fl_w, width=ent_w)
self._ent_fl_thk = tk.Entry(self._frame, textvariable=self._new_fl_thk, width=ent_w)
self._ent_girder_length = tk.Entry(self._frame, textvariable=self._new_girder_length, width=ent_w)
tk.Label(self._frame, text='Stiffener type:', font='Verdana 9 bold').place(x=start_x, y=start_y )
tk.Label(self._frame, text='Girder length (Lg)', font='Verdana 9 bold').place(x=start_x+9*dx,
y=start_y + 15 * dy)
tk.Label(self._frame, text='[m]', font='Verdana 9 bold').place(x=start_x + 14 * dx,y=start_y + 15 * dy)
self._ent_girder_length.place(x=start_x + 12 * dx, y=start_y + 15 * dy)
tk.Label(self._frame, text='[mm]', font='Verdana 9 bold').place(x=start_x+3*dx, y=start_y+dy )
tk.Label(self._frame, text='[mm]', font='Verdana 9 bold').place(x=start_x+3*dx, y=start_y + 2*dy)
tk.Label(self._frame, text='[mm]', font='Verdana 9 bold').place(x=start_x +3*dx, y=start_y + 3*dy)
tk.Label(self._frame, text='[mm]', font='Verdana 9 bold').place(x=start_x+3*dx, y=start_y + 4*dy)
tk.Label(self._frame, text='[mm]', font='Verdana 9 bold').place(x=start_x+3*dx, y=start_y + 5*dy)
tk.Label(self._frame, text='[mm]', font='Verdana 9 bold').place(x=start_x+3*dx, y=start_y + 6*dy)
tk.Label(self._frame, text='Existing sections:', font='Verdana 9 bold').place(x=start_x+4*dx, y=start_y + 6*dy)
tk.Label(self._frame, text='filter ->', font='Verdana 9 bold').place(x=start_x + 4 * dx,
y=start_y + 7 * dy)
self._ent_section_list.place(x=start_x+7*dx, y=start_y + 6*dy)
self._ent_filter_stf.place(x=start_x+5*dx, y=start_y + 7*dy)
tk.Button(self._frame, text='Read section list from file', command=self.read_sections, font='Verdana 10 bold',
bg = 'blue', fg = 'yellow').place(x=start_x+12*dx, y=start_y + 6*dy)
tk.Button(self._frame, text='Load built in sections', command=self.read_sections_built_in, font='Verdana 10 bold',
bg = 'azure', fg = 'black').place(x=start_x+12*dx, y=start_y + 7*dy)
# setting default values
init_dim,init_thk = 0.05,0.002
if self._initial_structure_obj != None:
self._new_stiffener_type.set(self._initial_structure_obj.get_stiffener_type())
self._new_spacing.set(self._initial_structure_obj.get_s()*1000)
self._new_pl_thk.set(self._initial_structure_obj.get_pl_thk()*1000)
self._new_web_h.set(self._initial_structure_obj.get_web_h()*1000)
self._new_web_thk.set(self._initial_structure_obj.get_web_thk()*1000)
self._new_fl_w.set(self._initial_structure_obj.get_fl_w()*1000)
self._new_fl_thk.set(self._initial_structure_obj.get_fl_thk()*1000)
else:
self._new_spacing.set(0)
self._new_pl_thk.set(0)
self._new_web_h.set(0)
self._new_web_thk.set(0)
self._new_fl_w.set(0)
self._new_fl_thk.set(0)
self._new_girder_length.set(10)
self._ent_structure_options.place(x=start_x + dx * 3, y=start_y)
if self._new_spacing.get() != 0:
tk.Label(self._frame, text='Spacing', font='Verdana 9').place(x=start_x, y=start_y + dy)
self._ent_spacing.place(x=start_x + dx * 2, y=start_y+dy)
if self._new_pl_thk.get() != 0:
tk.Label(self._frame, text='Plate thk.', font='Verdana 9').place(x=start_x, y=start_y + 2 * dy)
self._ent_pl_thk.place(x=start_x + dx * 2, y=start_y+2*dy)
if self._new_web_h.get() != 0:
tk.Label(self._frame, text='Web height', font='Verdana 9').place(x=start_x, y=start_y + 3 * dy)
self._ent_web_h.place(x=start_x + dx * 2, y=start_y+3*dy)
if self._new_web_thk.get() != 0:
tk.Label(self._frame, text='Web thk.', font='Verdana 9').place(x=start_x, y=start_y + 4 * dy)
self._ent_web_thk.place(x=start_x + dx * 2, y=start_y+4*dy)
if self._new_fl_w.get() != 0:
tk.Label(self._frame, text='Flange width', font='Verdana 9').place(x=start_x, y=start_y + 5 * dy)
self._ent_fl_w.place(x=start_x + dx * 2, y=start_y+5*dy)
if self._new_fl_thk.get() != 0:
tk.Label(self._frame, text='Flange thk.', font='Verdana 9').place(x=start_x, y=start_y + 6 * dy)
self._ent_fl_thk.place(x=start_x + dx * 2, y=start_y+6*dy)
self._new_spacing.trace('w',self.draw_trace)
self._new_pl_thk.trace('w',self.draw_trace)
self._new_web_h.trace('w',self.draw_trace)
self._new_web_thk.trace('w',self.draw_trace)
self._new_fl_w.trace('w',self.draw_trace)
self._new_fl_thk.trace('w',self.draw_trace)
try:
img_file_name = 'img_stiffened_plate_panel.gif'
if os.path.isfile('images/' + img_file_name):
file_path = 'images/' + img_file_name
else:
file_path = os.path.dirname(os.path.abspath(__file__)) + '/images/' + img_file_name
photo = tk.PhotoImage(file=file_path)
label = tk.Label(self._frame, image=photo)
label.image = photo # keep a reference!
label.place(x=550, y=610)
except TclError:
pass
try:
img_file_name = 'img_T_L_FB.gif'
if os.path.isfile('images/' + img_file_name):
file_path = 'images/' + img_file_name
else:
file_path = os.path.dirname(os.path.abspath(__file__)) + '/images/' + img_file_name
photo_T_L_FB = tk.PhotoImage(file=file_path)
label = tk.Label(self._frame, image=photo_T_L_FB )
label.image = photo_T_L_FB # keep a reference!
label.place(x=270, y=50)
except TclError:
pass
# Close and save depending on input
# "long stf", "ring stf", "ring frame", "flat long stf"
if self._clicked_button is not None:
self.close_and_save = tk.Button(self._frame, text='Click to return section data to ' + self._clicked_button,
command=self.save_and_close, bg='green',
font='Verdana 10 bold', fg='yellow')
self.close_and_save.place(x=start_x + dx * 9, y=start_y + dy * 12)
self.draw_properties()
def regen_option_menu(self, event = None):
self._ent_section_list.destroy()
sections = []
if self._section_list == []:
sections = ['',]
elif self._new_stiffener_filter.get() == 'No filter applied':
sections = self._section_list
else:
for sec_obj in self._section_objects:
if sec_obj.stf_type == self._new_stiffener_filter.get():
sections.append(sec_obj.__str__())
start_x, start_y, dx, dy = 20, 70, 60, 33
# self._ent_section_list = tk.OptionMenu(self._frame, self._new_section, command=self.section_choose,
# *sections)
self._ent_section_list = Combobox(self._frame, values=sections, textvariable=self._new_section, width = 40)
self._ent_section_list.bind("<<ComboboxSelected>>", self.section_choose)
self._ent_section_list.place(x=start_x + 7 * dx, y=start_y + 6 * dy)
pass
def option_choose(self, event):
'''
Action when the option menu is changed.
:param event:
:return:
'''
start_x, start_y, dx, dy = 20, 70, 50, 33
tk.Label(self._frame, text='Spacing', font='Verdana 9').place(x=start_x, y=start_y + dy)
self._ent_spacing.place(x=start_x + dx * 2, y=start_y+dy)
tk.Label(self._frame, text='Plate thk.', font='Verdana 9').place(x=start_x, y=start_y + 2 * dy)
self._ent_pl_thk.place(x=start_x + dx * 2, y=start_y+2*dy)
tk.Label(self._frame, text='Web height', font='Verdana 9').place(x=start_x, y=start_y + 3 * dy)
self._ent_web_h.place(x=start_x + dx * 2, y=start_y+3*dy)
tk.Label(self._frame, text='Web thk.', font='Verdana 9').place(x=start_x, y=start_y + 4 * dy)
self._ent_web_thk.place(x=start_x + dx * 2, y=start_y+4*dy)
if self._new_stiffener_type.get()!='FB':
tk.Label(self._frame, text='Flange width', font='Verdana 9').place(x=start_x, y=start_y + 5 * dy)
self._ent_fl_w.place(x=start_x + dx * 2, y=start_y+5*dy)
else: self._ent_fl_w.place_forget()
if self._new_stiffener_type.get()!='FB':
tk.Label(self._frame, text='Flange thk.', font='Verdana 9').place(x=start_x, y=start_y + 6 * dy)
self._ent_fl_thk.place(x=start_x + dx * 2, y=start_y+6*dy)
else: self._ent_fl_thk.place_forget()
if self._new_stiffener_type.get()=='FB':
self._new_fl_w.set(0)
self._new_fl_thk.set(0)
self.draw_properties()
def checkered(self, line_distance):
'''
Grid lines in the properties canvas.
:param line_distance:
:return:
'''
# vertical lines at an interval of "line_distance" pixel
for x in range(line_distance, self._canvas_dim[0], line_distance):
self._canvas_struc.create_line(x, 0, x, self._canvas_dim[0], fill="grey", stipple='gray50')
# horizontal lines at an interval of "line_distance" pixel
for y in range(line_distance, self._canvas_dim[1], line_distance):
self._canvas_struc.create_line(0, y, self._canvas_dim[0], y, fill="grey", stipple='gray50')
def draw_properties(self):
'''
Drawing properties in the canvas.
:return:
'''
self._canvas_struc.delete('all')
self.checkered(10)
ctr_x = self._canvas_dim[0] / 2
ctr_y = self._canvas_dim[1] / 2 + 200
m = self._draw_scale
init_color, init_stipple = 'blue', 'gray50'
try: spacing = self._new_spacing.get()
except TclError: spacing = 0
try: pl_thk = self._new_pl_thk.get()
except TclError: pl_thk = 0
try: web_h = self._new_web_h.get()
except TclError: web_h = 0
try: web_thk = self._new_web_thk.get()
except TclError: web_thk = 0
try: fl_w = self._new_fl_w.get()
except TclError: fl_w = 0
try: fl_thk = self._new_fl_thk.get()
except TclError: fl_thk = 0
self._canvas_struc.create_rectangle(0, 0, self._canvas_dim[0] + 10, 70, fill='white')
self._canvas_struc.create_text(250, 15, text='Plate: ' + str(spacing ) + 'x' +
str(pl_thk ),font='Verdana 10 bold',fill='black')
self._canvas_struc.create_rectangle(ctr_x - m * spacing / 2, ctr_y,ctr_x + m * spacing / 2,
ctr_y - m * pl_thk, fill='black', stipple=init_stipple)
self._canvas_struc.create_text(250, 35, text='Web: ' + str(web_h ) + 'x'+ str(web_thk )
,font='Verdana 10 bold',fill='blue')
self._canvas_struc.create_rectangle(ctr_x - m * web_thk / 2,ctr_y - m * pl_thk,ctr_x + m * web_thk / 2,
ctr_y - m * (web_h+ pl_thk), fill='blue', stipple=init_stipple)
self._canvas_struc.create_text(250, 55, text='Flange: '+ str(fl_w ) + 'x'+ str(fl_thk ),
font='Verdana 10 bold',fill='red')
if self._new_stiffener_type.get() in ['L', 'L-bulb']:
self._canvas_struc.create_rectangle(ctr_x - m * web_thk / 2, ctr_y- m * (pl_thk + web_h),ctr_x + m * fl_w,
ctr_y - m * (pl_thk + web_h + fl_thk),fill='red', stipple=init_stipple)
else:
self._canvas_struc.create_rectangle(ctr_x - m * fl_w / 2, ctr_y- m * (pl_thk + web_h),ctr_x + m * fl_w / 2,
ctr_y - m * (pl_thk + web_h + fl_thk),fill='red', stipple=init_stipple)
def draw_trace(self,*args):
        '''
        Redraw the section properties when an entered value changes.
        :return:
        '''
self.draw_properties()
def save_and_close(self):
        '''
        Save the entered section data and close the window. The spacing, plate, web and
        flange values are passed back to the main application as floats together with
        the stiffener type and the button that opened this window.
        :return:
        '''
if __name__ == '__main__':
self._frame.destroy()
return
self.app.on_close_structure_window([float(num) for num in [self._new_spacing.get(),self._new_pl_thk.get(),
self._new_web_h.get(),self._new_web_thk.get(),
self._new_fl_w.get(),self._new_fl_thk.get()]] +
[self._new_stiffener_type.get(), self._clicked_button])
self._frame.destroy()
def section_choose(self, event = None):
''' Choosing a section. '''
#chosen_section = self._new_section.get()
chosen_section = event.widget.get()
for section in self._section_objects:
if chosen_section == section.__str__():
self._new_web_h.set(section.stf_web_height*1000)
self._new_web_thk.set(section.stf_web_thk*1000)
self._new_fl_w.set(section.stf_flange_width*1000)
self._new_fl_thk.set(section.stf_flange_thk*1000)
self._new_stiffener_type.set(section.stf_type)
self.option_choose(None)
def read_sections(self):
        '''
        Read stiffener sections from a user-selected file and add them to the section list.
        '''
from tkinter import filedialog
import any_files.helper as hlp
from pathlib import Path
file = filedialog.askopenfile('r')
file = Path(file.name)
#m = self._ent_section_list.children['menu']
for section in hlp.helper_read_section_file(file.name):
SecObj = Section(section)
self._section_list = hlp.add_new_section(self._section_list, SecObj)
self._section_objects.append(SecObj)
#m.add_command(label=SecObj.__str__(), command=self.section_choose)
def read_sections_built_in(self):
        '''
        Read the built-in stiffener section library (bulb, angle bar, T-bar and flat bar).
        '''
import any_files.helper as hlp
        # Use the library file in the working directory if present, otherwise fall back
        # to the copy under the application root directory.
        if pathlib.Path('bulb_anglebar_tbar_flatbar.csv').exists():
            libfile = 'bulb_anglebar_tbar_flatbar.csv'
        else:
            libfile = self._root_dir + '/' + 'bulb_anglebar_tbar_flatbar.csv'
for section in hlp.helper_read_section_file(libfile):
SecObj = Section(section)
self._section_list = hlp.add_new_section(self._section_list, SecObj)
self._section_objects.append(SecObj)
#m.add_command(label=SecObj.__str__(), command=self.section_choose)
self.regen_option_menu()
class Section:
    '''
    Creates a section property from an input dictionary with the following keys, e.g.:
    'stf_type': [self._new_stf_type.get(), ''],
    'stf_web_height': [self._new_stf_web_h.get()/1000, 'm'],
    'stf_web_thk': [self._new_sft_web_t.get()/1000, 'm'],
    'stf_flange_width': [self._new_stf_fl_w.get()/1000, 'm'],
    'stf_flange_thk': [self._new_stf_fl_t.get()/1000, 'm'],
    Each value may be given either directly or as a [value, unit] list.
    '''
    def __init__(self, input_dict):
        super(Section, self).__init__()
        # Each entry may be given either as a plain value or as a [value, unit] list.
        def value_of(key):
            val = input_dict[key]
            return val[0] if type(val) == list else val
        self._stf_type = value_of('stf_type')
        self._stf_web_height = value_of('stf_web_height')
        self._stf_web_thk = value_of('stf_web_thk')
        self._stf_flange_width = value_of('stf_flange_width')
        self._stf_flange_thk = value_of('stf_flange_thk')
def __str__(self):
        ''' Return a descriptive name string for the section (used to list and match sections). '''
base_name = self.stf_type+ '_' + str(round(self.stf_web_height*1000, 0)) + 'x' + \
str(round(self.stf_web_thk*1000, 0))
if self._stf_type == 'FB':
ret_str = base_name
elif self._stf_type in ['L-bulb', 'bulb', 'hp']:
ret_str = 'Bulb'+str(int(self.stf_web_height*1000 + self.stf_flange_thk*1000))+'x'+\
str(round(self.stf_web_thk*1000, 0))+ '__' +str(round(self.stf_web_height*1000, 0)) + 'x' + \
str(round(self.stf_web_thk*1000, 0))+ str(round(self.stf_flange_width*1000, 0)) + 'x' + \
str(round(self.stf_flange_thk*1000, 0))
else:
ret_str = base_name + '__' + str(round(self.stf_flange_width*1000, 0)) + 'x' + \
str(round(self.stf_flange_thk*1000, 0))
ret_str = ret_str.replace('.', '_')
return ret_str
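    # Illustrative examples of the naming scheme above (values are arbitrary): a T-bar
    # built from web 0.35 x 0.012 m and flange 0.1 x 0.015 m is rendered as
    # 'T_350_0x12_0__100_0x15_0', while a flat bar ('FB') with web 0.25 x 0.015 m
    # becomes 'FB_250_0x15_0' (dots are replaced by underscores).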
@property
def stf_type(self):
return self._stf_type
@stf_type.setter
def stf_type(self, value):
self._stf_type = value
@property
def stf_web_height(self):
return self._stf_web_height
@stf_web_height.setter
def stf_web_height(self, value):
self._stf_web_height = value
@property
def stf_web_thk(self):
return self._stf_web_thk
@stf_web_thk.setter
def stf_web_thk(self, value):
self._stf_web_thk = value
@property
def stf_flange_width(self):
return self._stf_flange_width
@stf_flange_width.setter
def stf_flange_width(self, value):
self._stf_flange_width = value
@property
def stf_flange_thk(self):
return self._stf_flange_thk
@stf_flange_thk.setter
def stf_flange_thk(self, value):
self._stf_flange_thk = value
def return_puls_input(self):
        '''
        Return the section data as a dictionary formatted for PULS input.
        :return:
        '''
return {'Stiffener type (L,T,F)': self.stf_type, 'Stiffener boundary': 'C',
'Stiff. Height': self.stf_web_height*1000,
'Web thick.': self.stf_web_thk*1000, 'Flange width': self.stf_flange_width*1000,
'Flange thick.': self.stf_flange_thk*1000}
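# A minimal usage sketch (not part of the application, values are arbitrary): a Section
# can be built from plain values instead of [value, unit] lists, and return_puls_input()
# then hands back the data converted from m to mm.
#
# sec = Section({'stf_type': 'T', 'stf_web_height': 0.35, 'stf_web_thk': 0.012,
#                'stf_flange_width': 0.1, 'stf_flange_thk': 0.015})
# sec.return_puls_input()
# # -> {'Stiffener type (L,T,F)': 'T', 'Stiffener boundary': 'C', 'Stiff. Height': 350.0,
# #     'Web thick.': 12.0, 'Flange width': 100.0, 'Flange thick.': 15.0}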
# def run_section_properties(pl_s = 0.75, pl_t = 0.015, hw = 0.4, tw = 0.018, bf = 0.15, tf = 0.02):
# import sectionproperties.pre.sections as sections
# from sectionproperties.analysis.cross_section import CrossSection
# from matplotlib import pyplot as plt
#
# # create a 50 diameter circle discretised by 64 points
# geometry = sections.MonoISection(
# d=(pl_t+hw+tf)*1000, b_t=bf*1000, b_b=pl_s*1000, t_ft=tf*1000, t_fb=pl_t*1000, t_w=tw*1000, r=8, n_r=16
# )
# mesh = geometry.create_mesh(mesh_sizes=[3.0])
# section = CrossSection(geometry, mesh) # create a CrossSection object
# mesh_nodes = section.mesh_nodes
# mesh_elements = section.mesh_elements
# # plot the mesh
# (fig, ax) = plt.subplots(figsize=(4, 4), dpi=100)
# ax.triplot(mesh_nodes[:, 0], mesh_nodes[:, 1], mesh_elements[:, 0:3], lw=0.5)
# # #section.display_mesh_info() # display the mesh information
# # ax = section.plot_mesh(pause=True) # plot the generated mesh
# #
# # # perform a geometric, warping and plastic analysis, displaying the time info
# # section.calculate_geometric_properties(time_info=True)
# # section.calculate_warping_properties(time_info=True)
# # section.calculate_plastic_properties(time_info=True)
# #
# # # print the results to the terminal
# # section.display_results()
# #
# # # get the second moments of area and the torsion constant
# # (ixx_c, iyy_c, ixy_c) = section.get_ic()
# # j = section.get_j()
# #
# # # print the sum of the second moments of area and the torsion constant
# # print("Ixx + Iyy = {0:.3f}".format(ixx_c + iyy_c))
# # print("J = {0:.3f}".format(j))
# return fig, ax
#
#
if __name__ == '__main__':
# sec1 = Section({'stf_type': 'T', 'stf_web_height': 0.35, 'stf_web_thk': 0.02, 'stf_flange_width': 0.15,
# 'stf_flange_thk': 0.015})
#
# sec_list = [sec1, Section({'stf_type': 'FB', 'stf_web_height': 0.35, 'stf_web_thk': 0.02, 'stf_flange_width': 0,
# 'stf_flange_thk': 0}), Section({'stf_type': 'T', 'stf_web_height': 0.4, 'stf_web_thk': 0.02,
# 'stf_flange_width': 0.15, 'stf_flange_thk': 0.02})]
#
# hlp.add_new_section(sec_list, sec1)
# run_section_properties()
root = tk.Tk()
my_app = CreateStructureWindow(root, app=None)
root.mainloop() | PypiClean |
/GailBot-0.2a0-py3-none-any.whl/gailbot/plugins/loader/directoryloader.py | import os
import pip
from cryptography.fernet import Fernet
from typing import Dict, List, Union, TypedDict, Tuple
from dataclasses import dataclass
from .pluginLoader import PluginLoader
from ..suite import PluginSuite
from gailbot.core.utils.logger import makelogger
from gailbot.core.utils.general import (
filepaths_in_dir,
get_name,
get_extension,
copy,
read_toml,
get_parent_path,
is_directory,
delete,
)
from config_backend import PROJECT_ROOT
from gailbot.configs import PLUGIN_CONFIG
from pydantic import BaseModel, ValidationError
logger = makelogger("plugin directory loader")
class PluginDict(BaseModel):
"""dictionary type for individual plugin"""
plugin_name: str
dependencies: List[str]
module_name: str
rel_path: str
class ConfDict(TypedDict):
"""dictionary type for plugin suite configuration dictionary"""
suite_name: str
plugins: List[PluginDict]
class MetaData(BaseModel):
Version: str
Author: str
class ConfModel(BaseModel):
"""dictionary type for plugin suite configuration dictionary"""
metadata: MetaData
suite_name: str
plugins: List[PluginDict]
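# A minimal sketch of a configuration dictionary that satisfies ConfModel above; the
# suite name, author and paths are hypothetical and only illustrate the expected shape
# (this is the kind of dictionary read_toml() is expected to produce from the suite's
# toml configuration file).
#
# _example_conf = {
#     "suite_name": "demo_suite",
#     "metadata": {"Version": "0.1.0", "Author": "Jane Doe"},
#     "plugins": [
#         {
#             "plugin_name": "word_counter",
#             "dependencies": [],
#             "module_name": "word_counter",
#             "rel_path": "src/word_counter.py",
#         }
#     ],
# }
# ConfModel(**_example_conf)  # raises pydantic.ValidationError if the shape is wrong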
class PluginDirectoryLoader(PluginLoader):
"""load the plugin suite from a directory that contains all source
script implementing the plugins, and a toml file that stores
configuration information to load the plugin
"""
def __init__(
self,
suites_dir: str,
):
"""initialize a plugin directory loader
Args:
suites_dir (str): the path to the directory that stores all the
copies of plugins will be stored and managed
by plugin manager
"""
self.suites_dir = suites_dir
self.toml_loader = PluginTOMLLoader()
def load(self, suite_dir_path: str) -> Union[PluginSuite, bool]:
"""load the plugin from a directory
Args:
suite_dir_path (str): path to the source directory that contains
the entire plugin suite
Returns:
return a PluginSuite object that stores the loaded suite
if the plugin can be successfully loaded, return false otherwise
"""
if (not type(suite_dir_path) == str) or (not is_directory(suite_dir_path)):
logger.info(suite_dir_path)
logger.error("not a plugin")
# check for invalid input
return False
suite_dir_name = get_name(suite_dir_path)
logger.info(f"suite name is {suite_dir_name}, suite path is {suite_dir_path}")
tgt_path = f"{self.suites_dir}/{get_name(suite_dir_path)}"
config = None
requirement = None
official = None
document = None
format = None
# search for the requirements and config file
for root, dirs, files in os.walk(suite_dir_path):
if PLUGIN_CONFIG.REQUIREMENT in files:
requirement = os.path.join(root, PLUGIN_CONFIG.REQUIREMENT)
if PLUGIN_CONFIG.CONFIG in files:
config = os.path.join(root, PLUGIN_CONFIG.CONFIG)
if PLUGIN_CONFIG.DOCUMENT in files:
document = os.path.join(root, PLUGIN_CONFIG.DOCUMENT)
if PLUGIN_CONFIG.OFFICIAL in files:
official = os.path.join(root, PLUGIN_CONFIG.OFFICIAL)
if PLUGIN_CONFIG.FORMAT in files:
format = os.path.join(root, PLUGIN_CONFIG.FORMAT)
if config and requirement and document and official and format:
break
if not config or not document or not format:
logger.error(f"missing required file")
return False
# download required package
try:
if requirement:
self.download_packages(requirement, PROJECT_ROOT)
except Exception as e:
logger.error(f"failed to download package", exc_info=e)
return False
# make a copy of the original plugin suite
if not is_directory(tgt_path):
copy(suite_dir_path, tgt_path)
if is_directory(tgt_path) and self.suites_dir not in suite_dir_path:
delete(tgt_path)
copy(suite_dir_path, tgt_path)
suite = self.toml_loader.load(config, suite_dir_name, self.suites_dir)
if suite:
# validate
if self.validate_official(official):
suite.set_to_official_suite()
return [suite]
else:
delete(tgt_path)
            logger.warning(
                f"the plugin suite validation failed, deleted suite at {tgt_path}"
            )
return False
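    # A rough usage sketch (paths are hypothetical): the plugin manager points the
    # loader at its managed suites directory and then hands it the directory of a
    # candidate suite.
    #
    #   loader = PluginDirectoryLoader("/path/to/gailbot/plugin_suites")
    #   loaded = loader.load("/path/to/downloads/demo_suite")
    #   if loaded:
    #       suite = loaded[0]  # load() returns the PluginSuite wrapped in a list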
def download_packages(self, req_file, dest):
"""download packages listed under req_file to dest
Args:
req_file(str): a string that specifies the path to requirements.txt file
dest (str): a string to the directory where the file will be downloaded
"""
if hasattr(pip, "main"):
pip.main(["install", "-t", str(dest), "-r", req_file])
def validate_official(self, file):
"""given a file that stores the key, verify the key
Args:
file (str): path to the file
Returns:
bool: return true if the key matches with the official gailbot plugin
"""
if not file:
return False
try:
with open(file, "r") as f:
key = f.read()
fernet = Fernet(PLUGIN_CONFIG.OFFICIAL_ENKEY)
decrypt = fernet.decrypt(key)
if decrypt == PLUGIN_CONFIG.OFFICIAL_KEY:
return True
else:
return False
except Exception as e:
logger.error(e, exc_info=e)
return False
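# For context, a sketch of how an "official" key file could be produced so that
# validate_official() accepts it. This assumes access to the same key pair stored in
# PLUGIN_CONFIG (OFFICIAL_ENKEY as the Fernet key, OFFICIAL_KEY as bytes); the output
# file name is hypothetical.
#
# from cryptography.fernet import Fernet
# token = Fernet(PLUGIN_CONFIG.OFFICIAL_ENKEY).encrypt(PLUGIN_CONFIG.OFFICIAL_KEY)
# with open("OFFICIAL_KEY_FILE", "wb") as f:
#     f.write(token)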
class PluginTOMLLoader(PluginLoader):
"""import all modules in the plugin, all plugin sources and dependencies
are described in a configuration file in toml format
"""
def __init__(self):
self.dict_config_loader = PluginDictLoader()
def load(
self, conf_path: str, suite_name: str, suites_directory: str
) -> PluginSuite:
"""given the path to configuration file of one plugin suite, and
the suites directory that stores all plugin suites ,
import the plugin suite described in the configuration file
Args:
conf_path (str): a path to the configuration file
suites_directory (str): a path to the directory that contain
all plugin suites
Returns:
PluginSuite:
return a PluginSuite object that stores the loaded suite
if the plugin can be successfully loaded, return false otherwise
"""
validated, conf = PluginTOMLLoader.validate_config(conf_path, suite_name)
if validated:
conf.update({"path": get_parent_path(conf_path)})
return self.dict_config_loader.load(conf, suites_directory)
else:
logger.error(f"Plugin suite toml file validation failed Error: {conf}")
return False
@staticmethod
def validate_config(
conf_path: str, suite_name: str
) -> Tuple[bool, Union[str, Dict]]:
"""
validate if the plugin configuration file is in the correct format
Args:
conf_path (str): path to the configuration file
suite_name (str): suite name
Returns:
Tuple(bool, Union[str, Dict]):
for valid configuration, return True and dictionary that stores the
toml file information;
for invalid configuration file, return False and error message
"""
if not type(conf_path) == str:
return (False, "Invalid file path")
if (not os.path.isfile(conf_path)) or (not get_extension(conf_path) == "toml"):
return (False, "Invalid file path")
dict_conf = read_toml(conf_path)
try:
ConfModel(**dict_conf)
except ValidationError as e:
logger.error(f"invalid scheme {e}")
return (False, f"invalid scheme {e}")
if dict_conf["suite_name"] == suite_name:
return (True, dict_conf)
else:
logger.error(f"suite name is {suite_name}")
return (False, "suite name must be the same as the folder name")
class PluginDictLoader(PluginLoader):
"""load a plugin suite from a dictionary that contains the configuration
of all plugin dependencies and sources
"""
def load(self, dict_conf: Dict, suites_directory: str) -> PluginSuite:
if not type(dict_conf) == dict:
return ""
try:
suite = PluginSuite(dict_conf, suites_directory)
assert suite.is_ready
return suite
except Exception as e:
logger.error(e, exc_info=e)
return False | PypiClean |
/Flask-Turbo-Boost-0.2.8.tar.gz/Flask-Turbo-Boost-0.2.8/flask_turbo_boost/project/application/static/js/prod.js | !function(a){"use strict";a.matchMedia=a.matchMedia||function(a){var b,c=a.documentElement,d=c.firstElementChild||c.firstChild,e=a.createElement("body"),f=a.createElement("div");return f.id="mq-test-1",f.style.cssText="position:absolute;top:-100em",e.style.background="none",e.appendChild(f),function(a){return f.innerHTML='­<style media="'+a+'"> #mq-test-1 { width: 42px; }</style>',c.insertBefore(e,d),b=42===f.offsetWidth,c.removeChild(e),{matches:b,media:a}}}(a.document)}(this),function(a){"use strict";function b(){u(!0)}var c={};a.respond=c,c.update=function(){};var d=[],e=function(){var b=!1;try{b=new a.XMLHttpRequest}catch(c){b=new a.ActiveXObject("Microsoft.XMLHTTP")}return function(){return b}}(),f=function(a,b){var c=e();c&&(c.open("GET",a,!0),c.onreadystatechange=function(){4!==c.readyState||200!==c.status&&304!==c.status||b(c.responseText)},4!==c.readyState&&c.send(null))};if(c.ajax=f,c.queue=d,c.regex={media:/@media[^\{]+\{([^\{\}]*\{[^\}\{]*\})+/gi,keyframes:/@(?:\-(?:o|moz|webkit)\-)?keyframes[^\{]+\{(?:[^\{\}]*\{[^\}\{]*\})+[^\}]*\}/gi,urls:/(url\()['"]?([^\/\)'"][^:\)'"]+)['"]?(\))/g,findStyles:/@media *([^\{]+)\{([\S\s]+?)$/,only:/(only\s+)?([a-zA-Z]+)\s?/,minw:/\([\s]*min\-width\s*:[\s]*([\s]*[0-9\.]+)(px|em)[\s]*\)/,maxw:/\([\s]*max\-width\s*:[\s]*([\s]*[0-9\.]+)(px|em)[\s]*\)/},c.mediaQueriesSupported=a.matchMedia&&null!==a.matchMedia("only all")&&a.matchMedia("only all").matches,!c.mediaQueriesSupported){var g,h,i,j=a.document,k=j.documentElement,l=[],m=[],n=[],o={},p=30,q=j.getElementsByTagName("head")[0]||k,r=j.getElementsByTagName("base")[0],s=q.getElementsByTagName("link"),t=function(){var a,b=j.createElement("div"),c=j.body,d=k.style.fontSize,e=c&&c.style.fontSize,f=!1;return b.style.cssText="position:absolute;font-size:1em;width:1em",c||(c=f=j.createElement("body"),c.style.background="none"),k.style.fontSize="100%",c.style.fontSize="100%",c.appendChild(b),f&&k.insertBefore(c,k.firstChild),a=b.offsetWidth,f?k.removeChild(c):c.removeChild(b),k.style.fontSize=d,e&&(c.style.fontSize=e),a=i=parseFloat(a)},u=function(b){var c="clientWidth",d=k[c],e="CSS1Compat"===j.compatMode&&d||j.body[c]||d,f={},o=s[s.length-1],r=(new Date).getTime();if(b&&g&&p>r-g)return a.clearTimeout(h),h=a.setTimeout(u,p),void 0;g=r;for(var v in l)if(l.hasOwnProperty(v)){var w=l[v],x=w.minw,y=w.maxw,z=null===x,A=null===y,B="em";x&&(x=parseFloat(x)*(x.indexOf(B)>-1?i||t():1)),y&&(y=parseFloat(y)*(y.indexOf(B)>-1?i||t():1)),w.hasquery&&(z&&A||!(z||e>=x)||!(A||y>=e))||(f[w.media]||(f[w.media]=[]),f[w.media].push(m[w.rules]))}for(var C in n)n.hasOwnProperty(C)&&n[C]&&n[C].parentNode===q&&q.removeChild(n[C]);n.length=0;for(var D in f)if(f.hasOwnProperty(D)){var E=j.createElement("style"),F=f[D].join("\n");E.type="text/css",E.media=D,q.insertBefore(E,o.nextSibling),E.styleSheet?E.styleSheet.cssText=F:E.appendChild(j.createTextNode(F)),n.push(E)}},v=function(a,b,d){var e=a.replace(c.regex.keyframes,"").match(c.regex.media),f=e&&e.length||0;b=b.substring(0,b.lastIndexOf("/"));var g=function(a){return a.replace(c.regex.urls,"$1"+b+"$2$3")},h=!f&&d;b.length&&(b+="/"),h&&(f=1);for(var i=0;f>i;i++){var j,k,n,o;h?(j=d,m.push(g(a))):(j=e[i].match(c.regex.findStyles)&&RegExp.$1,m.push(RegExp.$2&&g(RegExp.$2))),n=j.split(","),o=n.length;for(var 
p=0;o>p;p++)k=n[p],l.push({media:k.split("(")[0].match(c.regex.only)&&RegExp.$2||"all",rules:m.length-1,hasquery:k.indexOf("(")>-1,minw:k.match(c.regex.minw)&&parseFloat(RegExp.$1)+(RegExp.$2||""),maxw:k.match(c.regex.maxw)&&parseFloat(RegExp.$1)+(RegExp.$2||"")})}u()},w=function(){if(d.length){var b=d.shift();f(b.href,function(c){v(c,b.href,b.media),o[b.href]=!0,a.setTimeout(function(){w()},0)})}},x=function(){for(var b=0;b<s.length;b++){var c=s[b],e=c.href,f=c.media,g=c.rel&&"stylesheet"===c.rel.toLowerCase();e&&g&&!o[e]&&(c.styleSheet&&c.styleSheet.rawCssText?(v(c.styleSheet.rawCssText,e,f),o[e]=!0):(!/^([a-zA-Z:]*\/\/)/.test(e)&&!r||e.replace(RegExp.$1,"").split("/")[0]===a.location.host)&&("//"===e.substring(0,2)&&(e=a.location.protocol+e),d.push({href:e,media:f})))}w()};x(),c.update=x,c.getEmValue=t,a.addEventListener?a.addEventListener("resize",b,!1):a.attachEvent&&a.attachEvent("onresize",b)}}(this);
/*! jQuery v3.1.1 | (c) jQuery Foundation | jquery.org/license */
!function(a,b){"use strict";"object"==typeof module&&"object"==typeof module.exports?module.exports=a.document?b(a,!0):function(a){if(!a.document)throw new Error("jQuery requires a window with a document");return b(a)}:b(a)}("undefined"!=typeof window?window:this,function(a,b){"use strict";var c=[],d=a.document,e=Object.getPrototypeOf,f=c.slice,g=c.concat,h=c.push,i=c.indexOf,j={},k=j.toString,l=j.hasOwnProperty,m=l.toString,n=m.call(Object),o={};function p(a,b){b=b||d;var c=b.createElement("script");c.text=a,b.head.appendChild(c).parentNode.removeChild(c)}var q="3.1.1",r=function(a,b){return new r.fn.init(a,b)},s=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,t=/^-ms-/,u=/-([a-z])/g,v=function(a,b){return b.toUpperCase()};r.fn=r.prototype={jquery:q,constructor:r,length:0,toArray:function(){return f.call(this)},get:function(a){return null==a?f.call(this):a<0?this[a+this.length]:this[a]},pushStack:function(a){var b=r.merge(this.constructor(),a);return b.prevObject=this,b},each:function(a){return r.each(this,a)},map:function(a){return this.pushStack(r.map(this,function(b,c){return a.call(b,c,b)}))},slice:function(){return this.pushStack(f.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(a){var b=this.length,c=+a+(a<0?b:0);return this.pushStack(c>=0&&c<b?[this[c]]:[])},end:function(){return this.prevObject||this.constructor()},push:h,sort:c.sort,splice:c.splice},r.extend=r.fn.extend=function(){var a,b,c,d,e,f,g=arguments[0]||{},h=1,i=arguments.length,j=!1;for("boolean"==typeof g&&(j=g,g=arguments[h]||{},h++),"object"==typeof g||r.isFunction(g)||(g={}),h===i&&(g=this,h--);h<i;h++)if(null!=(a=arguments[h]))for(b in a)c=g[b],d=a[b],g!==d&&(j&&d&&(r.isPlainObject(d)||(e=r.isArray(d)))?(e?(e=!1,f=c&&r.isArray(c)?c:[]):f=c&&r.isPlainObject(c)?c:{},g[b]=r.extend(j,f,d)):void 0!==d&&(g[b]=d));return g},r.extend({expando:"jQuery"+(q+Math.random()).replace(/\D/g,""),isReady:!0,error:function(a){throw new Error(a)},noop:function(){},isFunction:function(a){return"function"===r.type(a)},isArray:Array.isArray,isWindow:function(a){return null!=a&&a===a.window},isNumeric:function(a){var b=r.type(a);return("number"===b||"string"===b)&&!isNaN(a-parseFloat(a))},isPlainObject:function(a){var b,c;return!(!a||"[object Object]"!==k.call(a))&&(!(b=e(a))||(c=l.call(b,"constructor")&&b.constructor,"function"==typeof c&&m.call(c)===n))},isEmptyObject:function(a){var b;for(b in a)return!1;return!0},type:function(a){return null==a?a+"":"object"==typeof a||"function"==typeof a?j[k.call(a)]||"object":typeof a},globalEval:function(a){p(a)},camelCase:function(a){return a.replace(t,"ms-").replace(u,v)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toLowerCase()===b.toLowerCase()},each:function(a,b){var c,d=0;if(w(a)){for(c=a.length;d<c;d++)if(b.call(a[d],d,a[d])===!1)break}else for(d in a)if(b.call(a[d],d,a[d])===!1)break;return a},trim:function(a){return null==a?"":(a+"").replace(s,"")},makeArray:function(a,b){var c=b||[];return null!=a&&(w(Object(a))?r.merge(c,"string"==typeof a?[a]:a):h.call(c,a)),c},inArray:function(a,b,c){return null==b?-1:i.call(b,a,c)},merge:function(a,b){for(var c=+b.length,d=0,e=a.length;d<c;d++)a[e++]=b[d];return a.length=e,a},grep:function(a,b,c){for(var d,e=[],f=0,g=a.length,h=!c;f<g;f++)d=!b(a[f],f),d!==h&&e.push(a[f]);return e},map:function(a,b,c){var d,e,f=0,h=[];if(w(a))for(d=a.length;f<d;f++)e=b(a[f],f,c),null!=e&&h.push(e);else for(f in a)e=b(a[f],f,c),null!=e&&h.push(e);return g.apply([],h)},guid:1,proxy:function(a,b){var 
c,d,e;if("string"==typeof b&&(c=a[b],b=a,a=c),r.isFunction(a))return d=f.call(arguments,2),e=function(){return a.apply(b||this,d.concat(f.call(arguments)))},e.guid=a.guid=a.guid||r.guid++,e},now:Date.now,support:o}),"function"==typeof Symbol&&(r.fn[Symbol.iterator]=c[Symbol.iterator]),r.each("Boolean Number String Function Array Date RegExp Object Error Symbol".split(" "),function(a,b){j["[object "+b+"]"]=b.toLowerCase()});function w(a){var b=!!a&&"length"in a&&a.length,c=r.type(a);return"function"!==c&&!r.isWindow(a)&&("array"===c||0===b||"number"==typeof b&&b>0&&b-1 in a)}var x=function(a){var b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u="sizzle"+1*new Date,v=a.document,w=0,x=0,y=ha(),z=ha(),A=ha(),B=function(a,b){return a===b&&(l=!0),0},C={}.hasOwnProperty,D=[],E=D.pop,F=D.push,G=D.push,H=D.slice,I=function(a,b){for(var c=0,d=a.length;c<d;c++)if(a[c]===b)return c;return-1},J="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",K="[\\x20\\t\\r\\n\\f]",L="(?:\\\\.|[\\w-]|[^\0-\\xa0])+",M="\\["+K+"*("+L+")(?:"+K+"*([*^$|!~]?=)"+K+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+L+"))|)"+K+"*\\]",N=":("+L+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+M+")*)|.*)\\)|)",O=new RegExp(K+"+","g"),P=new RegExp("^"+K+"+|((?:^|[^\\\\])(?:\\\\.)*)"+K+"+$","g"),Q=new RegExp("^"+K+"*,"+K+"*"),R=new RegExp("^"+K+"*([>+~]|"+K+")"+K+"*"),S=new RegExp("="+K+"*([^\\]'\"]*?)"+K+"*\\]","g"),T=new RegExp(N),U=new RegExp("^"+L+"$"),V={ID:new RegExp("^#("+L+")"),CLASS:new RegExp("^\\.("+L+")"),TAG:new RegExp("^("+L+"|[*])"),ATTR:new RegExp("^"+M),PSEUDO:new RegExp("^"+N),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+K+"*(even|odd|(([+-]|)(\\d*)n|)"+K+"*(?:([+-]|)"+K+"*(\\d+)|))"+K+"*\\)|)","i"),bool:new RegExp("^(?:"+J+")$","i"),needsContext:new RegExp("^"+K+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+K+"*((?:-\\d)?\\d*)"+K+"*\\)|)(?=[^-]|$)","i")},W=/^(?:input|select|textarea|button)$/i,X=/^h\d$/i,Y=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,$=/[+~]/,_=new RegExp("\\\\([\\da-f]{1,6}"+K+"?|("+K+")|.)","ig"),aa=function(a,b,c){var d="0x"+b-65536;return d!==d||c?b:d<0?String.fromCharCode(d+65536):String.fromCharCode(d>>10|55296,1023&d|56320)},ba=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ca=function(a,b){return b?"\0"===a?"\ufffd":a.slice(0,-1)+"\\"+a.charCodeAt(a.length-1).toString(16)+" ":"\\"+a},da=function(){m()},ea=ta(function(a){return a.disabled===!0&&("form"in a||"label"in a)},{dir:"parentNode",next:"legend"});try{G.apply(D=H.call(v.childNodes),v.childNodes),D[v.childNodes.length].nodeType}catch(fa){G={apply:D.length?function(a,b){F.apply(a,H.call(b))}:function(a,b){var c=a.length,d=0;while(a[c++]=b[d++]);a.length=c-1}}}function ga(a,b,d,e){var f,h,j,k,l,o,r,s=b&&b.ownerDocument,w=b?b.nodeType:9;if(d=d||[],"string"!=typeof a||!a||1!==w&&9!==w&&11!==w)return d;if(!e&&((b?b.ownerDocument||b:v)!==n&&m(b),b=b||n,p)){if(11!==w&&(l=Z.exec(a)))if(f=l[1]){if(9===w){if(!(j=b.getElementById(f)))return d;if(j.id===f)return d.push(j),d}else if(s&&(j=s.getElementById(f))&&t(b,j)&&j.id===f)return d.push(j),d}else{if(l[2])return G.apply(d,b.getElementsByTagName(a)),d;if((f=l[3])&&c.getElementsByClassName&&b.getElementsByClassName)return G.apply(d,b.getElementsByClassName(f)),d}if(c.qsa&&!A[a+" "]&&(!q||!q.test(a))){if(1!==w)s=b,r=a;else 
if("object"!==b.nodeName.toLowerCase()){(k=b.getAttribute("id"))?k=k.replace(ba,ca):b.setAttribute("id",k=u),o=g(a),h=o.length;while(h--)o[h]="#"+k+" "+sa(o[h]);r=o.join(","),s=$.test(a)&&qa(b.parentNode)||b}if(r)try{return G.apply(d,s.querySelectorAll(r)),d}catch(x){}finally{k===u&&b.removeAttribute("id")}}}return i(a.replace(P,"$1"),b,d,e)}function ha(){var a=[];function b(c,e){return a.push(c+" ")>d.cacheLength&&delete b[a.shift()],b[c+" "]=e}return b}function ia(a){return a[u]=!0,a}function ja(a){var b=n.createElement("fieldset");try{return!!a(b)}catch(c){return!1}finally{b.parentNode&&b.parentNode.removeChild(b),b=null}}function ka(a,b){var c=a.split("|"),e=c.length;while(e--)d.attrHandle[c[e]]=b}function la(a,b){var c=b&&a,d=c&&1===a.nodeType&&1===b.nodeType&&a.sourceIndex-b.sourceIndex;if(d)return d;if(c)while(c=c.nextSibling)if(c===b)return-1;return a?1:-1}function ma(a){return function(b){var c=b.nodeName.toLowerCase();return"input"===c&&b.type===a}}function na(a){return function(b){var c=b.nodeName.toLowerCase();return("input"===c||"button"===c)&&b.type===a}}function oa(a){return function(b){return"form"in b?b.parentNode&&b.disabled===!1?"label"in b?"label"in b.parentNode?b.parentNode.disabled===a:b.disabled===a:b.isDisabled===a||b.isDisabled!==!a&&ea(b)===a:b.disabled===a:"label"in b&&b.disabled===a}}function pa(a){return ia(function(b){return b=+b,ia(function(c,d){var e,f=a([],c.length,b),g=f.length;while(g--)c[e=f[g]]&&(c[e]=!(d[e]=c[e]))})})}function qa(a){return a&&"undefined"!=typeof a.getElementsByTagName&&a}c=ga.support={},f=ga.isXML=function(a){var b=a&&(a.ownerDocument||a).documentElement;return!!b&&"HTML"!==b.nodeName},m=ga.setDocument=function(a){var b,e,g=a?a.ownerDocument||a:v;return g!==n&&9===g.nodeType&&g.documentElement?(n=g,o=n.documentElement,p=!f(n),v!==n&&(e=n.defaultView)&&e.top!==e&&(e.addEventListener?e.addEventListener("unload",da,!1):e.attachEvent&&e.attachEvent("onunload",da)),c.attributes=ja(function(a){return a.className="i",!a.getAttribute("className")}),c.getElementsByTagName=ja(function(a){return a.appendChild(n.createComment("")),!a.getElementsByTagName("*").length}),c.getElementsByClassName=Y.test(n.getElementsByClassName),c.getById=ja(function(a){return o.appendChild(a).id=u,!n.getElementsByName||!n.getElementsByName(u).length}),c.getById?(d.filter.ID=function(a){var b=a.replace(_,aa);return function(a){return a.getAttribute("id")===b}},d.find.ID=function(a,b){if("undefined"!=typeof b.getElementById&&p){var c=b.getElementById(a);return c?[c]:[]}}):(d.filter.ID=function(a){var b=a.replace(_,aa);return function(a){var c="undefined"!=typeof a.getAttributeNode&&a.getAttributeNode("id");return c&&c.value===b}},d.find.ID=function(a,b){if("undefined"!=typeof b.getElementById&&p){var c,d,e,f=b.getElementById(a);if(f){if(c=f.getAttributeNode("id"),c&&c.value===a)return[f];e=b.getElementsByName(a),d=0;while(f=e[d++])if(c=f.getAttributeNode("id"),c&&c.value===a)return[f]}return[]}}),d.find.TAG=c.getElementsByTagName?function(a,b){return"undefined"!=typeof b.getElementsByTagName?b.getElementsByTagName(a):c.qsa?b.querySelectorAll(a):void 0}:function(a,b){var c,d=[],e=0,f=b.getElementsByTagName(a);if("*"===a){while(c=f[e++])1===c.nodeType&&d.push(c);return d}return f},d.find.CLASS=c.getElementsByClassName&&function(a,b){if("undefined"!=typeof b.getElementsByClassName&&p)return b.getElementsByClassName(a)},r=[],q=[],(c.qsa=Y.test(n.querySelectorAll))&&(ja(function(a){o.appendChild(a).innerHTML="<a id='"+u+"'></a><select id='"+u+"-\r\\' 
msallowcapture=''><option selected=''></option></select>",a.querySelectorAll("[msallowcapture^='']").length&&q.push("[*^$]="+K+"*(?:''|\"\")"),a.querySelectorAll("[selected]").length||q.push("\\["+K+"*(?:value|"+J+")"),a.querySelectorAll("[id~="+u+"-]").length||q.push("~="),a.querySelectorAll(":checked").length||q.push(":checked"),a.querySelectorAll("a#"+u+"+*").length||q.push(".#.+[+~]")}),ja(function(a){a.innerHTML="<a href='' disabled='disabled'></a><select disabled='disabled'><option/></select>";var b=n.createElement("input");b.setAttribute("type","hidden"),a.appendChild(b).setAttribute("name","D"),a.querySelectorAll("[name=d]").length&&q.push("name"+K+"*[*^$|!~]?="),2!==a.querySelectorAll(":enabled").length&&q.push(":enabled",":disabled"),o.appendChild(a).disabled=!0,2!==a.querySelectorAll(":disabled").length&&q.push(":enabled",":disabled"),a.querySelectorAll("*,:x"),q.push(",.*:")})),(c.matchesSelector=Y.test(s=o.matches||o.webkitMatchesSelector||o.mozMatchesSelector||o.oMatchesSelector||o.msMatchesSelector))&&ja(function(a){c.disconnectedMatch=s.call(a,"*"),s.call(a,"[s!='']:x"),r.push("!=",N)}),q=q.length&&new RegExp(q.join("|")),r=r.length&&new RegExp(r.join("|")),b=Y.test(o.compareDocumentPosition),t=b||Y.test(o.contains)?function(a,b){var c=9===a.nodeType?a.documentElement:a,d=b&&b.parentNode;return a===d||!(!d||1!==d.nodeType||!(c.contains?c.contains(d):a.compareDocumentPosition&&16&a.compareDocumentPosition(d)))}:function(a,b){if(b)while(b=b.parentNode)if(b===a)return!0;return!1},B=b?function(a,b){if(a===b)return l=!0,0;var d=!a.compareDocumentPosition-!b.compareDocumentPosition;return d?d:(d=(a.ownerDocument||a)===(b.ownerDocument||b)?a.compareDocumentPosition(b):1,1&d||!c.sortDetached&&b.compareDocumentPosition(a)===d?a===n||a.ownerDocument===v&&t(v,a)?-1:b===n||b.ownerDocument===v&&t(v,b)?1:k?I(k,a)-I(k,b):0:4&d?-1:1)}:function(a,b){if(a===b)return l=!0,0;var c,d=0,e=a.parentNode,f=b.parentNode,g=[a],h=[b];if(!e||!f)return a===n?-1:b===n?1:e?-1:f?1:k?I(k,a)-I(k,b):0;if(e===f)return la(a,b);c=a;while(c=c.parentNode)g.unshift(c);c=b;while(c=c.parentNode)h.unshift(c);while(g[d]===h[d])d++;return d?la(g[d],h[d]):g[d]===v?-1:h[d]===v?1:0},n):n},ga.matches=function(a,b){return ga(a,null,null,b)},ga.matchesSelector=function(a,b){if((a.ownerDocument||a)!==n&&m(a),b=b.replace(S,"='$1']"),c.matchesSelector&&p&&!A[b+" "]&&(!r||!r.test(b))&&(!q||!q.test(b)))try{var d=s.call(a,b);if(d||c.disconnectedMatch||a.document&&11!==a.document.nodeType)return d}catch(e){}return ga(b,n,null,[a]).length>0},ga.contains=function(a,b){return(a.ownerDocument||a)!==n&&m(a),t(a,b)},ga.attr=function(a,b){(a.ownerDocument||a)!==n&&m(a);var e=d.attrHandle[b.toLowerCase()],f=e&&C.call(d.attrHandle,b.toLowerCase())?e(a,b,!p):void 0;return void 0!==f?f:c.attributes||!p?a.getAttribute(b):(f=a.getAttributeNode(b))&&f.specified?f.value:null},ga.escape=function(a){return(a+"").replace(ba,ca)},ga.error=function(a){throw new Error("Syntax error, unrecognized expression: "+a)},ga.uniqueSort=function(a){var b,d=[],e=0,f=0;if(l=!c.detectDuplicates,k=!c.sortStable&&a.slice(0),a.sort(B),l){while(b=a[f++])b===a[f]&&(e=d.push(f));while(e--)a.splice(d[e],1)}return k=null,a},e=ga.getText=function(a){var b,c="",d=0,f=a.nodeType;if(f){if(1===f||9===f||11===f){if("string"==typeof a.textContent)return a.textContent;for(a=a.firstChild;a;a=a.nextSibling)c+=e(a)}else if(3===f||4===f)return a.nodeValue}else while(b=a[d++])c+=e(b);return 
c},d=ga.selectors={cacheLength:50,createPseudo:ia,match:V,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(a){return a[1]=a[1].replace(_,aa),a[3]=(a[3]||a[4]||a[5]||"").replace(_,aa),"~="===a[2]&&(a[3]=" "+a[3]+" "),a.slice(0,4)},CHILD:function(a){return a[1]=a[1].toLowerCase(),"nth"===a[1].slice(0,3)?(a[3]||ga.error(a[0]),a[4]=+(a[4]?a[5]+(a[6]||1):2*("even"===a[3]||"odd"===a[3])),a[5]=+(a[7]+a[8]||"odd"===a[3])):a[3]&&ga.error(a[0]),a},PSEUDO:function(a){var b,c=!a[6]&&a[2];return V.CHILD.test(a[0])?null:(a[3]?a[2]=a[4]||a[5]||"":c&&T.test(c)&&(b=g(c,!0))&&(b=c.indexOf(")",c.length-b)-c.length)&&(a[0]=a[0].slice(0,b),a[2]=c.slice(0,b)),a.slice(0,3))}},filter:{TAG:function(a){var b=a.replace(_,aa).toLowerCase();return"*"===a?function(){return!0}:function(a){return a.nodeName&&a.nodeName.toLowerCase()===b}},CLASS:function(a){var b=y[a+" "];return b||(b=new RegExp("(^|"+K+")"+a+"("+K+"|$)"))&&y(a,function(a){return b.test("string"==typeof a.className&&a.className||"undefined"!=typeof a.getAttribute&&a.getAttribute("class")||"")})},ATTR:function(a,b,c){return function(d){var e=ga.attr(d,a);return null==e?"!="===b:!b||(e+="","="===b?e===c:"!="===b?e!==c:"^="===b?c&&0===e.indexOf(c):"*="===b?c&&e.indexOf(c)>-1:"$="===b?c&&e.slice(-c.length)===c:"~="===b?(" "+e.replace(O," ")+" ").indexOf(c)>-1:"|="===b&&(e===c||e.slice(0,c.length+1)===c+"-"))}},CHILD:function(a,b,c,d,e){var f="nth"!==a.slice(0,3),g="last"!==a.slice(-4),h="of-type"===b;return 1===d&&0===e?function(a){return!!a.parentNode}:function(b,c,i){var j,k,l,m,n,o,p=f!==g?"nextSibling":"previousSibling",q=b.parentNode,r=h&&b.nodeName.toLowerCase(),s=!i&&!h,t=!1;if(q){if(f){while(p){m=b;while(m=m[p])if(h?m.nodeName.toLowerCase()===r:1===m.nodeType)return!1;o=p="only"===a&&!o&&"nextSibling"}return!0}if(o=[g?q.firstChild:q.lastChild],g&&s){m=q,l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),j=k[a]||[],n=j[0]===w&&j[1],t=n&&j[2],m=n&&q.childNodes[n];while(m=++n&&m&&m[p]||(t=n=0)||o.pop())if(1===m.nodeType&&++t&&m===b){k[a]=[w,n,t];break}}else if(s&&(m=b,l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),j=k[a]||[],n=j[0]===w&&j[1],t=n),t===!1)while(m=++n&&m&&m[p]||(t=n=0)||o.pop())if((h?m.nodeName.toLowerCase()===r:1===m.nodeType)&&++t&&(s&&(l=m[u]||(m[u]={}),k=l[m.uniqueID]||(l[m.uniqueID]={}),k[a]=[w,t]),m===b))break;return t-=e,t===d||t%d===0&&t/d>=0}}},PSEUDO:function(a,b){var c,e=d.pseudos[a]||d.setFilters[a.toLowerCase()]||ga.error("unsupported pseudo: "+a);return e[u]?e(b):e.length>1?(c=[a,a,"",b],d.setFilters.hasOwnProperty(a.toLowerCase())?ia(function(a,c){var d,f=e(a,b),g=f.length;while(g--)d=I(a,f[g]),a[d]=!(c[d]=f[g])}):function(a){return e(a,0,c)}):e}},pseudos:{not:ia(function(a){var b=[],c=[],d=h(a.replace(P,"$1"));return d[u]?ia(function(a,b,c,e){var f,g=d(a,null,e,[]),h=a.length;while(h--)(f=g[h])&&(a[h]=!(b[h]=f))}):function(a,e,f){return b[0]=a,d(b,null,f,c),b[0]=null,!c.pop()}}),has:ia(function(a){return function(b){return ga(a,b).length>0}}),contains:ia(function(a){return a=a.replace(_,aa),function(b){return(b.textContent||b.innerText||e(b)).indexOf(a)>-1}}),lang:ia(function(a){return U.test(a||"")||ga.error("unsupported lang: "+a),a=a.replace(_,aa).toLowerCase(),function(b){var c;do if(c=p?b.lang:b.getAttribute("xml:lang")||b.getAttribute("lang"))return c=c.toLowerCase(),c===a||0===c.indexOf(a+"-");while((b=b.parentNode)&&1===b.nodeType);return!1}}),target:function(b){var 
c=a.location&&a.location.hash;return c&&c.slice(1)===b.id},root:function(a){return a===o},focus:function(a){return a===n.activeElement&&(!n.hasFocus||n.hasFocus())&&!!(a.type||a.href||~a.tabIndex)},enabled:oa(!1),disabled:oa(!0),checked:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&!!a.checked||"option"===b&&!!a.selected},selected:function(a){return a.parentNode&&a.parentNode.selectedIndex,a.selected===!0},empty:function(a){for(a=a.firstChild;a;a=a.nextSibling)if(a.nodeType<6)return!1;return!0},parent:function(a){return!d.pseudos.empty(a)},header:function(a){return X.test(a.nodeName)},input:function(a){return W.test(a.nodeName)},button:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&"button"===a.type||"button"===b},text:function(a){var b;return"input"===a.nodeName.toLowerCase()&&"text"===a.type&&(null==(b=a.getAttribute("type"))||"text"===b.toLowerCase())},first:pa(function(){return[0]}),last:pa(function(a,b){return[b-1]}),eq:pa(function(a,b,c){return[c<0?c+b:c]}),even:pa(function(a,b){for(var c=0;c<b;c+=2)a.push(c);return a}),odd:pa(function(a,b){for(var c=1;c<b;c+=2)a.push(c);return a}),lt:pa(function(a,b,c){for(var d=c<0?c+b:c;--d>=0;)a.push(d);return a}),gt:pa(function(a,b,c){for(var d=c<0?c+b:c;++d<b;)a.push(d);return a})}},d.pseudos.nth=d.pseudos.eq;for(b in{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})d.pseudos[b]=ma(b);for(b in{submit:!0,reset:!0})d.pseudos[b]=na(b);function ra(){}ra.prototype=d.filters=d.pseudos,d.setFilters=new ra,g=ga.tokenize=function(a,b){var c,e,f,g,h,i,j,k=z[a+" "];if(k)return b?0:k.slice(0);h=a,i=[],j=d.preFilter;while(h){c&&!(e=Q.exec(h))||(e&&(h=h.slice(e[0].length)||h),i.push(f=[])),c=!1,(e=R.exec(h))&&(c=e.shift(),f.push({value:c,type:e[0].replace(P," ")}),h=h.slice(c.length));for(g in d.filter)!(e=V[g].exec(h))||j[g]&&!(e=j[g](e))||(c=e.shift(),f.push({value:c,type:g,matches:e}),h=h.slice(c.length));if(!c)break}return b?h.length:h?ga.error(a):z(a,i).slice(0)};function sa(a){for(var b=0,c=a.length,d="";b<c;b++)d+=a[b].value;return d}function ta(a,b,c){var d=b.dir,e=b.next,f=e||d,g=c&&"parentNode"===f,h=x++;return b.first?function(b,c,e){while(b=b[d])if(1===b.nodeType||g)return a(b,c,e);return!1}:function(b,c,i){var j,k,l,m=[w,h];if(i){while(b=b[d])if((1===b.nodeType||g)&&a(b,c,i))return!0}else while(b=b[d])if(1===b.nodeType||g)if(l=b[u]||(b[u]={}),k=l[b.uniqueID]||(l[b.uniqueID]={}),e&&e===b.nodeName.toLowerCase())b=b[d]||b;else{if((j=k[f])&&j[0]===w&&j[1]===h)return m[2]=j[2];if(k[f]=m,m[2]=a(b,c,i))return!0}return!1}}function ua(a){return a.length>1?function(b,c,d){var e=a.length;while(e--)if(!a[e](b,c,d))return!1;return!0}:a[0]}function va(a,b,c){for(var d=0,e=b.length;d<e;d++)ga(a,b[d],c);return c}function wa(a,b,c,d,e){for(var f,g=[],h=0,i=a.length,j=null!=b;h<i;h++)(f=a[h])&&(c&&!c(f,d,e)||(g.push(f),j&&b.push(h)));return g}function xa(a,b,c,d,e,f){return d&&!d[u]&&(d=xa(d)),e&&!e[u]&&(e=xa(e,f)),ia(function(f,g,h,i){var j,k,l,m=[],n=[],o=g.length,p=f||va(b||"*",h.nodeType?[h]:h,[]),q=!a||!f&&b?p:wa(p,m,a,h,i),r=c?e||(f?a:o||d)?[]:g:q;if(c&&c(q,r,h,i),d){j=wa(r,n),d(j,[],h,i),k=j.length;while(k--)(l=j[k])&&(r[n[k]]=!(q[n[k]]=l))}if(f){if(e||a){if(e){j=[],k=r.length;while(k--)(l=r[k])&&j.push(q[k]=l);e(null,r=[],j,i)}k=r.length;while(k--)(l=r[k])&&(j=e?I(f,l):m[k])>-1&&(f[j]=!(g[j]=l))}}else r=wa(r===g?r.splice(o,r.length):r),e?e(null,g,r,i):G.apply(g,r)})}function ya(a){for(var b,c,e,f=a.length,g=d.relative[a[0].type],h=g||d.relative[" "],i=g?1:0,k=ta(function(a){return a===b},h,!0),l=ta(function(a){return 
I(b,a)>-1},h,!0),m=[function(a,c,d){var e=!g&&(d||c!==j)||((b=c).nodeType?k(a,c,d):l(a,c,d));return b=null,e}];i<f;i++)if(c=d.relative[a[i].type])m=[ta(ua(m),c)];else{if(c=d.filter[a[i].type].apply(null,a[i].matches),c[u]){for(e=++i;e<f;e++)if(d.relative[a[e].type])break;return xa(i>1&&ua(m),i>1&&sa(a.slice(0,i-1).concat({value:" "===a[i-2].type?"*":""})).replace(P,"$1"),c,i<e&&ya(a.slice(i,e)),e<f&&ya(a=a.slice(e)),e<f&&sa(a))}m.push(c)}return ua(m)}function za(a,b){var c=b.length>0,e=a.length>0,f=function(f,g,h,i,k){var l,o,q,r=0,s="0",t=f&&[],u=[],v=j,x=f||e&&d.find.TAG("*",k),y=w+=null==v?1:Math.random()||.1,z=x.length;for(k&&(j=g===n||g||k);s!==z&&null!=(l=x[s]);s++){if(e&&l){o=0,g||l.ownerDocument===n||(m(l),h=!p);while(q=a[o++])if(q(l,g||n,h)){i.push(l);break}k&&(w=y)}c&&((l=!q&&l)&&r--,f&&t.push(l))}if(r+=s,c&&s!==r){o=0;while(q=b[o++])q(t,u,g,h);if(f){if(r>0)while(s--)t[s]||u[s]||(u[s]=E.call(i));u=wa(u)}G.apply(i,u),k&&!f&&u.length>0&&r+b.length>1&&ga.uniqueSort(i)}return k&&(w=y,j=v),t};return c?ia(f):f}return h=ga.compile=function(a,b){var c,d=[],e=[],f=A[a+" "];if(!f){b||(b=g(a)),c=b.length;while(c--)f=ya(b[c]),f[u]?d.push(f):e.push(f);f=A(a,za(e,d)),f.selector=a}return f},i=ga.select=function(a,b,c,e){var f,i,j,k,l,m="function"==typeof a&&a,n=!e&&g(a=m.selector||a);if(c=c||[],1===n.length){if(i=n[0]=n[0].slice(0),i.length>2&&"ID"===(j=i[0]).type&&9===b.nodeType&&p&&d.relative[i[1].type]){if(b=(d.find.ID(j.matches[0].replace(_,aa),b)||[])[0],!b)return c;m&&(b=b.parentNode),a=a.slice(i.shift().value.length)}f=V.needsContext.test(a)?0:i.length;while(f--){if(j=i[f],d.relative[k=j.type])break;if((l=d.find[k])&&(e=l(j.matches[0].replace(_,aa),$.test(i[0].type)&&qa(b.parentNode)||b))){if(i.splice(f,1),a=e.length&&sa(i),!a)return G.apply(c,e),c;break}}}return(m||h(a,n))(e,b,!p,c,!b||$.test(a)&&qa(b.parentNode)||b),c},c.sortStable=u.split("").sort(B).join("")===u,c.detectDuplicates=!!l,m(),c.sortDetached=ja(function(a){return 1&a.compareDocumentPosition(n.createElement("fieldset"))}),ja(function(a){return a.innerHTML="<a href='#'></a>","#"===a.firstChild.getAttribute("href")})||ka("type|href|height|width",function(a,b,c){if(!c)return a.getAttribute(b,"type"===b.toLowerCase()?1:2)}),c.attributes&&ja(function(a){return a.innerHTML="<input/>",a.firstChild.setAttribute("value",""),""===a.firstChild.getAttribute("value")})||ka("value",function(a,b,c){if(!c&&"input"===a.nodeName.toLowerCase())return a.defaultValue}),ja(function(a){return null==a.getAttribute("disabled")})||ka(J,function(a,b,c){var d;if(!c)return a[b]===!0?b.toLowerCase():(d=a.getAttributeNode(b))&&d.specified?d.value:null}),ga}(a);r.find=x,r.expr=x.selectors,r.expr[":"]=r.expr.pseudos,r.uniqueSort=r.unique=x.uniqueSort,r.text=x.getText,r.isXMLDoc=x.isXML,r.contains=x.contains,r.escapeSelector=x.escape;var y=function(a,b,c){var d=[],e=void 0!==c;while((a=a[b])&&9!==a.nodeType)if(1===a.nodeType){if(e&&r(a).is(c))break;d.push(a)}return d},z=function(a,b){for(var c=[];a;a=a.nextSibling)1===a.nodeType&&a!==b&&c.push(a);return c},A=r.expr.match.needsContext,B=/^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i,C=/^.[^:#\[\.,]*$/;function D(a,b,c){return r.isFunction(b)?r.grep(a,function(a,d){return!!b.call(a,d,a)!==c}):b.nodeType?r.grep(a,function(a){return a===b!==c}):"string"!=typeof b?r.grep(a,function(a){return i.call(b,a)>-1!==c}):C.test(b)?r.filter(b,a,c):(b=r.filter(b,a),r.grep(a,function(a){return i.call(b,a)>-1!==c&&1===a.nodeType}))}r.filter=function(a,b,c){var d=b[0];return 
c&&(a=":not("+a+")"),1===b.length&&1===d.nodeType?r.find.matchesSelector(d,a)?[d]:[]:r.find.matches(a,r.grep(b,function(a){return 1===a.nodeType}))},r.fn.extend({find:function(a){var b,c,d=this.length,e=this;if("string"!=typeof a)return this.pushStack(r(a).filter(function(){for(b=0;b<d;b++)if(r.contains(e[b],this))return!0}));for(c=this.pushStack([]),b=0;b<d;b++)r.find(a,e[b],c);return d>1?r.uniqueSort(c):c},filter:function(a){return this.pushStack(D(this,a||[],!1))},not:function(a){return this.pushStack(D(this,a||[],!0))},is:function(a){return!!D(this,"string"==typeof a&&A.test(a)?r(a):a||[],!1).length}});var E,F=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/,G=r.fn.init=function(a,b,c){var e,f;if(!a)return this;if(c=c||E,"string"==typeof a){if(e="<"===a[0]&&">"===a[a.length-1]&&a.length>=3?[null,a,null]:F.exec(a),!e||!e[1]&&b)return!b||b.jquery?(b||c).find(a):this.constructor(b).find(a);if(e[1]){if(b=b instanceof r?b[0]:b,r.merge(this,r.parseHTML(e[1],b&&b.nodeType?b.ownerDocument||b:d,!0)),B.test(e[1])&&r.isPlainObject(b))for(e in b)r.isFunction(this[e])?this[e](b[e]):this.attr(e,b[e]);return this}return f=d.getElementById(e[2]),f&&(this[0]=f,this.length=1),this}return a.nodeType?(this[0]=a,this.length=1,this):r.isFunction(a)?void 0!==c.ready?c.ready(a):a(r):r.makeArray(a,this)};G.prototype=r.fn,E=r(d);var H=/^(?:parents|prev(?:Until|All))/,I={children:!0,contents:!0,next:!0,prev:!0};r.fn.extend({has:function(a){var b=r(a,this),c=b.length;return this.filter(function(){for(var a=0;a<c;a++)if(r.contains(this,b[a]))return!0})},closest:function(a,b){var c,d=0,e=this.length,f=[],g="string"!=typeof a&&r(a);if(!A.test(a))for(;d<e;d++)for(c=this[d];c&&c!==b;c=c.parentNode)if(c.nodeType<11&&(g?g.index(c)>-1:1===c.nodeType&&r.find.matchesSelector(c,a))){f.push(c);break}return this.pushStack(f.length>1?r.uniqueSort(f):f)},index:function(a){return a?"string"==typeof a?i.call(r(a),this[0]):i.call(this,a.jquery?a[0]:a):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(a,b){return this.pushStack(r.uniqueSort(r.merge(this.get(),r(a,b))))},addBack:function(a){return this.add(null==a?this.prevObject:this.prevObject.filter(a))}});function J(a,b){while((a=a[b])&&1!==a.nodeType);return a}r.each({parent:function(a){var b=a.parentNode;return b&&11!==b.nodeType?b:null},parents:function(a){return y(a,"parentNode")},parentsUntil:function(a,b,c){return y(a,"parentNode",c)},next:function(a){return J(a,"nextSibling")},prev:function(a){return J(a,"previousSibling")},nextAll:function(a){return y(a,"nextSibling")},prevAll:function(a){return y(a,"previousSibling")},nextUntil:function(a,b,c){return y(a,"nextSibling",c)},prevUntil:function(a,b,c){return y(a,"previousSibling",c)},siblings:function(a){return z((a.parentNode||{}).firstChild,a)},children:function(a){return z(a.firstChild)},contents:function(a){return a.contentDocument||r.merge([],a.childNodes)}},function(a,b){r.fn[a]=function(c,d){var e=r.map(this,b,c);return"Until"!==a.slice(-5)&&(d=c),d&&"string"==typeof d&&(e=r.filter(d,e)),this.length>1&&(I[a]||r.uniqueSort(e),H.test(a)&&e.reverse()),this.pushStack(e)}});var K=/[^\x20\t\r\n\f]+/g;function L(a){var b={};return r.each(a.match(K)||[],function(a,c){b[c]=!0}),b}r.Callbacks=function(a){a="string"==typeof a?L(a):r.extend({},a);var b,c,d,e,f=[],g=[],h=-1,i=function(){for(e=a.once,d=b=!0;g.length;h=-1){c=g.shift();while(++h<f.length)f[h].apply(c[0],c[1])===!1&&a.stopOnFalse&&(h=f.length,c=!1)}a.memory||(c=!1),b=!1,e&&(f=c?[]:"")},j={add:function(){return 
f&&(c&&!b&&(h=f.length-1,g.push(c)),function d(b){r.each(b,function(b,c){r.isFunction(c)?a.unique&&j.has(c)||f.push(c):c&&c.length&&"string"!==r.type(c)&&d(c)})}(arguments),c&&!b&&i()),this},remove:function(){return r.each(arguments,function(a,b){var c;while((c=r.inArray(b,f,c))>-1)f.splice(c,1),c<=h&&h--}),this},has:function(a){return a?r.inArray(a,f)>-1:f.length>0},empty:function(){return f&&(f=[]),this},disable:function(){return e=g=[],f=c="",this},disabled:function(){return!f},lock:function(){return e=g=[],c||b||(f=c=""),this},locked:function(){return!!e},fireWith:function(a,c){return e||(c=c||[],c=[a,c.slice?c.slice():c],g.push(c),b||i()),this},fire:function(){return j.fireWith(this,arguments),this},fired:function(){return!!d}};return j};function M(a){return a}function N(a){throw a}function O(a,b,c){var d;try{a&&r.isFunction(d=a.promise)?d.call(a).done(b).fail(c):a&&r.isFunction(d=a.then)?d.call(a,b,c):b.call(void 0,a)}catch(a){c.call(void 0,a)}}r.extend({Deferred:function(b){var c=[["notify","progress",r.Callbacks("memory"),r.Callbacks("memory"),2],["resolve","done",r.Callbacks("once memory"),r.Callbacks("once memory"),0,"resolved"],["reject","fail",r.Callbacks("once memory"),r.Callbacks("once memory"),1,"rejected"]],d="pending",e={state:function(){return d},always:function(){return f.done(arguments).fail(arguments),this},"catch":function(a){return e.then(null,a)},pipe:function(){var a=arguments;return r.Deferred(function(b){r.each(c,function(c,d){var e=r.isFunction(a[d[4]])&&a[d[4]];f[d[1]](function(){var a=e&&e.apply(this,arguments);a&&r.isFunction(a.promise)?a.promise().progress(b.notify).done(b.resolve).fail(b.reject):b[d[0]+"With"](this,e?[a]:arguments)})}),a=null}).promise()},then:function(b,d,e){var f=0;function g(b,c,d,e){return function(){var h=this,i=arguments,j=function(){var a,j;if(!(b<f)){if(a=d.apply(h,i),a===c.promise())throw new TypeError("Thenable self-resolution");j=a&&("object"==typeof a||"function"==typeof a)&&a.then,r.isFunction(j)?e?j.call(a,g(f,c,M,e),g(f,c,N,e)):(f++,j.call(a,g(f,c,M,e),g(f,c,N,e),g(f,c,M,c.notifyWith))):(d!==M&&(h=void 0,i=[a]),(e||c.resolveWith)(h,i))}},k=e?j:function(){try{j()}catch(a){r.Deferred.exceptionHook&&r.Deferred.exceptionHook(a,k.stackTrace),b+1>=f&&(d!==N&&(h=void 0,i=[a]),c.rejectWith(h,i))}};b?k():(r.Deferred.getStackHook&&(k.stackTrace=r.Deferred.getStackHook()),a.setTimeout(k))}}return r.Deferred(function(a){c[0][3].add(g(0,a,r.isFunction(e)?e:M,a.notifyWith)),c[1][3].add(g(0,a,r.isFunction(b)?b:M)),c[2][3].add(g(0,a,r.isFunction(d)?d:N))}).promise()},promise:function(a){return null!=a?r.extend(a,e):e}},f={};return r.each(c,function(a,b){var g=b[2],h=b[5];e[b[1]]=g.add,h&&g.add(function(){d=h},c[3-a][2].disable,c[0][2].lock),g.add(b[3].fire),f[b[0]]=function(){return f[b[0]+"With"](this===f?void 0:this,arguments),this},f[b[0]+"With"]=g.fireWith}),e.promise(f),b&&b.call(f,f),f},when:function(a){var b=arguments.length,c=b,d=Array(c),e=f.call(arguments),g=r.Deferred(),h=function(a){return function(c){d[a]=this,e[a]=arguments.length>1?f.call(arguments):c,--b||g.resolveWith(d,e)}};if(b<=1&&(O(a,g.done(h(c)).resolve,g.reject),"pending"===g.state()||r.isFunction(e[c]&&e[c].then)))return g.then();while(c--)O(e[c],h(c),g.reject);return g.promise()}});var P=/^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/;r.Deferred.exceptionHook=function(b,c){a.console&&a.console.warn&&b&&P.test(b.name)&&a.console.warn("jQuery.Deferred exception: "+b.message,b.stack,c)},r.readyException=function(b){a.setTimeout(function(){throw b})};var 
Q=r.Deferred();r.fn.ready=function(a){return Q.then(a)["catch"](function(a){r.readyException(a)}),this},r.extend({isReady:!1,readyWait:1,holdReady:function(a){a?r.readyWait++:r.ready(!0)},ready:function(a){(a===!0?--r.readyWait:r.isReady)||(r.isReady=!0,a!==!0&&--r.readyWait>0||Q.resolveWith(d,[r]))}}),r.ready.then=Q.then;function R(){d.removeEventListener("DOMContentLoaded",R),
a.removeEventListener("load",R),r.ready()}"complete"===d.readyState||"loading"!==d.readyState&&!d.documentElement.doScroll?a.setTimeout(r.ready):(d.addEventListener("DOMContentLoaded",R),a.addEventListener("load",R));var S=function(a,b,c,d,e,f,g){var h=0,i=a.length,j=null==c;if("object"===r.type(c)){e=!0;for(h in c)S(a,b,h,c[h],!0,f,g)}else if(void 0!==d&&(e=!0,r.isFunction(d)||(g=!0),j&&(g?(b.call(a,d),b=null):(j=b,b=function(a,b,c){return j.call(r(a),c)})),b))for(;h<i;h++)b(a[h],c,g?d:d.call(a[h],h,b(a[h],c)));return e?a:j?b.call(a):i?b(a[0],c):f},T=function(a){return 1===a.nodeType||9===a.nodeType||!+a.nodeType};function U(){this.expando=r.expando+U.uid++}U.uid=1,U.prototype={cache:function(a){var b=a[this.expando];return b||(b={},T(a)&&(a.nodeType?a[this.expando]=b:Object.defineProperty(a,this.expando,{value:b,configurable:!0}))),b},set:function(a,b,c){var d,e=this.cache(a);if("string"==typeof b)e[r.camelCase(b)]=c;else for(d in b)e[r.camelCase(d)]=b[d];return e},get:function(a,b){return void 0===b?this.cache(a):a[this.expando]&&a[this.expando][r.camelCase(b)]},access:function(a,b,c){return void 0===b||b&&"string"==typeof b&&void 0===c?this.get(a,b):(this.set(a,b,c),void 0!==c?c:b)},remove:function(a,b){var c,d=a[this.expando];if(void 0!==d){if(void 0!==b){r.isArray(b)?b=b.map(r.camelCase):(b=r.camelCase(b),b=b in d?[b]:b.match(K)||[]),c=b.length;while(c--)delete d[b[c]]}(void 0===b||r.isEmptyObject(d))&&(a.nodeType?a[this.expando]=void 0:delete a[this.expando])}},hasData:function(a){var b=a[this.expando];return void 0!==b&&!r.isEmptyObject(b)}};var V=new U,W=new U,X=/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,Y=/[A-Z]/g;function Z(a){return"true"===a||"false"!==a&&("null"===a?null:a===+a+""?+a:X.test(a)?JSON.parse(a):a)}function $(a,b,c){var d;if(void 0===c&&1===a.nodeType)if(d="data-"+b.replace(Y,"-$&").toLowerCase(),c=a.getAttribute(d),"string"==typeof c){try{c=Z(c)}catch(e){}W.set(a,b,c)}else c=void 0;return c}r.extend({hasData:function(a){return W.hasData(a)||V.hasData(a)},data:function(a,b,c){return W.access(a,b,c)},removeData:function(a,b){W.remove(a,b)},_data:function(a,b,c){return V.access(a,b,c)},_removeData:function(a,b){V.remove(a,b)}}),r.fn.extend({data:function(a,b){var c,d,e,f=this[0],g=f&&f.attributes;if(void 0===a){if(this.length&&(e=W.get(f),1===f.nodeType&&!V.get(f,"hasDataAttrs"))){c=g.length;while(c--)g[c]&&(d=g[c].name,0===d.indexOf("data-")&&(d=r.camelCase(d.slice(5)),$(f,d,e[d])));V.set(f,"hasDataAttrs",!0)}return e}return"object"==typeof a?this.each(function(){W.set(this,a)}):S(this,function(b){var c;if(f&&void 0===b){if(c=W.get(f,a),void 0!==c)return c;if(c=$(f,a),void 0!==c)return c}else this.each(function(){W.set(this,a,b)})},null,b,arguments.length>1,null,!0)},removeData:function(a){return this.each(function(){W.remove(this,a)})}}),r.extend({queue:function(a,b,c){var d;if(a)return b=(b||"fx")+"queue",d=V.get(a,b),c&&(!d||r.isArray(c)?d=V.access(a,b,r.makeArray(c)):d.push(c)),d||[]},dequeue:function(a,b){b=b||"fx";var c=r.queue(a,b),d=c.length,e=c.shift(),f=r._queueHooks(a,b),g=function(){r.dequeue(a,b)};"inprogress"===e&&(e=c.shift(),d--),e&&("fx"===b&&c.unshift("inprogress"),delete f.stop,e.call(a,g,f)),!d&&f&&f.empty.fire()},_queueHooks:function(a,b){var c=b+"queueHooks";return V.get(a,c)||V.access(a,c,{empty:r.Callbacks("once memory").add(function(){V.remove(a,[b+"queue",c])})})}}),r.fn.extend({queue:function(a,b){var c=2;return"string"!=typeof a&&(b=a,a="fx",c--),arguments.length<c?r.queue(this[0],a):void 0===b?this:this.each(function(){var 
c=r.queue(this,a,b);r._queueHooks(this,a),"fx"===a&&"inprogress"!==c[0]&&r.dequeue(this,a)})},dequeue:function(a){return this.each(function(){r.dequeue(this,a)})},clearQueue:function(a){return this.queue(a||"fx",[])},promise:function(a,b){var c,d=1,e=r.Deferred(),f=this,g=this.length,h=function(){--d||e.resolveWith(f,[f])};"string"!=typeof a&&(b=a,a=void 0),a=a||"fx";while(g--)c=V.get(f[g],a+"queueHooks"),c&&c.empty&&(d++,c.empty.add(h));return h(),e.promise(b)}});var _=/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source,aa=new RegExp("^(?:([+-])=|)("+_+")([a-z%]*)$","i"),ba=["Top","Right","Bottom","Left"],ca=function(a,b){return a=b||a,"none"===a.style.display||""===a.style.display&&r.contains(a.ownerDocument,a)&&"none"===r.css(a,"display")},da=function(a,b,c,d){var e,f,g={};for(f in b)g[f]=a.style[f],a.style[f]=b[f];e=c.apply(a,d||[]);for(f in b)a.style[f]=g[f];return e};function ea(a,b,c,d){var e,f=1,g=20,h=d?function(){return d.cur()}:function(){return r.css(a,b,"")},i=h(),j=c&&c[3]||(r.cssNumber[b]?"":"px"),k=(r.cssNumber[b]||"px"!==j&&+i)&&aa.exec(r.css(a,b));if(k&&k[3]!==j){j=j||k[3],c=c||[],k=+i||1;do f=f||".5",k/=f,r.style(a,b,k+j);while(f!==(f=h()/i)&&1!==f&&--g)}return c&&(k=+k||+i||0,e=c[1]?k+(c[1]+1)*c[2]:+c[2],d&&(d.unit=j,d.start=k,d.end=e)),e}var fa={};function ga(a){var b,c=a.ownerDocument,d=a.nodeName,e=fa[d];return e?e:(b=c.body.appendChild(c.createElement(d)),e=r.css(b,"display"),b.parentNode.removeChild(b),"none"===e&&(e="block"),fa[d]=e,e)}function ha(a,b){for(var c,d,e=[],f=0,g=a.length;f<g;f++)d=a[f],d.style&&(c=d.style.display,b?("none"===c&&(e[f]=V.get(d,"display")||null,e[f]||(d.style.display="")),""===d.style.display&&ca(d)&&(e[f]=ga(d))):"none"!==c&&(e[f]="none",V.set(d,"display",c)));for(f=0;f<g;f++)null!=e[f]&&(a[f].style.display=e[f]);return a}r.fn.extend({show:function(){return ha(this,!0)},hide:function(){return ha(this)},toggle:function(a){return"boolean"==typeof a?a?this.show():this.hide():this.each(function(){ca(this)?r(this).show():r(this).hide()})}});var ia=/^(?:checkbox|radio)$/i,ja=/<([a-z][^\/\0>\x20\t\r\n\f]+)/i,ka=/^$|\/(?:java|ecma)script/i,la={option:[1,"<select multiple='multiple'>","</select>"],thead:[1,"<table>","</table>"],col:[2,"<table><colgroup>","</colgroup></table>"],tr:[2,"<table><tbody>","</tbody></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],_default:[0,"",""]};la.optgroup=la.option,la.tbody=la.tfoot=la.colgroup=la.caption=la.thead,la.th=la.td;function ma(a,b){var c;return c="undefined"!=typeof a.getElementsByTagName?a.getElementsByTagName(b||"*"):"undefined"!=typeof a.querySelectorAll?a.querySelectorAll(b||"*"):[],void 0===b||b&&r.nodeName(a,b)?r.merge([a],c):c}function na(a,b){for(var c=0,d=a.length;c<d;c++)V.set(a[c],"globalEval",!b||V.get(b[c],"globalEval"))}var oa=/<|&#?\w+;/;function pa(a,b,c,d,e){for(var f,g,h,i,j,k,l=b.createDocumentFragment(),m=[],n=0,o=a.length;n<o;n++)if(f=a[n],f||0===f)if("object"===r.type(f))r.merge(m,f.nodeType?[f]:f);else if(oa.test(f)){g=g||l.appendChild(b.createElement("div")),h=(ja.exec(f)||["",""])[1].toLowerCase(),i=la[h]||la._default,g.innerHTML=i[1]+r.htmlPrefilter(f)+i[2],k=i[0];while(k--)g=g.lastChild;r.merge(m,g.childNodes),g=l.firstChild,g.textContent=""}else m.push(b.createTextNode(f));l.textContent="",n=0;while(f=m[n++])if(d&&r.inArray(f,d)>-1)e&&e.push(f);else if(j=r.contains(f.ownerDocument,f),g=ma(l.appendChild(f),"script"),j&&na(g),c){k=0;while(f=g[k++])ka.test(f.type||"")&&c.push(f)}return l}!function(){var 
a=d.createDocumentFragment(),b=a.appendChild(d.createElement("div")),c=d.createElement("input");c.setAttribute("type","radio"),c.setAttribute("checked","checked"),c.setAttribute("name","t"),b.appendChild(c),o.checkClone=b.cloneNode(!0).cloneNode(!0).lastChild.checked,b.innerHTML="<textarea>x</textarea>",o.noCloneChecked=!!b.cloneNode(!0).lastChild.defaultValue}();var qa=d.documentElement,ra=/^key/,sa=/^(?:mouse|pointer|contextmenu|drag|drop)|click/,ta=/^([^.]*)(?:\.(.+)|)/;function ua(){return!0}function va(){return!1}function wa(){try{return d.activeElement}catch(a){}}function xa(a,b,c,d,e,f){var g,h;if("object"==typeof b){"string"!=typeof c&&(d=d||c,c=void 0);for(h in b)xa(a,h,c,d,b[h],f);return a}if(null==d&&null==e?(e=c,d=c=void 0):null==e&&("string"==typeof c?(e=d,d=void 0):(e=d,d=c,c=void 0)),e===!1)e=va;else if(!e)return a;return 1===f&&(g=e,e=function(a){return r().off(a),g.apply(this,arguments)},e.guid=g.guid||(g.guid=r.guid++)),a.each(function(){r.event.add(this,b,e,d,c)})}r.event={global:{},add:function(a,b,c,d,e){var f,g,h,i,j,k,l,m,n,o,p,q=V.get(a);if(q){c.handler&&(f=c,c=f.handler,e=f.selector),e&&r.find.matchesSelector(qa,e),c.guid||(c.guid=r.guid++),(i=q.events)||(i=q.events={}),(g=q.handle)||(g=q.handle=function(b){return"undefined"!=typeof r&&r.event.triggered!==b.type?r.event.dispatch.apply(a,arguments):void 0}),b=(b||"").match(K)||[""],j=b.length;while(j--)h=ta.exec(b[j])||[],n=p=h[1],o=(h[2]||"").split(".").sort(),n&&(l=r.event.special[n]||{},n=(e?l.delegateType:l.bindType)||n,l=r.event.special[n]||{},k=r.extend({type:n,origType:p,data:d,handler:c,guid:c.guid,selector:e,needsContext:e&&r.expr.match.needsContext.test(e),namespace:o.join(".")},f),(m=i[n])||(m=i[n]=[],m.delegateCount=0,l.setup&&l.setup.call(a,d,o,g)!==!1||a.addEventListener&&a.addEventListener(n,g)),l.add&&(l.add.call(a,k),k.handler.guid||(k.handler.guid=c.guid)),e?m.splice(m.delegateCount++,0,k):m.push(k),r.event.global[n]=!0)}},remove:function(a,b,c,d,e){var f,g,h,i,j,k,l,m,n,o,p,q=V.hasData(a)&&V.get(a);if(q&&(i=q.events)){b=(b||"").match(K)||[""],j=b.length;while(j--)if(h=ta.exec(b[j])||[],n=p=h[1],o=(h[2]||"").split(".").sort(),n){l=r.event.special[n]||{},n=(d?l.delegateType:l.bindType)||n,m=i[n]||[],h=h[2]&&new RegExp("(^|\\.)"+o.join("\\.(?:.*\\.|)")+"(\\.|$)"),g=f=m.length;while(f--)k=m[f],!e&&p!==k.origType||c&&c.guid!==k.guid||h&&!h.test(k.namespace)||d&&d!==k.selector&&("**"!==d||!k.selector)||(m.splice(f,1),k.selector&&m.delegateCount--,l.remove&&l.remove.call(a,k));g&&!m.length&&(l.teardown&&l.teardown.call(a,o,q.handle)!==!1||r.removeEvent(a,n,q.handle),delete i[n])}else for(n in i)r.event.remove(a,n+b[j],c,d,!0);r.isEmptyObject(i)&&V.remove(a,"handle events")}},dispatch:function(a){var b=r.event.fix(a),c,d,e,f,g,h,i=new Array(arguments.length),j=(V.get(this,"events")||{})[b.type]||[],k=r.event.special[b.type]||{};for(i[0]=b,c=1;c<arguments.length;c++)i[c]=arguments[c];if(b.delegateTarget=this,!k.preDispatch||k.preDispatch.call(this,b)!==!1){h=r.event.handlers.call(this,b,j),c=0;while((f=h[c++])&&!b.isPropagationStopped()){b.currentTarget=f.elem,d=0;while((g=f.handlers[d++])&&!b.isImmediatePropagationStopped())b.rnamespace&&!b.rnamespace.test(g.namespace)||(b.handleObj=g,b.data=g.data,e=((r.event.special[g.origType]||{}).handle||g.handler).apply(f.elem,i),void 0!==e&&(b.result=e)===!1&&(b.preventDefault(),b.stopPropagation()))}return k.postDispatch&&k.postDispatch.call(this,b),b.result}},handlers:function(a,b){var 
c,d,e,f,g,h=[],i=b.delegateCount,j=a.target;if(i&&j.nodeType&&!("click"===a.type&&a.button>=1))for(;j!==this;j=j.parentNode||this)if(1===j.nodeType&&("click"!==a.type||j.disabled!==!0)){for(f=[],g={},c=0;c<i;c++)d=b[c],e=d.selector+" ",void 0===g[e]&&(g[e]=d.needsContext?r(e,this).index(j)>-1:r.find(e,this,null,[j]).length),g[e]&&f.push(d);f.length&&h.push({elem:j,handlers:f})}return j=this,i<b.length&&h.push({elem:j,handlers:b.slice(i)}),h},addProp:function(a,b){Object.defineProperty(r.Event.prototype,a,{enumerable:!0,configurable:!0,get:r.isFunction(b)?function(){if(this.originalEvent)return b(this.originalEvent)}:function(){if(this.originalEvent)return this.originalEvent[a]},set:function(b){Object.defineProperty(this,a,{enumerable:!0,configurable:!0,writable:!0,value:b})}})},fix:function(a){return a[r.expando]?a:new r.Event(a)},special:{load:{noBubble:!0},focus:{trigger:function(){if(this!==wa()&&this.focus)return this.focus(),!1},delegateType:"focusin"},blur:{trigger:function(){if(this===wa()&&this.blur)return this.blur(),!1},delegateType:"focusout"},click:{trigger:function(){if("checkbox"===this.type&&this.click&&r.nodeName(this,"input"))return this.click(),!1},_default:function(a){return r.nodeName(a.target,"a")}},beforeunload:{postDispatch:function(a){void 0!==a.result&&a.originalEvent&&(a.originalEvent.returnValue=a.result)}}}},r.removeEvent=function(a,b,c){a.removeEventListener&&a.removeEventListener(b,c)},r.Event=function(a,b){return this instanceof r.Event?(a&&a.type?(this.originalEvent=a,this.type=a.type,this.isDefaultPrevented=a.defaultPrevented||void 0===a.defaultPrevented&&a.returnValue===!1?ua:va,this.target=a.target&&3===a.target.nodeType?a.target.parentNode:a.target,this.currentTarget=a.currentTarget,this.relatedTarget=a.relatedTarget):this.type=a,b&&r.extend(this,b),this.timeStamp=a&&a.timeStamp||r.now(),void(this[r.expando]=!0)):new r.Event(a,b)},r.Event.prototype={constructor:r.Event,isDefaultPrevented:va,isPropagationStopped:va,isImmediatePropagationStopped:va,isSimulated:!1,preventDefault:function(){var a=this.originalEvent;this.isDefaultPrevented=ua,a&&!this.isSimulated&&a.preventDefault()},stopPropagation:function(){var a=this.originalEvent;this.isPropagationStopped=ua,a&&!this.isSimulated&&a.stopPropagation()},stopImmediatePropagation:function(){var a=this.originalEvent;this.isImmediatePropagationStopped=ua,a&&!this.isSimulated&&a.stopImmediatePropagation(),this.stopPropagation()}},r.each({altKey:!0,bubbles:!0,cancelable:!0,changedTouches:!0,ctrlKey:!0,detail:!0,eventPhase:!0,metaKey:!0,pageX:!0,pageY:!0,shiftKey:!0,view:!0,"char":!0,charCode:!0,key:!0,keyCode:!0,button:!0,buttons:!0,clientX:!0,clientY:!0,offsetX:!0,offsetY:!0,pointerId:!0,pointerType:!0,screenX:!0,screenY:!0,targetTouches:!0,toElement:!0,touches:!0,which:function(a){var b=a.button;return null==a.which&&ra.test(a.type)?null!=a.charCode?a.charCode:a.keyCode:!a.which&&void 0!==b&&sa.test(a.type)?1&b?1:2&b?3:4&b?2:0:a.which}},r.event.addProp),r.each({mouseenter:"mouseover",mouseleave:"mouseout",pointerenter:"pointerover",pointerleave:"pointerout"},function(a,b){r.event.special[a]={delegateType:b,bindType:b,handle:function(a){var c,d=this,e=a.relatedTarget,f=a.handleObj;return e&&(e===d||r.contains(d,e))||(a.type=f.origType,c=f.handler.apply(this,arguments),a.type=b),c}}}),r.fn.extend({on:function(a,b,c,d){return xa(this,a,b,c,d)},one:function(a,b,c,d){return xa(this,a,b,c,d,1)},off:function(a,b,c){var d,e;if(a&&a.preventDefault&&a.handleObj)return 
d=a.handleObj,r(a.delegateTarget).off(d.namespace?d.origType+"."+d.namespace:d.origType,d.selector,d.handler),this;if("object"==typeof a){for(e in a)this.off(e,b,a[e]);return this}return b!==!1&&"function"!=typeof b||(c=b,b=void 0),c===!1&&(c=va),this.each(function(){r.event.remove(this,a,c,b)})}});var ya=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([a-z][^\/\0>\x20\t\r\n\f]*)[^>]*)\/>/gi,za=/<script|<style|<link/i,Aa=/checked\s*(?:[^=]|=\s*.checked.)/i,Ba=/^true\/(.*)/,Ca=/^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g;function Da(a,b){return r.nodeName(a,"table")&&r.nodeName(11!==b.nodeType?b:b.firstChild,"tr")?a.getElementsByTagName("tbody")[0]||a:a}function Ea(a){return a.type=(null!==a.getAttribute("type"))+"/"+a.type,a}function Fa(a){var b=Ba.exec(a.type);return b?a.type=b[1]:a.removeAttribute("type"),a}function Ga(a,b){var c,d,e,f,g,h,i,j;if(1===b.nodeType){if(V.hasData(a)&&(f=V.access(a),g=V.set(b,f),j=f.events)){delete g.handle,g.events={};for(e in j)for(c=0,d=j[e].length;c<d;c++)r.event.add(b,e,j[e][c])}W.hasData(a)&&(h=W.access(a),i=r.extend({},h),W.set(b,i))}}function Ha(a,b){var c=b.nodeName.toLowerCase();"input"===c&&ia.test(a.type)?b.checked=a.checked:"input"!==c&&"textarea"!==c||(b.defaultValue=a.defaultValue)}function Ia(a,b,c,d){b=g.apply([],b);var e,f,h,i,j,k,l=0,m=a.length,n=m-1,q=b[0],s=r.isFunction(q);if(s||m>1&&"string"==typeof q&&!o.checkClone&&Aa.test(q))return a.each(function(e){var f=a.eq(e);s&&(b[0]=q.call(this,e,f.html())),Ia(f,b,c,d)});if(m&&(e=pa(b,a[0].ownerDocument,!1,a,d),f=e.firstChild,1===e.childNodes.length&&(e=f),f||d)){for(h=r.map(ma(e,"script"),Ea),i=h.length;l<m;l++)j=e,l!==n&&(j=r.clone(j,!0,!0),i&&r.merge(h,ma(j,"script"))),c.call(a[l],j,l);if(i)for(k=h[h.length-1].ownerDocument,r.map(h,Fa),l=0;l<i;l++)j=h[l],ka.test(j.type||"")&&!V.access(j,"globalEval")&&r.contains(k,j)&&(j.src?r._evalUrl&&r._evalUrl(j.src):p(j.textContent.replace(Ca,""),k))}return a}function Ja(a,b,c){for(var d,e=b?r.filter(b,a):a,f=0;null!=(d=e[f]);f++)c||1!==d.nodeType||r.cleanData(ma(d)),d.parentNode&&(c&&r.contains(d.ownerDocument,d)&&na(ma(d,"script")),d.parentNode.removeChild(d));return a}r.extend({htmlPrefilter:function(a){return a.replace(ya,"<$1></$2>")},clone:function(a,b,c){var d,e,f,g,h=a.cloneNode(!0),i=r.contains(a.ownerDocument,a);if(!(o.noCloneChecked||1!==a.nodeType&&11!==a.nodeType||r.isXMLDoc(a)))for(g=ma(h),f=ma(a),d=0,e=f.length;d<e;d++)Ha(f[d],g[d]);if(b)if(c)for(f=f||ma(a),g=g||ma(h),d=0,e=f.length;d<e;d++)Ga(f[d],g[d]);else Ga(a,h);return g=ma(h,"script"),g.length>0&&na(g,!i&&ma(a,"script")),h},cleanData:function(a){for(var b,c,d,e=r.event.special,f=0;void 0!==(c=a[f]);f++)if(T(c)){if(b=c[V.expando]){if(b.events)for(d in b.events)e[d]?r.event.remove(c,d):r.removeEvent(c,d,b.handle);c[V.expando]=void 0}c[W.expando]&&(c[W.expando]=void 0)}}}),r.fn.extend({detach:function(a){return Ja(this,a,!0)},remove:function(a){return Ja(this,a)},text:function(a){return S(this,function(a){return void 0===a?r.text(this):this.empty().each(function(){1!==this.nodeType&&11!==this.nodeType&&9!==this.nodeType||(this.textContent=a)})},null,a,arguments.length)},append:function(){return Ia(this,arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=Da(this,a);b.appendChild(a)}})},prepend:function(){return Ia(this,arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=Da(this,a);b.insertBefore(a,b.firstChild)}})},before:function(){return 
Ia(this,arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this)})},after:function(){return Ia(this,arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this.nextSibling)})},empty:function(){for(var a,b=0;null!=(a=this[b]);b++)1===a.nodeType&&(r.cleanData(ma(a,!1)),a.textContent="");return this},clone:function(a,b){return a=null!=a&&a,b=null==b?a:b,this.map(function(){return r.clone(this,a,b)})},html:function(a){return S(this,function(a){var b=this[0]||{},c=0,d=this.length;if(void 0===a&&1===b.nodeType)return b.innerHTML;if("string"==typeof a&&!za.test(a)&&!la[(ja.exec(a)||["",""])[1].toLowerCase()]){a=r.htmlPrefilter(a);try{for(;c<d;c++)b=this[c]||{},1===b.nodeType&&(r.cleanData(ma(b,!1)),b.innerHTML=a);b=0}catch(e){}}b&&this.empty().append(a)},null,a,arguments.length)},replaceWith:function(){var a=[];return Ia(this,arguments,function(b){var c=this.parentNode;r.inArray(this,a)<0&&(r.cleanData(ma(this)),c&&c.replaceChild(b,this))},a)}}),r.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){r.fn[a]=function(a){for(var c,d=[],e=r(a),f=e.length-1,g=0;g<=f;g++)c=g===f?this:this.clone(!0),r(e[g])[b](c),h.apply(d,c.get());return this.pushStack(d)}});var Ka=/^margin/,La=new RegExp("^("+_+")(?!px)[a-z%]+$","i"),Ma=function(b){var c=b.ownerDocument.defaultView;return c&&c.opener||(c=a),c.getComputedStyle(b)};!function(){function b(){if(i){i.style.cssText="box-sizing:border-box;position:relative;display:block;margin:auto;border:1px;padding:1px;top:1%;width:50%",i.innerHTML="",qa.appendChild(h);var b=a.getComputedStyle(i);c="1%"!==b.top,g="2px"===b.marginLeft,e="4px"===b.width,i.style.marginRight="50%",f="4px"===b.marginRight,qa.removeChild(h),i=null}}var c,e,f,g,h=d.createElement("div"),i=d.createElement("div");i.style&&(i.style.backgroundClip="content-box",i.cloneNode(!0).style.backgroundClip="",o.clearCloneStyle="content-box"===i.style.backgroundClip,h.style.cssText="border:0;width:8px;height:0;top:0;left:-9999px;padding:0;margin-top:1px;position:absolute",h.appendChild(i),r.extend(o,{pixelPosition:function(){return b(),c},boxSizingReliable:function(){return b(),e},pixelMarginRight:function(){return b(),f},reliableMarginLeft:function(){return b(),g}}))}();function Na(a,b,c){var d,e,f,g,h=a.style;return c=c||Ma(a),c&&(g=c.getPropertyValue(b)||c[b],""!==g||r.contains(a.ownerDocument,a)||(g=r.style(a,b)),!o.pixelMarginRight()&&La.test(g)&&Ka.test(b)&&(d=h.width,e=h.minWidth,f=h.maxWidth,h.minWidth=h.maxWidth=h.width=g,g=c.width,h.width=d,h.minWidth=e,h.maxWidth=f)),void 0!==g?g+"":g}function Oa(a,b){return{get:function(){return a()?void delete this.get:(this.get=b).apply(this,arguments)}}}var Pa=/^(none|table(?!-c[ea]).+)/,Qa={position:"absolute",visibility:"hidden",display:"block"},Ra={letterSpacing:"0",fontWeight:"400"},Sa=["Webkit","Moz","ms"],Ta=d.createElement("div").style;function Ua(a){if(a in Ta)return a;var b=a[0].toUpperCase()+a.slice(1),c=Sa.length;while(c--)if(a=Sa[c]+b,a in Ta)return a}function Va(a,b,c){var d=aa.exec(b);return d?Math.max(0,d[2]-(c||0))+(d[3]||"px"):b}function Wa(a,b,c,d,e){var f,g=0;for(f=c===(d?"border":"content")?4:"width"===b?1:0;f<4;f+=2)"margin"===c&&(g+=r.css(a,c+ba[f],!0,e)),d?("content"===c&&(g-=r.css(a,"padding"+ba[f],!0,e)),"margin"!==c&&(g-=r.css(a,"border"+ba[f]+"Width",!0,e))):(g+=r.css(a,"padding"+ba[f],!0,e),"padding"!==c&&(g+=r.css(a,"border"+ba[f]+"Width",!0,e)));return g}function Xa(a,b,c){var 
d,e=!0,f=Ma(a),g="border-box"===r.css(a,"boxSizing",!1,f);if(a.getClientRects().length&&(d=a.getBoundingClientRect()[b]),d<=0||null==d){if(d=Na(a,b,f),(d<0||null==d)&&(d=a.style[b]),La.test(d))return d;e=g&&(o.boxSizingReliable()||d===a.style[b]),d=parseFloat(d)||0}return d+Wa(a,b,c||(g?"border":"content"),e,f)+"px"}r.extend({cssHooks:{opacity:{get:function(a,b){if(b){var c=Na(a,"opacity");return""===c?"1":c}}}},cssNumber:{animationIterationCount:!0,columnCount:!0,fillOpacity:!0,flexGrow:!0,flexShrink:!0,fontWeight:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{"float":"cssFloat"},style:function(a,b,c,d){if(a&&3!==a.nodeType&&8!==a.nodeType&&a.style){var e,f,g,h=r.camelCase(b),i=a.style;return b=r.cssProps[h]||(r.cssProps[h]=Ua(h)||h),g=r.cssHooks[b]||r.cssHooks[h],void 0===c?g&&"get"in g&&void 0!==(e=g.get(a,!1,d))?e:i[b]:(f=typeof c,"string"===f&&(e=aa.exec(c))&&e[1]&&(c=ea(a,b,e),f="number"),null!=c&&c===c&&("number"===f&&(c+=e&&e[3]||(r.cssNumber[h]?"":"px")),o.clearCloneStyle||""!==c||0!==b.indexOf("background")||(i[b]="inherit"),g&&"set"in g&&void 0===(c=g.set(a,c,d))||(i[b]=c)),void 0)}},css:function(a,b,c,d){var e,f,g,h=r.camelCase(b);return b=r.cssProps[h]||(r.cssProps[h]=Ua(h)||h),g=r.cssHooks[b]||r.cssHooks[h],g&&"get"in g&&(e=g.get(a,!0,c)),void 0===e&&(e=Na(a,b,d)),"normal"===e&&b in Ra&&(e=Ra[b]),""===c||c?(f=parseFloat(e),c===!0||isFinite(f)?f||0:e):e}}),r.each(["height","width"],function(a,b){r.cssHooks[b]={get:function(a,c,d){if(c)return!Pa.test(r.css(a,"display"))||a.getClientRects().length&&a.getBoundingClientRect().width?Xa(a,b,d):da(a,Qa,function(){return Xa(a,b,d)})},set:function(a,c,d){var e,f=d&&Ma(a),g=d&&Wa(a,b,d,"border-box"===r.css(a,"boxSizing",!1,f),f);return g&&(e=aa.exec(c))&&"px"!==(e[3]||"px")&&(a.style[b]=c,c=r.css(a,b)),Va(a,c,g)}}}),r.cssHooks.marginLeft=Oa(o.reliableMarginLeft,function(a,b){if(b)return(parseFloat(Na(a,"marginLeft"))||a.getBoundingClientRect().left-da(a,{marginLeft:0},function(){return a.getBoundingClientRect().left}))+"px"}),r.each({margin:"",padding:"",border:"Width"},function(a,b){r.cssHooks[a+b]={expand:function(c){for(var d=0,e={},f="string"==typeof c?c.split(" "):[c];d<4;d++)e[a+ba[d]+b]=f[d]||f[d-2]||f[0];return e}},Ka.test(a)||(r.cssHooks[a+b].set=Va)}),r.fn.extend({css:function(a,b){return S(this,function(a,b,c){var d,e,f={},g=0;if(r.isArray(b)){for(d=Ma(a),e=b.length;g<e;g++)f[b[g]]=r.css(a,b[g],!1,d);return f}return void 0!==c?r.style(a,b,c):r.css(a,b)},a,b,arguments.length>1)}});function Ya(a,b,c,d,e){return new Ya.prototype.init(a,b,c,d,e)}r.Tween=Ya,Ya.prototype={constructor:Ya,init:function(a,b,c,d,e,f){this.elem=a,this.prop=c,this.easing=e||r.easing._default,this.options=b,this.start=this.now=this.cur(),this.end=d,this.unit=f||(r.cssNumber[c]?"":"px")},cur:function(){var a=Ya.propHooks[this.prop];return a&&a.get?a.get(this):Ya.propHooks._default.get(this)},run:function(a){var b,c=Ya.propHooks[this.prop];return this.options.duration?this.pos=b=r.easing[this.easing](a,this.options.duration*a,0,1,this.options.duration):this.pos=b=a,this.now=(this.end-this.start)*b+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),c&&c.set?c.set(this):Ya.propHooks._default.set(this),this}},Ya.prototype.init.prototype=Ya.prototype,Ya.propHooks={_default:{get:function(a){var b;return 
1!==a.elem.nodeType||null!=a.elem[a.prop]&&null==a.elem.style[a.prop]?a.elem[a.prop]:(b=r.css(a.elem,a.prop,""),b&&"auto"!==b?b:0)},set:function(a){r.fx.step[a.prop]?r.fx.step[a.prop](a):1!==a.elem.nodeType||null==a.elem.style[r.cssProps[a.prop]]&&!r.cssHooks[a.prop]?a.elem[a.prop]=a.now:r.style(a.elem,a.prop,a.now+a.unit)}}},Ya.propHooks.scrollTop=Ya.propHooks.scrollLeft={set:function(a){a.elem.nodeType&&a.elem.parentNode&&(a.elem[a.prop]=a.now)}},r.easing={linear:function(a){return a},swing:function(a){return.5-Math.cos(a*Math.PI)/2},_default:"swing"},r.fx=Ya.prototype.init,r.fx.step={};var Za,$a,_a=/^(?:toggle|show|hide)$/,ab=/queueHooks$/;function bb(){$a&&(a.requestAnimationFrame(bb),r.fx.tick())}function cb(){return a.setTimeout(function(){Za=void 0}),Za=r.now()}function db(a,b){var c,d=0,e={height:a};for(b=b?1:0;d<4;d+=2-b)c=ba[d],e["margin"+c]=e["padding"+c]=a;return b&&(e.opacity=e.width=a),e}function eb(a,b,c){for(var d,e=(hb.tweeners[b]||[]).concat(hb.tweeners["*"]),f=0,g=e.length;f<g;f++)if(d=e[f].call(c,b,a))return d}function fb(a,b,c){var d,e,f,g,h,i,j,k,l="width"in b||"height"in b,m=this,n={},o=a.style,p=a.nodeType&&ca(a),q=V.get(a,"fxshow");c.queue||(g=r._queueHooks(a,"fx"),null==g.unqueued&&(g.unqueued=0,h=g.empty.fire,g.empty.fire=function(){g.unqueued||h()}),g.unqueued++,m.always(function(){m.always(function(){g.unqueued--,r.queue(a,"fx").length||g.empty.fire()})}));for(d in b)if(e=b[d],_a.test(e)){if(delete b[d],f=f||"toggle"===e,e===(p?"hide":"show")){if("show"!==e||!q||void 0===q[d])continue;p=!0}n[d]=q&&q[d]||r.style(a,d)}if(i=!r.isEmptyObject(b),i||!r.isEmptyObject(n)){l&&1===a.nodeType&&(c.overflow=[o.overflow,o.overflowX,o.overflowY],j=q&&q.display,null==j&&(j=V.get(a,"display")),k=r.css(a,"display"),"none"===k&&(j?k=j:(ha([a],!0),j=a.style.display||j,k=r.css(a,"display"),ha([a]))),("inline"===k||"inline-block"===k&&null!=j)&&"none"===r.css(a,"float")&&(i||(m.done(function(){o.display=j}),null==j&&(k=o.display,j="none"===k?"":k)),o.display="inline-block")),c.overflow&&(o.overflow="hidden",m.always(function(){o.overflow=c.overflow[0],o.overflowX=c.overflow[1],o.overflowY=c.overflow[2]})),i=!1;for(d in n)i||(q?"hidden"in q&&(p=q.hidden):q=V.access(a,"fxshow",{display:j}),f&&(q.hidden=!p),p&&ha([a],!0),m.done(function(){p||ha([a]),V.remove(a,"fxshow");for(d in n)r.style(a,d,n[d])})),i=eb(p?q[d]:0,d,m),d in q||(q[d]=i.start,p&&(i.end=i.start,i.start=0))}}function gb(a,b){var c,d,e,f,g;for(c in a)if(d=r.camelCase(c),e=b[d],f=a[c],r.isArray(f)&&(e=f[1],f=a[c]=f[0]),c!==d&&(a[d]=f,delete a[c]),g=r.cssHooks[d],g&&"expand"in g){f=g.expand(f),delete a[d];for(c in f)c in a||(a[c]=f[c],b[c]=e)}else b[d]=e}function hb(a,b,c){var d,e,f=0,g=hb.prefilters.length,h=r.Deferred().always(function(){delete i.elem}),i=function(){if(e)return!1;for(var b=Za||cb(),c=Math.max(0,j.startTime+j.duration-b),d=c/j.duration||0,f=1-d,g=0,i=j.tweens.length;g<i;g++)j.tweens[g].run(f);return h.notifyWith(a,[j,f,c]),f<1&&i?c:(h.resolveWith(a,[j]),!1)},j=h.promise({elem:a,props:r.extend({},b),opts:r.extend(!0,{specialEasing:{},easing:r.easing._default},c),originalProperties:b,originalOptions:c,startTime:Za||cb(),duration:c.duration,tweens:[],createTween:function(b,c){var d=r.Tween(a,j.opts,b,c,j.opts.specialEasing[b]||j.opts.easing);return j.tweens.push(d),d},stop:function(b){var c=0,d=b?j.tweens.length:0;if(e)return this;for(e=!0;c<d;c++)j.tweens[c].run(1);return 
b?(h.notifyWith(a,[j,1,0]),h.resolveWith(a,[j,b])):h.rejectWith(a,[j,b]),this}}),k=j.props;for(gb(k,j.opts.specialEasing);f<g;f++)if(d=hb.prefilters[f].call(j,a,k,j.opts))return r.isFunction(d.stop)&&(r._queueHooks(j.elem,j.opts.queue).stop=r.proxy(d.stop,d)),d;return r.map(k,eb,j),r.isFunction(j.opts.start)&&j.opts.start.call(a,j),r.fx.timer(r.extend(i,{elem:a,anim:j,queue:j.opts.queue})),j.progress(j.opts.progress).done(j.opts.done,j.opts.complete).fail(j.opts.fail).always(j.opts.always)}r.Animation=r.extend(hb,{tweeners:{"*":[function(a,b){var c=this.createTween(a,b);return ea(c.elem,a,aa.exec(b),c),c}]},tweener:function(a,b){r.isFunction(a)?(b=a,a=["*"]):a=a.match(K);for(var c,d=0,e=a.length;d<e;d++)c=a[d],hb.tweeners[c]=hb.tweeners[c]||[],hb.tweeners[c].unshift(b)},prefilters:[fb],prefilter:function(a,b){b?hb.prefilters.unshift(a):hb.prefilters.push(a)}}),r.speed=function(a,b,c){var e=a&&"object"==typeof a?r.extend({},a):{complete:c||!c&&b||r.isFunction(a)&&a,duration:a,easing:c&&b||b&&!r.isFunction(b)&&b};return r.fx.off||d.hidden?e.duration=0:"number"!=typeof e.duration&&(e.duration in r.fx.speeds?e.duration=r.fx.speeds[e.duration]:e.duration=r.fx.speeds._default),null!=e.queue&&e.queue!==!0||(e.queue="fx"),e.old=e.complete,e.complete=function(){r.isFunction(e.old)&&e.old.call(this),e.queue&&r.dequeue(this,e.queue)},e},r.fn.extend({fadeTo:function(a,b,c,d){return this.filter(ca).css("opacity",0).show().end().animate({opacity:b},a,c,d)},animate:function(a,b,c,d){var e=r.isEmptyObject(a),f=r.speed(b,c,d),g=function(){var b=hb(this,r.extend({},a),f);(e||V.get(this,"finish"))&&b.stop(!0)};return g.finish=g,e||f.queue===!1?this.each(g):this.queue(f.queue,g)},stop:function(a,b,c){var d=function(a){var b=a.stop;delete a.stop,b(c)};return"string"!=typeof a&&(c=b,b=a,a=void 0),b&&a!==!1&&this.queue(a||"fx",[]),this.each(function(){var b=!0,e=null!=a&&a+"queueHooks",f=r.timers,g=V.get(this);if(e)g[e]&&g[e].stop&&d(g[e]);else for(e in g)g[e]&&g[e].stop&&ab.test(e)&&d(g[e]);for(e=f.length;e--;)f[e].elem!==this||null!=a&&f[e].queue!==a||(f[e].anim.stop(c),b=!1,f.splice(e,1));!b&&c||r.dequeue(this,a)})},finish:function(a){return a!==!1&&(a=a||"fx"),this.each(function(){var b,c=V.get(this),d=c[a+"queue"],e=c[a+"queueHooks"],f=r.timers,g=d?d.length:0;for(c.finish=!0,r.queue(this,a,[]),e&&e.stop&&e.stop.call(this,!0),b=f.length;b--;)f[b].elem===this&&f[b].queue===a&&(f[b].anim.stop(!0),f.splice(b,1));for(b=0;b<g;b++)d[b]&&d[b].finish&&d[b].finish.call(this);delete c.finish})}}),r.each(["toggle","show","hide"],function(a,b){var c=r.fn[b];r.fn[b]=function(a,d,e){return null==a||"boolean"==typeof a?c.apply(this,arguments):this.animate(db(b,!0),a,d,e)}}),r.each({slideDown:db("show"),slideUp:db("hide"),slideToggle:db("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(a,b){r.fn[a]=function(a,c,d){return this.animate(b,a,c,d)}}),r.timers=[],r.fx.tick=function(){var a,b=0,c=r.timers;for(Za=r.now();b<c.length;b++)a=c[b],a()||c[b]!==a||c.splice(b--,1);c.length||r.fx.stop(),Za=void 0},r.fx.timer=function(a){r.timers.push(a),a()?r.fx.start():r.timers.pop()},r.fx.interval=13,r.fx.start=function(){$a||($a=a.requestAnimationFrame?a.requestAnimationFrame(bb):a.setInterval(r.fx.tick,r.fx.interval))},r.fx.stop=function(){a.cancelAnimationFrame?a.cancelAnimationFrame($a):a.clearInterval($a),$a=null},r.fx.speeds={slow:600,fast:200,_default:400},r.fn.delay=function(b,c){return b=r.fx?r.fx.speeds[b]||b:b,c=c||"fx",this.queue(c,function(c,d){var 
e=a.setTimeout(c,b);d.stop=function(){a.clearTimeout(e)}})},function(){var a=d.createElement("input"),b=d.createElement("select"),c=b.appendChild(d.createElement("option"));a.type="checkbox",o.checkOn=""!==a.value,o.optSelected=c.selected,a=d.createElement("input"),a.value="t",a.type="radio",o.radioValue="t"===a.value}();var ib,jb=r.expr.attrHandle;r.fn.extend({attr:function(a,b){return S(this,r.attr,a,b,arguments.length>1)},removeAttr:function(a){return this.each(function(){r.removeAttr(this,a)})}}),r.extend({attr:function(a,b,c){var d,e,f=a.nodeType;if(3!==f&&8!==f&&2!==f)return"undefined"==typeof a.getAttribute?r.prop(a,b,c):(1===f&&r.isXMLDoc(a)||(e=r.attrHooks[b.toLowerCase()]||(r.expr.match.bool.test(b)?ib:void 0)),
void 0!==c?null===c?void r.removeAttr(a,b):e&&"set"in e&&void 0!==(d=e.set(a,c,b))?d:(a.setAttribute(b,c+""),c):e&&"get"in e&&null!==(d=e.get(a,b))?d:(d=r.find.attr(a,b),null==d?void 0:d))},attrHooks:{type:{set:function(a,b){if(!o.radioValue&&"radio"===b&&r.nodeName(a,"input")){var c=a.value;return a.setAttribute("type",b),c&&(a.value=c),b}}}},removeAttr:function(a,b){var c,d=0,e=b&&b.match(K);if(e&&1===a.nodeType)while(c=e[d++])a.removeAttribute(c)}}),ib={set:function(a,b,c){return b===!1?r.removeAttr(a,c):a.setAttribute(c,c),c}},r.each(r.expr.match.bool.source.match(/\w+/g),function(a,b){var c=jb[b]||r.find.attr;jb[b]=function(a,b,d){var e,f,g=b.toLowerCase();return d||(f=jb[g],jb[g]=e,e=null!=c(a,b,d)?g:null,jb[g]=f),e}});var kb=/^(?:input|select|textarea|button)$/i,lb=/^(?:a|area)$/i;r.fn.extend({prop:function(a,b){return S(this,r.prop,a,b,arguments.length>1)},removeProp:function(a){return this.each(function(){delete this[r.propFix[a]||a]})}}),r.extend({prop:function(a,b,c){var d,e,f=a.nodeType;if(3!==f&&8!==f&&2!==f)return 1===f&&r.isXMLDoc(a)||(b=r.propFix[b]||b,e=r.propHooks[b]),void 0!==c?e&&"set"in e&&void 0!==(d=e.set(a,c,b))?d:a[b]=c:e&&"get"in e&&null!==(d=e.get(a,b))?d:a[b]},propHooks:{tabIndex:{get:function(a){var b=r.find.attr(a,"tabindex");return b?parseInt(b,10):kb.test(a.nodeName)||lb.test(a.nodeName)&&a.href?0:-1}}},propFix:{"for":"htmlFor","class":"className"}}),o.optSelected||(r.propHooks.selected={get:function(a){var b=a.parentNode;return b&&b.parentNode&&b.parentNode.selectedIndex,null},set:function(a){var b=a.parentNode;b&&(b.selectedIndex,b.parentNode&&b.parentNode.selectedIndex)}}),r.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){r.propFix[this.toLowerCase()]=this});function mb(a){var b=a.match(K)||[];return b.join(" ")}function nb(a){return a.getAttribute&&a.getAttribute("class")||""}r.fn.extend({addClass:function(a){var b,c,d,e,f,g,h,i=0;if(r.isFunction(a))return this.each(function(b){r(this).addClass(a.call(this,b,nb(this)))});if("string"==typeof a&&a){b=a.match(K)||[];while(c=this[i++])if(e=nb(c),d=1===c.nodeType&&" "+mb(e)+" "){g=0;while(f=b[g++])d.indexOf(" "+f+" ")<0&&(d+=f+" ");h=mb(d),e!==h&&c.setAttribute("class",h)}}return this},removeClass:function(a){var b,c,d,e,f,g,h,i=0;if(r.isFunction(a))return this.each(function(b){r(this).removeClass(a.call(this,b,nb(this)))});if(!arguments.length)return this.attr("class","");if("string"==typeof a&&a){b=a.match(K)||[];while(c=this[i++])if(e=nb(c),d=1===c.nodeType&&" "+mb(e)+" "){g=0;while(f=b[g++])while(d.indexOf(" "+f+" ")>-1)d=d.replace(" "+f+" "," ");h=mb(d),e!==h&&c.setAttribute("class",h)}}return this},toggleClass:function(a,b){var c=typeof a;return"boolean"==typeof b&&"string"===c?b?this.addClass(a):this.removeClass(a):r.isFunction(a)?this.each(function(c){r(this).toggleClass(a.call(this,c,nb(this),b),b)}):this.each(function(){var b,d,e,f;if("string"===c){d=0,e=r(this),f=a.match(K)||[];while(b=f[d++])e.hasClass(b)?e.removeClass(b):e.addClass(b)}else void 0!==a&&"boolean"!==c||(b=nb(this),b&&V.set(this,"__className__",b),this.setAttribute&&this.setAttribute("class",b||a===!1?"":V.get(this,"__className__")||""))})},hasClass:function(a){var b,c,d=0;b=" "+a+" ";while(c=this[d++])if(1===c.nodeType&&(" "+mb(nb(c))+" ").indexOf(b)>-1)return!0;return!1}});var ob=/\r/g;r.fn.extend({val:function(a){var b,c,d,e=this[0];{if(arguments.length)return d=r.isFunction(a),this.each(function(c){var 
e;1===this.nodeType&&(e=d?a.call(this,c,r(this).val()):a,null==e?e="":"number"==typeof e?e+="":r.isArray(e)&&(e=r.map(e,function(a){return null==a?"":a+""})),b=r.valHooks[this.type]||r.valHooks[this.nodeName.toLowerCase()],b&&"set"in b&&void 0!==b.set(this,e,"value")||(this.value=e))});if(e)return b=r.valHooks[e.type]||r.valHooks[e.nodeName.toLowerCase()],b&&"get"in b&&void 0!==(c=b.get(e,"value"))?c:(c=e.value,"string"==typeof c?c.replace(ob,""):null==c?"":c)}}}),r.extend({valHooks:{option:{get:function(a){var b=r.find.attr(a,"value");return null!=b?b:mb(r.text(a))}},select:{get:function(a){var b,c,d,e=a.options,f=a.selectedIndex,g="select-one"===a.type,h=g?null:[],i=g?f+1:e.length;for(d=f<0?i:g?f:0;d<i;d++)if(c=e[d],(c.selected||d===f)&&!c.disabled&&(!c.parentNode.disabled||!r.nodeName(c.parentNode,"optgroup"))){if(b=r(c).val(),g)return b;h.push(b)}return h},set:function(a,b){var c,d,e=a.options,f=r.makeArray(b),g=e.length;while(g--)d=e[g],(d.selected=r.inArray(r.valHooks.option.get(d),f)>-1)&&(c=!0);return c||(a.selectedIndex=-1),f}}}}),r.each(["radio","checkbox"],function(){r.valHooks[this]={set:function(a,b){if(r.isArray(b))return a.checked=r.inArray(r(a).val(),b)>-1}},o.checkOn||(r.valHooks[this].get=function(a){return null===a.getAttribute("value")?"on":a.value})});var pb=/^(?:focusinfocus|focusoutblur)$/;r.extend(r.event,{trigger:function(b,c,e,f){var g,h,i,j,k,m,n,o=[e||d],p=l.call(b,"type")?b.type:b,q=l.call(b,"namespace")?b.namespace.split("."):[];if(h=i=e=e||d,3!==e.nodeType&&8!==e.nodeType&&!pb.test(p+r.event.triggered)&&(p.indexOf(".")>-1&&(q=p.split("."),p=q.shift(),q.sort()),k=p.indexOf(":")<0&&"on"+p,b=b[r.expando]?b:new r.Event(p,"object"==typeof b&&b),b.isTrigger=f?2:3,b.namespace=q.join("."),b.rnamespace=b.namespace?new RegExp("(^|\\.)"+q.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,b.result=void 0,b.target||(b.target=e),c=null==c?[b]:r.makeArray(c,[b]),n=r.event.special[p]||{},f||!n.trigger||n.trigger.apply(e,c)!==!1)){if(!f&&!n.noBubble&&!r.isWindow(e)){for(j=n.delegateType||p,pb.test(j+p)||(h=h.parentNode);h;h=h.parentNode)o.push(h),i=h;i===(e.ownerDocument||d)&&o.push(i.defaultView||i.parentWindow||a)}g=0;while((h=o[g++])&&!b.isPropagationStopped())b.type=g>1?j:n.bindType||p,m=(V.get(h,"events")||{})[b.type]&&V.get(h,"handle"),m&&m.apply(h,c),m=k&&h[k],m&&m.apply&&T(h)&&(b.result=m.apply(h,c),b.result===!1&&b.preventDefault());return b.type=p,f||b.isDefaultPrevented()||n._default&&n._default.apply(o.pop(),c)!==!1||!T(e)||k&&r.isFunction(e[p])&&!r.isWindow(e)&&(i=e[k],i&&(e[k]=null),r.event.triggered=p,e[p](),r.event.triggered=void 0,i&&(e[k]=i)),b.result}},simulate:function(a,b,c){var d=r.extend(new r.Event,c,{type:a,isSimulated:!0});r.event.trigger(d,null,b)}}),r.fn.extend({trigger:function(a,b){return this.each(function(){r.event.trigger(a,b,this)})},triggerHandler:function(a,b){var c=this[0];if(c)return r.event.trigger(a,b,c,!0)}}),r.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(a,b){r.fn[b]=function(a,c){return arguments.length>0?this.on(b,null,a,c):this.trigger(b)}}),r.fn.extend({hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)}}),o.focusin="onfocusin"in a,o.focusin||r.each({focus:"focusin",blur:"focusout"},function(a,b){var c=function(a){r.event.simulate(b,a.target,r.event.fix(a))};r.event.special[b]={setup:function(){var 
d=this.ownerDocument||this,e=V.access(d,b);e||d.addEventListener(a,c,!0),V.access(d,b,(e||0)+1)},teardown:function(){var d=this.ownerDocument||this,e=V.access(d,b)-1;e?V.access(d,b,e):(d.removeEventListener(a,c,!0),V.remove(d,b))}}});var qb=a.location,rb=r.now(),sb=/\?/;r.parseXML=function(b){var c;if(!b||"string"!=typeof b)return null;try{c=(new a.DOMParser).parseFromString(b,"text/xml")}catch(d){c=void 0}return c&&!c.getElementsByTagName("parsererror").length||r.error("Invalid XML: "+b),c};var tb=/\[\]$/,ub=/\r?\n/g,vb=/^(?:submit|button|image|reset|file)$/i,wb=/^(?:input|select|textarea|keygen)/i;function xb(a,b,c,d){var e;if(r.isArray(b))r.each(b,function(b,e){c||tb.test(a)?d(a,e):xb(a+"["+("object"==typeof e&&null!=e?b:"")+"]",e,c,d)});else if(c||"object"!==r.type(b))d(a,b);else for(e in b)xb(a+"["+e+"]",b[e],c,d)}r.param=function(a,b){var c,d=[],e=function(a,b){var c=r.isFunction(b)?b():b;d[d.length]=encodeURIComponent(a)+"="+encodeURIComponent(null==c?"":c)};if(r.isArray(a)||a.jquery&&!r.isPlainObject(a))r.each(a,function(){e(this.name,this.value)});else for(c in a)xb(c,a[c],b,e);return d.join("&")},r.fn.extend({serialize:function(){return r.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var a=r.prop(this,"elements");return a?r.makeArray(a):this}).filter(function(){var a=this.type;return this.name&&!r(this).is(":disabled")&&wb.test(this.nodeName)&&!vb.test(a)&&(this.checked||!ia.test(a))}).map(function(a,b){var c=r(this).val();return null==c?null:r.isArray(c)?r.map(c,function(a){return{name:b.name,value:a.replace(ub,"\r\n")}}):{name:b.name,value:c.replace(ub,"\r\n")}}).get()}});var yb=/%20/g,zb=/#.*$/,Ab=/([?&])_=[^&]*/,Bb=/^(.*?):[ \t]*([^\r\n]*)$/gm,Cb=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,Db=/^(?:GET|HEAD)$/,Eb=/^\/\//,Fb={},Gb={},Hb="*/".concat("*"),Ib=d.createElement("a");Ib.href=qb.href;function Jb(a){return function(b,c){"string"!=typeof b&&(c=b,b="*");var d,e=0,f=b.toLowerCase().match(K)||[];if(r.isFunction(c))while(d=f[e++])"+"===d[0]?(d=d.slice(1)||"*",(a[d]=a[d]||[]).unshift(c)):(a[d]=a[d]||[]).push(c)}}function Kb(a,b,c,d){var e={},f=a===Gb;function g(h){var i;return e[h]=!0,r.each(a[h]||[],function(a,h){var j=h(b,c,d);return"string"!=typeof j||f||e[j]?f?!(i=j):void 0:(b.dataTypes.unshift(j),g(j),!1)}),i}return g(b.dataTypes[0])||!e["*"]&&g("*")}function Lb(a,b){var c,d,e=r.ajaxSettings.flatOptions||{};for(c in b)void 0!==b[c]&&((e[c]?a:d||(d={}))[c]=b[c]);return d&&r.extend(!0,a,d),a}function Mb(a,b,c){var d,e,f,g,h=a.contents,i=a.dataTypes;while("*"===i[0])i.shift(),void 0===d&&(d=a.mimeType||b.getResponseHeader("Content-Type"));if(d)for(e in h)if(h[e]&&h[e].test(d)){i.unshift(e);break}if(i[0]in c)f=i[0];else{for(e in c){if(!i[0]||a.converters[e+" "+i[0]]){f=e;break}g||(g=e)}f=f||g}if(f)return f!==i[0]&&i.unshift(f),c[f]}function Nb(a,b,c,d){var e,f,g,h,i,j={},k=a.dataTypes.slice();if(k[1])for(g in a.converters)j[g.toLowerCase()]=a.converters[g];f=k.shift();while(f)if(a.responseFields[f]&&(c[a.responseFields[f]]=b),!i&&d&&a.dataFilter&&(b=a.dataFilter(b,a.dataType)),i=f,f=k.shift())if("*"===f)f=i;else if("*"!==i&&i!==f){if(g=j[i+" "+f]||j["* "+f],!g)for(e in j)if(h=e.split(" "),h[1]===f&&(g=j[i+" "+h[0]]||j["* "+h[0]])){g===!0?g=j[e]:j[e]!==!0&&(f=h[0],k.unshift(h[1]));break}if(g!==!0)if(g&&a["throws"])b=g(b);else try{b=g(b)}catch(l){return{state:"parsererror",error:g?l:"No conversion from "+i+" to 
"+f}}}return{state:"success",data:b}}r.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:qb.href,type:"GET",isLocal:Cb.test(qb.protocol),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":Hb,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/\bxml\b/,html:/\bhtml/,json:/\bjson\b/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":JSON.parse,"text xml":r.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(a,b){return b?Lb(Lb(a,r.ajaxSettings),b):Lb(r.ajaxSettings,a)},ajaxPrefilter:Jb(Fb),ajaxTransport:Jb(Gb),ajax:function(b,c){"object"==typeof b&&(c=b,b=void 0),c=c||{};var e,f,g,h,i,j,k,l,m,n,o=r.ajaxSetup({},c),p=o.context||o,q=o.context&&(p.nodeType||p.jquery)?r(p):r.event,s=r.Deferred(),t=r.Callbacks("once memory"),u=o.statusCode||{},v={},w={},x="canceled",y={readyState:0,getResponseHeader:function(a){var b;if(k){if(!h){h={};while(b=Bb.exec(g))h[b[1].toLowerCase()]=b[2]}b=h[a.toLowerCase()]}return null==b?null:b},getAllResponseHeaders:function(){return k?g:null},setRequestHeader:function(a,b){return null==k&&(a=w[a.toLowerCase()]=w[a.toLowerCase()]||a,v[a]=b),this},overrideMimeType:function(a){return null==k&&(o.mimeType=a),this},statusCode:function(a){var b;if(a)if(k)y.always(a[y.status]);else for(b in a)u[b]=[u[b],a[b]];return this},abort:function(a){var b=a||x;return e&&e.abort(b),A(0,b),this}};if(s.promise(y),o.url=((b||o.url||qb.href)+"").replace(Eb,qb.protocol+"//"),o.type=c.method||c.type||o.method||o.type,o.dataTypes=(o.dataType||"*").toLowerCase().match(K)||[""],null==o.crossDomain){j=d.createElement("a");try{j.href=o.url,j.href=j.href,o.crossDomain=Ib.protocol+"//"+Ib.host!=j.protocol+"//"+j.host}catch(z){o.crossDomain=!0}}if(o.data&&o.processData&&"string"!=typeof o.data&&(o.data=r.param(o.data,o.traditional)),Kb(Fb,o,c,y),k)return y;l=r.event&&o.global,l&&0===r.active++&&r.event.trigger("ajaxStart"),o.type=o.type.toUpperCase(),o.hasContent=!Db.test(o.type),f=o.url.replace(zb,""),o.hasContent?o.data&&o.processData&&0===(o.contentType||"").indexOf("application/x-www-form-urlencoded")&&(o.data=o.data.replace(yb,"+")):(n=o.url.slice(f.length),o.data&&(f+=(sb.test(f)?"&":"?")+o.data,delete o.data),o.cache===!1&&(f=f.replace(Ab,"$1"),n=(sb.test(f)?"&":"?")+"_="+rb++ +n),o.url=f+n),o.ifModified&&(r.lastModified[f]&&y.setRequestHeader("If-Modified-Since",r.lastModified[f]),r.etag[f]&&y.setRequestHeader("If-None-Match",r.etag[f])),(o.data&&o.hasContent&&o.contentType!==!1||c.contentType)&&y.setRequestHeader("Content-Type",o.contentType),y.setRequestHeader("Accept",o.dataTypes[0]&&o.accepts[o.dataTypes[0]]?o.accepts[o.dataTypes[0]]+("*"!==o.dataTypes[0]?", "+Hb+"; q=0.01":""):o.accepts["*"]);for(m in o.headers)y.setRequestHeader(m,o.headers[m]);if(o.beforeSend&&(o.beforeSend.call(p,y,o)===!1||k))return y.abort();if(x="abort",t.add(o.complete),y.done(o.success),y.fail(o.error),e=Kb(Gb,o,c,y)){if(y.readyState=1,l&&q.trigger("ajaxSend",[y,o]),k)return y;o.async&&o.timeout>0&&(i=a.setTimeout(function(){y.abort("timeout")},o.timeout));try{k=!1,e.send(v,A)}catch(z){if(k)throw z;A(-1,z)}}else A(-1,"No Transport");function A(b,c,d,h){var j,m,n,v,w,x=c;k||(k=!0,i&&a.clearTimeout(i),e=void 
0,g=h||"",y.readyState=b>0?4:0,j=b>=200&&b<300||304===b,d&&(v=Mb(o,y,d)),v=Nb(o,v,y,j),j?(o.ifModified&&(w=y.getResponseHeader("Last-Modified"),w&&(r.lastModified[f]=w),w=y.getResponseHeader("etag"),w&&(r.etag[f]=w)),204===b||"HEAD"===o.type?x="nocontent":304===b?x="notmodified":(x=v.state,m=v.data,n=v.error,j=!n)):(n=x,!b&&x||(x="error",b<0&&(b=0))),y.status=b,y.statusText=(c||x)+"",j?s.resolveWith(p,[m,x,y]):s.rejectWith(p,[y,x,n]),y.statusCode(u),u=void 0,l&&q.trigger(j?"ajaxSuccess":"ajaxError",[y,o,j?m:n]),t.fireWith(p,[y,x]),l&&(q.trigger("ajaxComplete",[y,o]),--r.active||r.event.trigger("ajaxStop")))}return y},getJSON:function(a,b,c){return r.get(a,b,c,"json")},getScript:function(a,b){return r.get(a,void 0,b,"script")}}),r.each(["get","post"],function(a,b){r[b]=function(a,c,d,e){return r.isFunction(c)&&(e=e||d,d=c,c=void 0),r.ajax(r.extend({url:a,type:b,dataType:e,data:c,success:d},r.isPlainObject(a)&&a))}}),r._evalUrl=function(a){return r.ajax({url:a,type:"GET",dataType:"script",cache:!0,async:!1,global:!1,"throws":!0})},r.fn.extend({wrapAll:function(a){var b;return this[0]&&(r.isFunction(a)&&(a=a.call(this[0])),b=r(a,this[0].ownerDocument).eq(0).clone(!0),this[0].parentNode&&b.insertBefore(this[0]),b.map(function(){var a=this;while(a.firstElementChild)a=a.firstElementChild;return a}).append(this)),this},wrapInner:function(a){return r.isFunction(a)?this.each(function(b){r(this).wrapInner(a.call(this,b))}):this.each(function(){var b=r(this),c=b.contents();c.length?c.wrapAll(a):b.append(a)})},wrap:function(a){var b=r.isFunction(a);return this.each(function(c){r(this).wrapAll(b?a.call(this,c):a)})},unwrap:function(a){return this.parent(a).not("body").each(function(){r(this).replaceWith(this.childNodes)}),this}}),r.expr.pseudos.hidden=function(a){return!r.expr.pseudos.visible(a)},r.expr.pseudos.visible=function(a){return!!(a.offsetWidth||a.offsetHeight||a.getClientRects().length)},r.ajaxSettings.xhr=function(){try{return new a.XMLHttpRequest}catch(b){}};var Ob={0:200,1223:204},Pb=r.ajaxSettings.xhr();o.cors=!!Pb&&"withCredentials"in Pb,o.ajax=Pb=!!Pb,r.ajaxTransport(function(b){var c,d;if(o.cors||Pb&&!b.crossDomain)return{send:function(e,f){var g,h=b.xhr();if(h.open(b.type,b.url,b.async,b.username,b.password),b.xhrFields)for(g in b.xhrFields)h[g]=b.xhrFields[g];b.mimeType&&h.overrideMimeType&&h.overrideMimeType(b.mimeType),b.crossDomain||e["X-Requested-With"]||(e["X-Requested-With"]="XMLHttpRequest");for(g in e)h.setRequestHeader(g,e[g]);c=function(a){return function(){c&&(c=d=h.onload=h.onerror=h.onabort=h.onreadystatechange=null,"abort"===a?h.abort():"error"===a?"number"!=typeof h.status?f(0,"error"):f(h.status,h.statusText):f(Ob[h.status]||h.status,h.statusText,"text"!==(h.responseType||"text")||"string"!=typeof h.responseText?{binary:h.response}:{text:h.responseText},h.getAllResponseHeaders()))}},h.onload=c(),d=h.onerror=c("error"),void 0!==h.onabort?h.onabort=d:h.onreadystatechange=function(){4===h.readyState&&a.setTimeout(function(){c&&d()})},c=c("abort");try{h.send(b.hasContent&&b.data||null)}catch(i){if(c)throw i}},abort:function(){c&&c()}}}),r.ajaxPrefilter(function(a){a.crossDomain&&(a.contents.script=!1)}),r.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/\b(?:java|ecma)script\b/},converters:{"text script":function(a){return r.globalEval(a),a}}}),r.ajaxPrefilter("script",function(a){void 
0===a.cache&&(a.cache=!1),a.crossDomain&&(a.type="GET")}),r.ajaxTransport("script",function(a){if(a.crossDomain){var b,c;return{send:function(e,f){b=r("<script>").prop({charset:a.scriptCharset,src:a.url}).on("load error",c=function(a){b.remove(),c=null,a&&f("error"===a.type?404:200,a.type)}),d.head.appendChild(b[0])},abort:function(){c&&c()}}}});var Qb=[],Rb=/(=)\?(?=&|$)|\?\?/;r.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var a=Qb.pop()||r.expando+"_"+rb++;return this[a]=!0,a}}),r.ajaxPrefilter("json jsonp",function(b,c,d){var e,f,g,h=b.jsonp!==!1&&(Rb.test(b.url)?"url":"string"==typeof b.data&&0===(b.contentType||"").indexOf("application/x-www-form-urlencoded")&&Rb.test(b.data)&&"data");if(h||"jsonp"===b.dataTypes[0])return e=b.jsonpCallback=r.isFunction(b.jsonpCallback)?b.jsonpCallback():b.jsonpCallback,h?b[h]=b[h].replace(Rb,"$1"+e):b.jsonp!==!1&&(b.url+=(sb.test(b.url)?"&":"?")+b.jsonp+"="+e),b.converters["script json"]=function(){return g||r.error(e+" was not called"),g[0]},b.dataTypes[0]="json",f=a[e],a[e]=function(){g=arguments},d.always(function(){void 0===f?r(a).removeProp(e):a[e]=f,b[e]&&(b.jsonpCallback=c.jsonpCallback,Qb.push(e)),g&&r.isFunction(f)&&f(g[0]),g=f=void 0}),"script"}),o.createHTMLDocument=function(){var a=d.implementation.createHTMLDocument("").body;return a.innerHTML="<form></form><form></form>",2===a.childNodes.length}(),r.parseHTML=function(a,b,c){if("string"!=typeof a)return[];"boolean"==typeof b&&(c=b,b=!1);var e,f,g;return b||(o.createHTMLDocument?(b=d.implementation.createHTMLDocument(""),e=b.createElement("base"),e.href=d.location.href,b.head.appendChild(e)):b=d),f=B.exec(a),g=!c&&[],f?[b.createElement(f[1])]:(f=pa([a],b,g),g&&g.length&&r(g).remove(),r.merge([],f.childNodes))},r.fn.load=function(a,b,c){var d,e,f,g=this,h=a.indexOf(" ");return h>-1&&(d=mb(a.slice(h)),a=a.slice(0,h)),r.isFunction(b)?(c=b,b=void 0):b&&"object"==typeof b&&(e="POST"),g.length>0&&r.ajax({url:a,type:e||"GET",dataType:"html",data:b}).done(function(a){f=arguments,g.html(d?r("<div>").append(r.parseHTML(a)).find(d):a)}).always(c&&function(a,b){g.each(function(){c.apply(this,f||[a.responseText,b,a])})}),this},r.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(a,b){r.fn[b]=function(a){return this.on(b,a)}}),r.expr.pseudos.animated=function(a){return r.grep(r.timers,function(b){return a===b.elem}).length};function Sb(a){return r.isWindow(a)?a:9===a.nodeType&&a.defaultView}r.offset={setOffset:function(a,b,c){var d,e,f,g,h,i,j,k=r.css(a,"position"),l=r(a),m={};"static"===k&&(a.style.position="relative"),h=l.offset(),f=r.css(a,"top"),i=r.css(a,"left"),j=("absolute"===k||"fixed"===k)&&(f+i).indexOf("auto")>-1,j?(d=l.position(),g=d.top,e=d.left):(g=parseFloat(f)||0,e=parseFloat(i)||0),r.isFunction(b)&&(b=b.call(a,c,r.extend({},h))),null!=b.top&&(m.top=b.top-h.top+g),null!=b.left&&(m.left=b.left-h.left+e),"using"in b?b.using.call(a,m):l.css(m)}},r.fn.extend({offset:function(a){if(arguments.length)return void 0===a?this:this.each(function(b){r.offset.setOffset(this,a,b)});var b,c,d,e,f=this[0];if(f)return f.getClientRects().length?(d=f.getBoundingClientRect(),d.width||d.height?(e=f.ownerDocument,c=Sb(e),b=e.documentElement,{top:d.top+c.pageYOffset-b.clientTop,left:d.left+c.pageXOffset-b.clientLeft}):d):{top:0,left:0}},position:function(){if(this[0]){var 
a,b,c=this[0],d={top:0,left:0};return"fixed"===r.css(c,"position")?b=c.getBoundingClientRect():(a=this.offsetParent(),b=this.offset(),r.nodeName(a[0],"html")||(d=a.offset()),d={top:d.top+r.css(a[0],"borderTopWidth",!0),left:d.left+r.css(a[0],"borderLeftWidth",!0)}),{top:b.top-d.top-r.css(c,"marginTop",!0),left:b.left-d.left-r.css(c,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var a=this.offsetParent;while(a&&"static"===r.css(a,"position"))a=a.offsetParent;return a||qa})}}),r.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(a,b){var c="pageYOffset"===b;r.fn[a]=function(d){return S(this,function(a,d,e){var f=Sb(a);return void 0===e?f?f[b]:a[d]:void(f?f.scrollTo(c?f.pageXOffset:e,c?e:f.pageYOffset):a[d]=e)},a,d,arguments.length)}}),r.each(["top","left"],function(a,b){r.cssHooks[b]=Oa(o.pixelPosition,function(a,c){if(c)return c=Na(a,b),La.test(c)?r(a).position()[b]+"px":c})}),r.each({Height:"height",Width:"width"},function(a,b){r.each({padding:"inner"+a,content:b,"":"outer"+a},function(c,d){r.fn[d]=function(e,f){var g=arguments.length&&(c||"boolean"!=typeof e),h=c||(e===!0||f===!0?"margin":"border");return S(this,function(b,c,e){var f;return r.isWindow(b)?0===d.indexOf("outer")?b["inner"+a]:b.document.documentElement["client"+a]:9===b.nodeType?(f=b.documentElement,Math.max(b.body["scroll"+a],f["scroll"+a],b.body["offset"+a],f["offset"+a],f["client"+a])):void 0===e?r.css(b,c,h):r.style(b,c,e,h)},b,g?e:void 0,g)}})}),r.fn.extend({bind:function(a,b,c){return this.on(a,null,b,c)},unbind:function(a,b){return this.off(a,null,b)},delegate:function(a,b,c,d){return this.on(b,a,c,d)},undelegate:function(a,b,c){return 1===arguments.length?this.off(a,"**"):this.off(b,a||"**",c)}}),r.parseJSON=JSON.parse,"function"==typeof define&&define.amd&&define("jquery",[],function(){return r});var Tb=a.jQuery,Ub=a.$;return r.noConflict=function(b){return a.$===r&&(a.$=Ub),b&&a.jQuery===r&&(a.jQuery=Tb),r},b||(a.jQuery=a.$=r),r});
(function () {
"use strict";
    // Extract the params from the routing rules (the extraction loop below is currently commented out)
var pattern = new RegExp("<[^:]*:?([^>]+)>", "g");
var result = null;
// $.each(g.rules, function (endpoint, rules) {
// $.each(rules, function (index, rule) {
// rule.params = [];
// while ((result = pattern.exec(rule.rule)) !== null) {
// rule.params.push(result[1]);
// }
// });
// });
/**
* Generate url for the endpoint.
* urlFor(endpoint [, parameters] [, external])
* @param endpoint
* @param parameters
* @param external
* @returns url for the endpoint.
*/
function urlFor(endpoint, parameters, external) {
var url = null,
params = [],
maxMatchDegree = 0.0,
keys;
if ($.type(parameters) === "boolean") {
            external = parameters;
}
parameters = ($.type(parameters) !== 'undefined') ? parameters : {};
external = ($.type(external) !== 'undefined') ? external : false;
if (g.rules[endpoint] === undefined) {
throw new Error("Uncorrect endpoint in urlFor(\"" + endpoint + "\", " +
JSON.stringify(parameters) + ")");
}
keys = $.map(parameters, function (value, key) {
return key;
});
        // Find the best-matching rule among the rules registered for this endpoint.
$.each(g.rules[endpoint], function (index, rule) {
var match = true,
currentMatchDegree = 0.0;
$.each(rule.params, function (index, param) {
if ($.inArray(param, keys) === -1) {
match = false;
return false;
}
});
if (match) {
currentMatchDegree = parseFloat(rule.params.length) / keys.length;
if (currentMatchDegree > maxMatchDegree || url === null) {
maxMatchDegree = currentMatchDegree;
url = rule.rule;
params = rule.params;
}
}
});
if (url) {
$.each(keys, function (index, key) {
                // Params built into the rule (path parameters)
if ($.inArray(key, params) > -1) {
url = url.replace(new RegExp("<[^:]*:?" + key + ">"), parameters[key]);
} else {
// Query string params
if (url.indexOf("?") === -1) {
url += "?";
}
if (!endsWith(url, '?')) {
url += "&";
}
url += key + "=" + parameters[key];
}
});
} else {
throw new Error("Uncorrect parameters in urlFor(\"" + endpoint + "\", " +
JSON.stringify(parameters) + ")");
}
if (external) {
            url = g.domain + url;
}
return url;
}
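    // Illustrative usage sketch only -- the endpoint and rule below are invented;
    // real rules come from g.rules, which the commented-out bootstrap above would
    // populate with the params of each rule:
    //   g.rules["user.profile"] = [{rule: "/user/<int:user_id>", params: ["user_id"]}];
    //   urlFor("user.profile", {user_id: 42});               // -> "/user/42"
    //   urlFor("user.profile", {user_id: 42, tab: "posts"}); // -> "/user/42?tab=posts"
    //   urlFor("user.profile", {user_id: 42}, true);         // -> g.domain + "/user/42"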
/**
* Check whether str ends with suffix.
* @param str
* @param suffix
* @returns {boolean}
*/
function endsWith(str, suffix) {
return str.indexOf(suffix, str.length - suffix.length) !== -1;
}
/**
* Register context into global variable g.
* @param context
*/
function registerContext(context) {
if (typeof g === 'undefined') {
throw new Error("Global variable g is not defined.");
}
$.each(context, function (key, value) {
if (g.hasOwnProperty(key)) {
throw new Error("The key '" + key + "' already exists in the global variable g.");
}
g[key] = value;
});
}
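    // Hypothetical example (key name invented): registerContext({csrfToken: "abc"})
    // would expose g.csrfToken to the rest of the scripts on the page.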
/**
* Find elements in #main
* @param selector
* @returns {*|jQuery}
*/
function $page(selector) {
return $('#main').find(selector);
}
window.$page = $page;
window.urlFor = urlFor;
window.registerContext = registerContext;
})(); | PypiClean |
/DataProfiler-0.10.3-py3-none-any.whl/dataprofiler/validators/base_validators.py | """Build model for dataset by identifying col type along with its respective params."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Callable, cast
if TYPE_CHECKING:
import dask.dataframe as dd
import pandas as pd
def is_in_range(x: str | int | float, config: dict) -> bool:
"""
    Check whether x falls within the range defined by the config.
:param x: number
:type x: int/float
:param config: configuration
:type config: dict
:returns: bool
"""
try:
return float(config["start"]) <= float(x) <= float(config["end"])
except Exception:
raise TypeError("Value is not a float")
def is_in_list(x: str | int | float, config: dict) -> bool:
"""
    Check whether x (cast to float) is in the config list.
    :param x: item
    :type x: str/int/float
:param config: configuration
:type config: dict
:returns: bool
"""
return float(x) in config
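# Illustrative sketch (not part of the library): these helpers are applied
# element-wise by Validator.validate below, e.g.
#   is_in_range(5, {"start": 1, "end": 10})  -> True
#   is_in_list("2", [1.0, 2.0, 3.0])         -> True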
class Validator:
"""For validating a data set."""
def __init__(self) -> None:
"""Initialize Validator object."""
self.config: dict | None = None
self.report: dict | None = None
self.validation_run: bool = False
self.validation_report: dict = dict()
def validate(self, data: pd.DataFrame | dd.DataFrame, config: dict) -> None:
"""
Validate a data set.
No option for validating a partial data set.
        The configuration is set on each run rather than at instantiation, so the
        validator can be run multiple times with different configurations without
        reinstantiating the class.
:param data: The data to be processed by the validator. Processing
occurs in a column-wise fashion.
:type data: DataFrame Dask/Pandas
:param config: configuration for how the validator should
run across the given data. Validator will only run over columns
specified in the configuration.
:type config: dict
:Example:
This is an example of the config::
            config = {
                "df_type": "pandas",  # or "dask"
                "known_anomaly_validation": {
                    <column_name>: {
                        "range": {
                            'start': 1,
                            'end': 2
                        },
                        "list": [1, 2, 3]
                    }
                }
            }
"""
if not config:
raise ValueError("Config is required")
known_anomaly_validation = config.get("known_anomaly_validation", {})
for iter_key, value in known_anomaly_validation.items():
if len(value) < 1:
raise Warning(
f"Pass at a minimum one value for a specified column "
f"(i.e. iter_key variable) -- not both for {iter_key}"
)
self.config = config
df_type = config.get("df_type", "").lower()
for iter_key, value in known_anomaly_validation.items():
self.validation_report[iter_key] = dict()
df_series = data[iter_key]
for sub_key, sub_value in value.items():
self.validation_report[iter_key][sub_key] = dict()
if sub_key not in ["range", "list"]:
raise TypeError("Range and list only acceptable key values.")
apply_type: Callable[[str | int | float, dict], bool] = (
is_in_range if sub_key == "range" else is_in_list
)
if df_type == "dask":
def apply_with_config(x: Any) -> bool:
return cast(bool, apply_type(x, sub_value))
temp_results = df_series.map(
apply_with_config, meta=(iter_key, "bool")
)
temp_results = temp_results.compute()
# Dask evaluates this to be an nd array so we have to
# convert it to a normal list
self.validation_report[iter_key][sub_key] = [
idx
for idx, val in enumerate(temp_results.values.tolist())
if val
]
elif df_type == "pandas":
temp_results = df_series.apply(apply_type, args=(sub_value,))
self.validation_report[iter_key][sub_key] = [
idx for idx, val in temp_results.items() if val
]
else:
raise ValueError(
"Dask and Pandas are the only supported dataframe " "types."
)
del temp_results
self.validation_run = True
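    # Usage sketch (the column name and bounds below are assumptions, not from
    # this file):
    #
    #     validator = Validator()
    #     validator.validate(
    #         df,  # a pandas DataFrame with an "age" column
    #         {
    #             "df_type": "pandas",
    #             "known_anomaly_validation": {
    #                 "age": {"range": {"start": 0, "end": 120}},
    #             },
    #         },
    #     )
    #     report = validator.get()  # {"age": {"range": [indices of in-range rows]}}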
def get(self) -> dict:
"""Get the results of the validation run."""
if self.validation_run:
return self.validation_report
else:
raise Warning(
"Precondition for get method not met. Must validate data prior "
"to getting results."
) | PypiClean |
/Hocrox-0.3.0.tar.gz/Hocrox-0.3.0/hocrox/layer/save.py | import os
import cv2
import numpy as np
from hocrox.utils import Layer
class Save(Layer):
"""Save layer saves images on the local filesystem.
Here is an example code to use the Save layer in a model.
```python
from hocrox.model import Model
from hocrox.layer import Read, Save
# Initializing the model
model = Model()
# Adding model layers
model.add(Read(path="./img"))
model.add(Save(path="./img_to_store", format="npy"))
# Printing the summary of the model
print(model.summary())
```
"""
def __init__(self, path, format="npy", name=None):
"""Init method for the Save layer.
Args:
path (str): Path to store the image
format (str, optional): Format to save the image. Supported formats are npy and img. Defaults to "npy".
name (str, optional): Name of the layer, if not provided then automatically generates a unique name for
the layer. Defaults to None.
Raises:
ValueError: If the name parameter is invalid
ValueError: If the format parameter is invalid
"""
if path and not isinstance(path, str):
raise ValueError(f"The value {path} for the argument path is not valid")
if format not in ("npy", "img"):
raise ValueError(f"The value {format} for the argument format is not valid")
self.__path = path
self.__format = format
super().__init__(
name,
"save",
self.STANDARD_SUPPORTED_LAYERS,
f"Path: {self.__path}, Format: {self.__format}",
)
def _apply_layer(self, images, name=None):
"""Apply the transformation method to change the layer.
Args:
images (list[ndarray]): List of images to transform.
name (str, optional): Name of the image series, used for saving the images. Defaults to None.
Returns:
list[ndarray]: Return the transform images
"""
for index, image in enumerate(images):
if image is not None and len(image) != 0:
layer_name = self._get_name()
filename = f"{layer_name}_{index}_{name}"
if self.__format == "npy":
np.save(os.path.join(self.__path, filename + ".npy"), image)
else:
cv2.imwrite(os.path.join(self.__path, filename), image)
return images | PypiClean |
/CsuPTMD-1.0.12.tar.gz/CsuPTMD-1.0.12/PTMD/maskrcnn_benchmark/apex/apex/amp/utils.py | from . import compat
import functools
import itertools
import torch
def is_cuda_enabled():
return torch.version.cuda is not None
def get_cuda_version():
return tuple(int(x) for x in torch.version.cuda.split('.'))
def is_fp_tensor(x):
if is_nested(x):
# Fast-fail version of all(is_fp_tensor)
for y in x:
if not is_fp_tensor(y):
return False
return True
return compat.is_tensor_like(x) and compat.is_floating_point(x)
def is_nested(x):
return isinstance(x, tuple) or isinstance(x, list)
def should_cache(x):
if is_nested(x):
# Fast-fail version of all(should_cache)
for y in x:
if not should_cache(y):
return False
return True
return isinstance(x, torch.nn.parameter.Parameter) and \
type_string(x) == 'FloatTensor'
def collect_fp_tensor_types(args, kwargs):
def collect_types(x, types):
if is_nested(x):
for y in x:
collect_types(y, types)
else:
types.add(type_string(x))
all_args = itertools.chain(args, kwargs.values())
types = set()
for x in all_args:
if is_fp_tensor(x):
collect_types(x, types)
return types
def type_string(x):
return x.type().split('.')[-1]
def maybe_half(x, name='', verbose=False):
if is_nested(x):
return type(x)([maybe_half(y) for y in x])
if not x.is_cuda or type_string(x) == 'HalfTensor':
return x
else:
if verbose:
print('Float->Half ({})'.format(name))
return x.half()
def maybe_float(x, name='', verbose=False):
if is_nested(x):
return type(x)([maybe_float(y) for y in x])
if not x.is_cuda or type_string(x) == 'FloatTensor':
return x
else:
if verbose:
print('Half->Float ({})'.format(name))
return x.float()
# NB: returns casted `args`, mutates `kwargs` in-place
def casted_args(cast_fn, args, kwargs):
new_args = []
for x in args:
if is_fp_tensor(x):
new_args.append(cast_fn(x))
else:
new_args.append(x)
for k in kwargs:
val = kwargs[k]
if is_fp_tensor(val):
kwargs[k] = cast_fn(val)
return new_args
def cached_cast(cast_fn, x, cache):
if is_nested(x):
        return type(x)([cached_cast(cast_fn, y, cache) for y in x])
if x in cache:
cached_x = cache[x]
if x.requires_grad and cached_x.requires_grad:
# Make sure x is actually cached_x's autograd parent.
if cached_x.grad_fn.next_functions[1][0].variable is not x:
raise RuntimeError("x and cache[x] both require grad, but x is not "
"cache[x]'s parent. This is likely an error.")
# During eval, it's possible to end up caching casted weights with
# requires_grad=False. On the next training iter, if cached_x is found
# and reused from the cache, it will not actually have x as its parent.
# Therefore, we choose to invalidate the cache (and force refreshing the cast)
# if x.requires_grad and cached_x.requires_grad do not match.
#
# During eval (i.e. running under with torch.no_grad()) the invalidation
# check would cause the cached value to be dropped every time, because
# cached_x would always be created with requires_grad=False, while x would
# still have requires_grad=True. This would render the cache effectively
# useless during eval. Therefore, if we are running under the no_grad()
# context manager (torch.is_grad_enabled=False) we elide the invalidation
# check, and use the cached value even though its requires_grad flag doesn't
# match. During eval, we don't care that there's no autograd-graph
# connection between x and cached_x.
if torch.is_grad_enabled() and x.requires_grad != cached_x.requires_grad:
del cache[x]
else:
return cached_x
casted_x = cast_fn(x)
cache[x] = casted_x
return casted_x
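# Illustrative glue (the wrapper code that normally does this is not shown
# here; the closure below is an assumption about typical usage, not apex API):
#
#     cache = {}
#     def cast(x):
#         return cached_cast(maybe_half, x, cache)
#     new_args = casted_args(cast, args, kwargs)
#
# i.e. float tensors in args/kwargs are cast to fp16 on the way into a wrapped
# function, and parameter casts are memoized in `cache` so repeated calls
# within one iteration reuse the same casted weight.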
def verbosify(cast_fn, fn_name, verbose):
if verbose:
return functools.partial(cast_fn, name=fn_name, verbose=verbose)
else:
return cast_fn
def as_inplace(fns):
for x in fns:
yield x + '_'
def has_func(mod, fn):
if isinstance(mod, dict):
return fn in mod
else:
return hasattr(mod, fn)
def get_func(mod, fn):
if isinstance(mod, dict):
return mod[fn]
else:
return getattr(mod, fn)
def set_func(mod, fn, new_fn):
if isinstance(mod, dict):
mod[fn] = new_fn
else:
setattr(mod, fn, new_fn)
def set_func_save(handle, mod, fn, new_fn):
cur_fn = get_func(mod, fn)
handle._save_func(mod, fn, cur_fn)
set_func(mod, fn, new_fn)
# A couple problems get solved here:
# - The flat_weight buffer is disconnected from autograd graph,
# so the fp16 weights need to be derived from the input weights
# to this forward call, not the flat buffer.
# - The ordering of weights in the flat buffer is...idiosyncratic.
# First problem is solved with combination of set_ (to set up
# correct storage) and copy_ (so the fp16 weight derives from the
# fp32 one in autograd).
# Second is solved by doing ptr arithmetic on the fp32 weights
# to derive the correct offset.
#
# TODO: maybe this should actually use
# `torch._cudnn_rnn_flatten_weight`? But then I need to call
# on first iter and cache the right offsets. Ugh.
def synthesize_flattened_rnn_weights(fp32_weights,
fp16_flat_tensor,
rnn_fn='',
verbose=False):
fp16_weights = []
fp32_base_ptr = fp32_weights[0][0].data_ptr()
for layer_weights in fp32_weights:
fp16_layer_weights = []
for w_fp32 in layer_weights:
w_fp16 = w_fp32.new().half()
offset = (w_fp32.data_ptr() - fp32_base_ptr) // w_fp32.element_size()
w_fp16.set_(fp16_flat_tensor.storage(),
offset,
w_fp32.shape)
w_fp16.copy_(w_fp32)
if verbose:
print('Float->Half ({})'.format(rnn_fn))
fp16_layer_weights.append(w_fp16)
fp16_weights.append(fp16_layer_weights)
return fp16_weights
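# Worked example of the offset arithmetic above (pointer values hypothetical):
# if the first fp32 weight starts at data_ptr() == 1000 and a later fp32 weight
# with element_size() == 4 starts at data_ptr() == 1160, then its offset into
# the flat fp16 buffer is (1160 - 1000) // 4 == 40 elements, which is where
# set_() points the fp16 view before copy_() fills it from the fp32 source.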
# Roughly the same as above, except the `fp32_weights` aren't nested.
# Code kept separate for readability.
def new_synthesize_flattened_rnn_weights(fp32_weights,
fp16_flat_tensor,
rnn_fn='',
verbose=False):
fp16_weights = []
fp32_base_ptr = fp32_weights[0].data_ptr()
for w_fp32 in fp32_weights:
w_fp16 = w_fp32.new().half()
offset = (w_fp32.data_ptr() - fp32_base_ptr) // w_fp32.element_size()
w_fp16.set_(fp16_flat_tensor.storage(),
offset,
w_fp32.shape)
w_fp16.copy_(w_fp32)
if verbose:
print('Float->Half ({})'.format(rnn_fn))
fp16_weights.append(w_fp16)
return fp16_weights | PypiClean |
/AYABInterface-0.0.9-py3-none-any.whl/AYABInterface-0.0.9.dist-info/DESCRIPTION.rst | AYABInterface
=============
.. image:: https://travis-ci.org/fossasia/AYABInterface.svg
:target: https://travis-ci.org/fossasia/AYABInterface
:alt: Build Status
.. image:: https://ci.appveyor.com/api/projects/status/a6yhbt0rqvb212s7?svg=true
:target: https://ci.appveyor.com/project/AllYarnsAreBeautiful/AYABInterface
:alt: AppVeyor CI build status (Windows)
.. image:: https://codeclimate.com/github/fossasia/AYABInterface/badges/gpa.svg
:target: https://codeclimate.com/github/fossasia/AYABInterface
:alt: Code Climate
.. image:: https://codeclimate.com/github/fossasia/AYABInterface/badges/coverage.svg
:target: https://codeclimate.com/github/fossasia/AYABInterface/coverage
:alt: Test Coverage
.. image:: https://codeclimate.com/github/fossasia/AYABInterface/badges/issue_count.svg
:target: https://codeclimate.com/github/fossasia/AYABInterface
:alt: Issue Count
.. image:: https://badge.fury.io/py/AYABInterface.svg
:target: https://pypi.python.org/pypi/AYABInterface
:alt: Python Package Version on Pypi
.. image:: https://img.shields.io/pypi/dm/AYABInterface.svg
:target: https://pypi.python.org/pypi/AYABInterface#downloads
:alt: Downloads from Pypi
.. image:: https://readthedocs.org/projects/ayabinterface/badge/?version=latest
:target: http://ayabinterface.readthedocs.io/en/latest/?badge=latest
:alt: Documentation Status
.. image:: https://landscape.io/github/fossasia/AYABInterface/master/landscape.svg?style=flat
:target: https://landscape.io/github/fossasia/AYABInterface/master
:alt: Code Health
.. image:: https://badge.waffle.io/fossasia/AYABInterface.svg?label=ready&title=issues%20ready
:target: https://waffle.io/fossasia/AYABInterface
:alt: Issues ready to work on
A Python library with the interface to the AYAB shield.
For installation instructions and more, `see the documentation
<http://AYABInterface.readthedocs.io/>`__.
| PypiClean |
/DJModels-0.0.6-py3-none-any.whl/djmodels/utils/version.py | import datetime
import functools
import os
import subprocess
import sys
from distutils.version import LooseVersion
# Private, stable API for detecting the Python version. PYXY means "Python X.Y
# or later". So that third-party apps can use these values, each constant
# should remain as long as the oldest supported Django version supports that
# Python version.
PY36 = sys.version_info >= (3, 6)
PY37 = sys.version_info >= (3, 7)
PY38 = sys.version_info >= (3, 8)
PY39 = sys.version_info >= (3, 9)
def get_version(version=None):
"""Return a PEP 440-compliant version number from VERSION."""
version = get_complete_version(version)
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|rc}N - for alpha, beta, and rc releases
main = get_main_version(version)
sub = ''
if version[3] == 'alpha' and version[4] == 0:
git_changeset = get_git_changeset()
if git_changeset:
sub = '.dev%s' % git_changeset
elif version[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}
sub = mapping[version[3]] + str(version[4])
return main + sub
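# Illustrative outputs (version tuples chosen for the example; the timestamp
# in the .dev case is a hypothetical git changeset):
#
#     get_version((2, 1, 0, 'final', 0))  -> '2.1'
#     get_version((2, 1, 3, 'rc', 1))     -> '2.1.3rc1'
#     get_version((2, 2, 0, 'alpha', 0))  -> '2.2.dev20190115123456'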
def get_main_version(version=None):
"""Return main version (X.Y[.Z]) from VERSION."""
version = get_complete_version(version)
parts = 2 if version[2] == 0 else 3
return '.'.join(str(x) for x in version[:parts])
def get_complete_version(version=None):
"""
Return a tuple of the djmodels version. If version argument is non-empty,
check for correctness of the tuple provided.
"""
if version is None:
from djmodels import VERSION as version
else:
assert len(version) == 5
assert version[3] in ('alpha', 'beta', 'rc', 'final')
return version
def get_docs_version(version=None):
version = get_complete_version(version)
if version[3] != 'final':
return 'dev'
else:
return '%d.%d' % version[:2]
@functools.lru_cache()
def get_git_changeset():
"""Return a numeric identifier of the latest git changeset.
The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
This value isn't guaranteed to be unique, but collisions are very unlikely,
so it's sufficient for generating the development version numbers.
"""
repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
git_log = subprocess.Popen(
'git log --pretty=format:%ct --quiet -1 HEAD',
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True, cwd=repo_dir, universal_newlines=True,
)
timestamp = git_log.communicate()[0]
try:
timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
except ValueError:
return None
return timestamp.strftime('%Y%m%d%H%M%S')
def get_version_tuple(version):
"""
Return a tuple of version numbers (e.g. (1, 2, 3)) from the version
string (e.g. '1.2.3').
"""
loose_version = LooseVersion(version)
version_numbers = []
for item in loose_version.version:
if not isinstance(item, int):
break
version_numbers.append(item)
return tuple(version_numbers) | PypiClean |
/DEWAKSS-1.0.1.tar.gz/DEWAKSS-1.0.1/notebook/DEWAKSS_data.ipynb | ```
import pandas as pd
import numpy as np
pd.set_option('precision',3)
np.set_printoptions(precision=3, linewidth=1000)
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
# color_pallet = "muted"
color_pallet = "deep"
plt.style.use('seaborn-ticks')
sns.set_color_codes(color_pallet)
plt.rcParams['svg.fonttype'] = 'none'
import scanpy as sc
import os
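# `gitbranch` and `figdir` are not defined in the cells shown here and are
# presumably set in an earlier notebook cell; hypothetical placeholders so the
# cell runs standalone:
gitbranch = "master"  # assumed name of the branch-specific data subdirectory
figdir = os.path.join("..", "figures", gitbranch)  # assumed figure output dir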
datadir = os.path.join("..", "data", gitbranch)
plt.rcParams['svg.fonttype'] = 'none'
sc.settings.figdir = figdir
sc.settings.file_format_figs = "svg"
```
# Denoise data
Load data that has been preprocessed, Count normalized and transformed.
```
adata = sc.read(os.path.join(datadir, "adata_filtered.h5ad"))
from dewakss import denoise as dewakss
# neigbours = list(range(3,251))
neigbours = list(range(3, 100, 5))
npcss = [50, 100, 200]
# pca_args = {'use_highly_variable': True} # This will affect the umap.
pca_args = {}
dewaxer = dewakss.DEWAKSS(adata, n_neighbors=neigbours, n_pcs=npcss, use_global_err=False, create_layer='denoised', modest='max', pca_args=pca_args)
dewaxer.fit(adata)
fig, ax = dewaxer.plot_global_performance()
fig, ax = dewaxer.plot_local_performance()
fig, ax = dewaxer.plot_local_neighbour_hist()
dewaxer.transform(adata, copy=False)
print(adata)
adata.write(os.path.join(datadir, "adata_dewakss_computed.h5ad"))
```
# Analyse denoised data
```
adata = sc.read(os.path.join(datadir, "adata_dewakss_computed.h5ad"))
adata.layers['norm'] = adata.X.copy()
adata.X = adata.layers['denoised'].copy()
```
| PypiClean |
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/gauges/BarGauge.js | define("dojox/gauges/BarGauge",["dojo/_base/declare","dojo/_base/lang","dojo/_base/array","dojo/_base/html","dojo/_base/event","dojox/gfx","./_Gauge","./BarLineIndicator","dojo/dom-geometry"],function(_1,_2,_3,_4,_5,_6,_7,_8,_9){
return _1("dojox.gauges.BarGauge",_7,{dataX:5,dataY:5,dataWidth:0,dataHeight:0,_defaultIndicator:_8,startup:function(){
if(this.getChildren){
_3.forEach(this.getChildren(),function(_a){
_a.startup();
});
}
if(!this.dataWidth){
this.dataWidth=this.gaugeWidth-10;
}
if(!this.dataHeight){
this.dataHeight=this.gaugeHeight-10;
}
this.inherited(arguments);
},_getPosition:function(_b){
return this.dataX+Math.floor((_b-this.min)/(this.max-this.min)*this.dataWidth);
},_getValueForPosition:function(_c){
return (_c-this.dataX)*(this.max-this.min)/this.dataWidth+this.min;
},drawRange:function(_d,_e){
if(_e.shape){
_e.shape.parent.remove(_e.shape);
_e.shape=null;
}
var x1=this._getPosition(_e.low);
var x2=this._getPosition(_e.high);
var _f=_d.createRect({x:x1,y:this.dataY,width:x2-x1,height:this.dataHeight});
if(_2.isArray(_e.color)||_2.isString(_e.color)){
_f.setStroke({color:_e.color});
_f.setFill(_e.color);
}else{
if(_e.color.type){
var y=this.dataY+this.dataHeight/2;
_e.color.x1=x1;
_e.color.x2=x2;
_e.color.y1=y;
_e.color.y2=y;
_f.setFill(_e.color);
_f.setStroke({color:_e.color.colors[0].color});
}else{
if(_6.svg){
_f.setStroke({color:"green"});
_f.setFill("green");
_f.getEventSource().setAttribute("class",_e.color.style);
}
}
}
_f.connect("onmouseover",_2.hitch(this,this._handleMouseOverRange,_e));
_f.connect("onmouseout",_2.hitch(this,this._handleMouseOutRange,_e));
_e.shape=_f;
},getRangeUnderMouse:function(e){
var _10=null;
var pos=_9.getContentBox(this.gaugeContent);
var x=e.clientX-pos.x;
var _11=this._getValueForPosition(x);
if(this._rangeData){
for(var i=0;(i<this._rangeData.length)&&!_10;i++){
if((Number(this._rangeData[i].low)<=_11)&&(Number(this._rangeData[i].high)>=_11)){
_10=this._rangeData[i];
}
}
}
return _10;
},_dragIndicator:function(_12,e){
this._dragIndicatorAt(_12,e.pageX,e.pageY);
_5.stop(e);
},_dragIndicatorAt:function(_13,x,y){
var pos=_9.position(_13.gaugeContent,true);
var xl=x-pos.x;
var _14=_13._getValueForPosition(xl);
if(_14<_13.min){
_14=_13.min;
}
if(_14>_13.max){
_14=_13.max;
}
_13._drag.value=_14;
_13._drag.onDragMove(_13._drag);
_13._drag.draw(this._indicatorsGroup,true);
_13._drag.valueChanged();
}});
}); | PypiClean |
/CmonCrawl-1.0.3.tar.gz/CmonCrawl-1.0.3/cmoncrawl/common/types.py | from datetime import datetime
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List
from urllib.parse import urlparse
from dataclasses import dataclass, field
from marshmallow import fields
from dataclasses_json import dataclass_json, config
@dataclass_json
@dataclass
class DomainRecord:
"""
Domain record.
"""
filename: str
url: str | None
offset: int
length: int
digest: str | None = None
encoding: str | None = None
timestamp: datetime | None = field(
metadata=config(mm_field=fields.DateTime(format="iso")), default=None
)
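# Construction sketch (all field values below are made up for illustration):
#
#     record = DomainRecord(
#         filename="crawl-data/CC-MAIN-2021-04/segments/.../file.warc.gz",
#         url="https://example.com/",
#         offset=10_000,
#         length=2_048,
#         timestamp=datetime(2021, 1, 15),
#     )
#     record.to_json()  # (de)serialization comes from the dataclass_json decorator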
@dataclass
class PipeMetadata:
"""
Metadata for a pipe.
"""
domain_record: DomainRecord
article_data: Dict[Any, Any] = field(default_factory=dict)
warc_header: Dict[str, Any] = field(default_factory=dict)
http_header: Dict[str, Any] = field(default_factory=dict)
rec_type: str | None = None
encoding: str = "latin-1"
name: str | None = None
def __post_init__(self):
self.url_parsed = urlparse(self.domain_record.url)
@dataclass
class RetrieveResponse:
"""
Response from retrieve.
"""
status: int
content: Any
reason: None | str
@dataclass
class DomainCrawl:
"""
Domain crawl.
"""
domain: str = ""
cdx_server: str = ""
page: int = 0
# ===============================================================================
# Extractor config
@dataclass_json
@dataclass
class ExtractorConfig:
"""
Configuration for extractor.
"""
name: str
since: datetime | None = field(
metadata=config(mm_field=fields.DateTime(format="iso")), default=None
)
to: datetime | None = field(
metadata=config(mm_field=fields.DateTime(format="iso")), default=None
)
@dataclass_json
@dataclass
class RoutesConfig:
"""
Configuration for extractors.
"""
regexes: list[str] = field(default_factory=list)
extractors: list[ExtractorConfig] = field(default_factory=list)
@dataclass_json
@dataclass
class ExtractConfig:
"""
Configuration for run.
"""
extractors_path: Path
routes: List[RoutesConfig]
class MatchType(Enum):
"""
Match type for cdx server.
"""
EXACT = "exact"
PREFIX = "prefix"
HOST = "host"
DOMAIN = "domain" | PypiClean |
/Bugs%20Everywhere%20(BEurtle%20fork)-1.5.0.1.-2012-07-16-.zip/Bugs Everywhere (BEurtle fork)-1.5.0.1.-2012-07-16-/libbe/bugdir.py | import copy
import errno
import os
import os.path
import time
import libbe
import libbe.storage as storage
from libbe.storage.util.properties import Property, doc_property, \
local_property, defaulting_property, checked_property, \
fn_checked_property, cached_property, primed_property, \
change_hook_property, settings_property
import libbe.storage.util.settings_object as settings_object
import libbe.storage.util.mapfile as mapfile
import libbe.bug as bug
import libbe.util.utility as utility
import libbe.util.id
if libbe.TESTING == True:
import doctest
import sys
import unittest
import libbe.storage.base
class NoBugMatches(libbe.util.id.NoIDMatches):
def __init__(self, *args, **kwargs):
libbe.util.id.NoIDMatches.__init__(self, *args, **kwargs)
def __str__(self):
if self.msg == None:
return 'No bug matches %s' % self.id
return self.msg
class BugDir (list, settings_object.SavedSettingsObject):
"""A BugDir is a container for :class:`~libbe.bug.Bug`\s, with some
additional attributes.
Parameters
----------
storage : :class:`~libbe.storage.base.Storage`
Storage instance containing the bug directory. If
`from_storage` is `False`, `storage` may be `None`.
uuid : str, optional
Set the bugdir UUID (see :mod:`libbe.util.id`).
Useful if you are loading one of several bugdirs
stored in a single Storage instance.
from_storage : bool, optional
If `True`, attempt to load from storage. Otherwise,
setup in memory, saving to `storage` if it is not `None`.
See Also
--------
:class:`SimpleBugDir` for some bugdir manipulation exampes.
"""
settings_properties = []
required_saved_properties = []
_prop_save_settings = settings_object.prop_save_settings
_prop_load_settings = settings_object.prop_load_settings
def _versioned_property(settings_properties=settings_properties,
required_saved_properties=required_saved_properties,
**kwargs):
if "settings_properties" not in kwargs:
kwargs["settings_properties"] = settings_properties
if "required_saved_properties" not in kwargs:
kwargs["required_saved_properties"]=required_saved_properties
return settings_object.versioned_property(**kwargs)
@_versioned_property(name="target",
doc="The current project development target.")
def target(): return {}
def _setup_severities(self, severities):
if severities not in [None, settings_object.EMPTY]:
bug.load_severities(severities)
def _set_severities(self, old_severities, new_severities):
self._setup_severities(new_severities)
self._prop_save_settings(old_severities, new_severities)
@_versioned_property(name="severities",
doc="The allowed bug severities and their descriptions.",
change_hook=_set_severities)
def severities(): return {}
def _setup_status(self, active_status, inactive_status):
bug.load_status(active_status, inactive_status)
def _set_active_status(self, old_active_status, new_active_status):
self._setup_status(new_active_status, self.inactive_status)
self._prop_save_settings(old_active_status, new_active_status)
@_versioned_property(name="active_status",
doc="The allowed active bug states and their descriptions.",
change_hook=_set_active_status)
def active_status(): return {}
def _set_inactive_status(self, old_inactive_status, new_inactive_status):
self._setup_status(self.active_status, new_inactive_status)
self._prop_save_settings(old_inactive_status, new_inactive_status)
@_versioned_property(name="inactive_status",
doc="The allowed inactive bug states and their descriptions.",
change_hook=_set_inactive_status)
def inactive_status(): return {}
def _extra_strings_check_fn(value):
return utility.iterable_full_of_strings(value, \
alternative=settings_object.EMPTY)
def _extra_strings_change_hook(self, old, new):
self.extra_strings.sort() # to make merging easier
self._prop_save_settings(old, new)
@_versioned_property(name="extra_strings",
doc="Space for an array of extra strings. Useful for storing state for functionality implemented purely in becommands/<some_function>.py.",
default=[],
check_fn=_extra_strings_check_fn,
change_hook=_extra_strings_change_hook,
mutable=True)
def extra_strings(): return {}
def _bug_map_gen(self):
map = {}
for bug in self:
map[bug.uuid] = bug
for uuid in self.uuids():
if uuid not in map:
map[uuid] = None
self._bug_map_value = map # ._bug_map_value used by @local_property
@Property
@primed_property(primer=_bug_map_gen)
@local_property("bug_map")
@doc_property(doc="A dict of (bug-uuid, bug-instance) pairs.")
def _bug_map(): return {}
def __init__(self, storage, uuid=None, from_storage=False):
list.__init__(self)
settings_object.SavedSettingsObject.__init__(self)
self.storage = storage
self.id = libbe.util.id.ID(self, 'bugdir')
self.uuid = uuid
if from_storage == True:
if self.uuid == None:
self.uuid = [c for c in self.storage.children()
if c != 'version'][0]
self.load_settings()
else:
if self.uuid == None:
self.uuid = libbe.util.id.uuid_gen()
if self.storage != None and self.storage.is_writeable():
self.save()
# methods for saving/loading/accessing settings and properties.
def load_settings(self, settings_mapfile=None):
if settings_mapfile == None:
settings_mapfile = \
self.storage.get(self.id.storage('settings'), default='\n')
try:
settings = mapfile.parse(settings_mapfile)
except mapfile.InvalidMapfileContents, e:
raise Exception('Invalid settings file for bugdir %s\n'
                            '(BE version mismatch?)' % self.id.user())
self._setup_saved_settings(settings)
self._setup_severities(self.severities)
self._setup_status(self.active_status, self.inactive_status)
def save_settings(self):
mf = mapfile.generate(self._get_saved_settings())
self.storage.set(self.id.storage('settings'), mf)
def load_all_bugs(self):
"""
Warning: this could take a while.
"""
self._clear_bugs()
for uuid in self.uuids():
self._load_bug(uuid)
def save(self):
"""
Save any loaded contents to storage. Because of lazy loading
of bugs and comments, this is actually not too inefficient.
However, if self.storage.is_writeable() == True, then any
changes are automatically written to storage as soon as they
happen, so calling this method will just waste time (unless
something else has been messing with your stored files).
"""
self.storage.add(self.id.storage(), directory=True)
self.storage.add(self.id.storage('settings'), parent=self.id.storage(),
directory=False)
self.save_settings()
for bug in self:
bug.save()
# methods for managing bugs
def uuids(self, use_cached_disk_uuids=True):
if use_cached_disk_uuids==False or not hasattr(self, '_uuids_cache'):
self._refresh_uuid_cache()
self._uuids_cache = self._uuids_cache.union([bug.uuid for bug in self])
return self._uuids_cache
def _refresh_uuid_cache(self):
self._uuids_cache = set()
# list bugs that are in storage
if self.storage != None and self.storage.is_readable():
child_uuids = libbe.util.id.child_uuids(
self.storage.children(self.id.storage()))
for id in child_uuids:
self._uuids_cache.add(id)
def _clear_bugs(self):
while len(self) > 0:
self.pop()
if hasattr(self, '_uuids_cache'):
del(self._uuids_cache)
self._bug_map_gen()
def _load_bug(self, uuid):
bg = bug.Bug(bugdir=self, uuid=uuid, from_storage=True)
self.append(bg)
self._bug_map_gen()
return bg
def new_bug(self, summary=None, _uuid=None):
bg = bug.Bug(bugdir=self, uuid=_uuid, summary=summary,
from_storage=False)
self.append(bg)
self._bug_map_gen()
if hasattr(self, '_uuids_cache') and not bg.uuid in self._uuids_cache:
self._uuids_cache.add(bg.uuid)
return bg
def remove_bug(self, bug):
if hasattr(self, '_uuids_cache') and bug.uuid in self._uuids_cache:
self._uuids_cache.remove(bug.uuid)
self.remove(bug)
if self.storage != None and self.storage.is_writeable():
bug.remove()
def bug_from_uuid(self, uuid):
if not self.has_bug(uuid):
raise NoBugMatches(
uuid, self.uuids(),
'No bug matches %s in %s' % (uuid, self.storage))
if self._bug_map[uuid] == None:
self._load_bug(uuid)
return self._bug_map[uuid]
def has_bug(self, bug_uuid):
if bug_uuid not in self._bug_map:
self._bug_map_gen()
if bug_uuid not in self._bug_map:
return False
return True
# methods for id generation
def sibling_uuids(self):
return []
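# Usage sketch (mirrors the SimpleBugDir test fixture below; the storage path
# is a placeholder):
#
#     store = libbe.storage.base.Storage("/path/to/bugdir")
#     store.init()
#     store.connect()
#     bd = BugDir(store, from_storage=False)
#     b = bd.new_bug(summary="Example bug")
#     b.creator = "Jane Doe <jane@example.com>"
#     bd.save()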
class RevisionedBugDir (BugDir):
"""
RevisionedBugDirs are read-only copies used for generating
diffs between revisions.
"""
def __init__(self, bugdir, revision):
storage_version = bugdir.storage.storage_version(revision)
if storage_version != libbe.storage.STORAGE_VERSION:
raise libbe.storage.InvalidStorageVersion(storage_version)
s = copy.deepcopy(bugdir.storage)
s.writeable = False
class RevisionedStorage (object):
def __init__(self, storage, default_revision):
self.s = storage
self.sget = self.s.get
self.sancestors = self.s.ancestors
self.schildren = self.s.children
self.schanged = self.s.changed
self.r = default_revision
def get(self, *args, **kwargs):
if not 'revision' in kwargs or kwargs['revision'] == None:
kwargs['revision'] = self.r
return self.sget(*args, **kwargs)
def ancestors(self, *args, **kwargs):
print 'getting ancestors', args, kwargs
if not 'revision' in kwargs or kwargs['revision'] == None:
kwargs['revision'] = self.r
ret = self.sancestors(*args, **kwargs)
print 'got ancestors', ret
return ret
def children(self, *args, **kwargs):
if not 'revision' in kwargs or kwargs['revision'] == None:
kwargs['revision'] = self.r
return self.schildren(*args, **kwargs)
def changed(self, *args, **kwargs):
if not 'revision' in kwargs or kwargs['revision'] == None:
kwargs['revision'] = self.r
return self.schanged(*args, **kwargs)
rs = RevisionedStorage(s, revision)
s.get = rs.get
s.ancestors = rs.ancestors
s.children = rs.children
s.changed = rs.changed
BugDir.__init__(self, s, from_storage=True)
self.revision = revision
def changed(self):
return self.storage.changed()
if libbe.TESTING == True:
class SimpleBugDir (BugDir):
"""
For testing. Set ``memory=True`` for a memory-only bugdir.
>>> bugdir = SimpleBugDir()
>>> uuids = list(bugdir.uuids())
>>> uuids.sort()
>>> print uuids
['a', 'b']
>>> bugdir.cleanup()
"""
def __init__(self, memory=True, versioned=False):
if memory == True:
storage = None
else:
dir = utility.Dir()
self._dir_ref = dir # postpone cleanup since dir.cleanup() removes dir.
if versioned == False:
storage = libbe.storage.base.Storage(dir.path)
else:
storage = libbe.storage.base.VersionedStorage(dir.path)
storage.init()
storage.connect()
BugDir.__init__(self, storage=storage, uuid='abc123')
bug_a = self.new_bug(summary='Bug A', _uuid='a')
bug_a.creator = 'John Doe <[email protected]>'
bug_a.time = 0
bug_b = self.new_bug(summary='Bug B', _uuid='b')
bug_b.creator = 'Jane Doe <[email protected]>'
bug_b.time = 0
bug_b.status = 'closed'
if self.storage != None:
self.storage.disconnect() # flush to storage
self.storage.connect()
def cleanup(self):
if self.storage != None:
self.storage.writeable = True
self.storage.disconnect()
self.storage.destroy()
if hasattr(self, '_dir_ref'):
self._dir_ref.cleanup()
def flush_reload(self):
if self.storage != None:
self.storage.disconnect()
self.storage.connect()
self._clear_bugs()
# class BugDirTestCase(unittest.TestCase):
# def setUp(self):
# self.dir = utility.Dir()
# self.bugdir = BugDir(self.dir.path, sink_to_existing_root=False,
# allow_storage_init=True)
# self.storage = self.bugdir.storage
# def tearDown(self):
# self.bugdir.cleanup()
# self.dir.cleanup()
# def fullPath(self, path):
# return os.path.join(self.dir.path, path)
# def assertPathExists(self, path):
# fullpath = self.fullPath(path)
# self.failUnless(os.path.exists(fullpath)==True,
# "path %s does not exist" % fullpath)
# self.assertRaises(AlreadyInitialized, BugDir,
# self.dir.path, assertNewBugDir=True)
# def versionTest(self):
# if self.storage != None and self.storage.versioned == False:
# return
# original = self.bugdir.storage.commit("Began versioning")
# bugA = self.bugdir.bug_from_uuid("a")
# bugA.status = "fixed"
# self.bugdir.save()
# new = self.storage.commit("Fixed bug a")
# dupdir = self.bugdir.duplicate_bugdir(original)
# self.failUnless(dupdir.root != self.bugdir.root,
# "%s, %s" % (dupdir.root, self.bugdir.root))
# bugAorig = dupdir.bug_from_uuid("a")
# self.failUnless(bugA != bugAorig,
# "\n%s\n%s" % (bugA.string(), bugAorig.string()))
# bugAorig.status = "fixed"
# self.failUnless(bug.cmp_status(bugA, bugAorig)==0,
# "%s, %s" % (bugA.status, bugAorig.status))
# self.failUnless(bug.cmp_severity(bugA, bugAorig)==0,
# "%s, %s" % (bugA.severity, bugAorig.severity))
# self.failUnless(bug.cmp_assigned(bugA, bugAorig)==0,
# "%s, %s" % (bugA.assigned, bugAorig.assigned))
# self.failUnless(bug.cmp_time(bugA, bugAorig)==0,
# "%s, %s" % (bugA.time, bugAorig.time))
# self.failUnless(bug.cmp_creator(bugA, bugAorig)==0,
# "%s, %s" % (bugA.creator, bugAorig.creator))
# self.failUnless(bugA == bugAorig,
# "\n%s\n%s" % (bugA.string(), bugAorig.string()))
# self.bugdir.remove_duplicate_bugdir()
# self.failUnless(os.path.exists(dupdir.root)==False,
# str(dupdir.root))
# def testRun(self):
# self.bugdir.new_bug(uuid="a", summary="Ant")
# self.bugdir.new_bug(uuid="b", summary="Cockroach")
# self.bugdir.new_bug(uuid="c", summary="Praying mantis")
# length = len(self.bugdir)
# self.failUnless(length == 3, "%d != 3 bugs" % length)
# uuids = list(self.bugdir.uuids())
# self.failUnless(len(uuids) == 3, "%d != 3 uuids" % len(uuids))
# self.failUnless(uuids == ["a","b","c"], str(uuids))
# bugA = self.bugdir.bug_from_uuid("a")
# bugAprime = self.bugdir.bug_from_shortname("a")
# self.failUnless(bugA == bugAprime, "%s != %s" % (bugA, bugAprime))
# self.bugdir.save()
# self.versionTest()
# def testComments(self, sync_with_disk=False):
# if sync_with_disk == True:
# self.bugdir.set_sync_with_disk(True)
# self.bugdir.new_bug(uuid="a", summary="Ant")
# bug = self.bugdir.bug_from_uuid("a")
# comm = bug.comment_root
# rep = comm.new_reply("Ants are small.")
# rep.new_reply("And they have six legs.")
# if sync_with_disk == False:
# self.bugdir.save()
# self.bugdir.set_sync_with_disk(True)
# self.bugdir._clear_bugs()
# bug = self.bugdir.bug_from_uuid("a")
# bug.load_comments()
# if sync_with_disk == False:
# self.bugdir.set_sync_with_disk(False)
# self.failUnless(len(bug.comment_root)==1, len(bug.comment_root))
# for index,comment in enumerate(bug.comments()):
# if index == 0:
# repLoaded = comment
# self.failUnless(repLoaded.uuid == rep.uuid, repLoaded.uuid)
# self.failUnless(comment.sync_with_disk == sync_with_disk,
# comment.sync_with_disk)
# self.failUnless(comment.content_type == "text/plain",
# comment.content_type)
# self.failUnless(repLoaded.settings["Content-type"] == \
# "text/plain",
# repLoaded.settings)
# self.failUnless(repLoaded.body == "Ants are small.",
# repLoaded.body)
# elif index == 1:
# self.failUnless(comment.in_reply_to == repLoaded.uuid,
# repLoaded.uuid)
# self.failUnless(comment.body == "And they have six legs.",
# comment.body)
# else:
# self.failIf(True,
# "Invalid comment: %d\n%s" % (index, comment))
# def testSyncedComments(self):
# self.testComments(sync_with_disk=True)
class SimpleBugDirTestCase (unittest.TestCase):
def setUp(self):
# create a pre-existing bugdir in a temporary directory
self.dir = utility.Dir()
self.storage = libbe.storage.base.Storage(self.dir.path)
self.storage.init()
self.storage.connect()
self.bugdir = BugDir(self.storage)
self.bugdir.new_bug(summary="Hopefully not imported",
_uuid="preexisting")
self.storage.disconnect()
self.storage.connect()
def tearDown(self):
if self.storage != None:
self.storage.disconnect()
self.storage.destroy()
self.dir.cleanup()
def testOnDiskCleanLoad(self):
"""
SimpleBugDir(memory==False) should not import
preexisting bugs.
"""
bugdir = SimpleBugDir(memory=False)
self.failUnless(bugdir.storage.is_readable() == True,
bugdir.storage.is_readable())
self.failUnless(bugdir.storage.is_writeable() == True,
bugdir.storage.is_writeable())
uuids = sorted([bug.uuid for bug in bugdir])
self.failUnless(uuids == ['a', 'b'], uuids)
bugdir.flush_reload()
uuids = sorted(bugdir.uuids())
self.failUnless(uuids == ['a', 'b'], uuids)
uuids = sorted([bug.uuid for bug in bugdir])
self.failUnless(uuids == [], uuids)
bugdir.load_all_bugs()
uuids = sorted([bug.uuid for bug in bugdir])
self.failUnless(uuids == ['a', 'b'], uuids)
bugdir.cleanup()
def testInMemoryCleanLoad(self):
"""
SimpleBugDir(memory==True) should not import
preexisting bugs.
"""
bugdir = SimpleBugDir(memory=True)
self.failUnless(bugdir.storage == None, bugdir.storage)
uuids = sorted([bug.uuid for bug in bugdir])
self.failUnless(uuids == ['a', 'b'], uuids)
uuids = sorted([bug.uuid for bug in bugdir])
self.failUnless(uuids == ['a', 'b'], uuids)
bugdir._clear_bugs()
uuids = sorted(bugdir.uuids())
self.failUnless(uuids == [], uuids)
uuids = sorted([bug.uuid for bug in bugdir])
self.failUnless(uuids == [], uuids)
bugdir.cleanup()
unitsuite =unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
suite = unittest.TestSuite([unitsuite, doctest.DocTestSuite()])
# def _get_settings(self, settings_path, for_duplicate_bugdir=False):
# allow_no_storage = not self.storage.path_in_root(settings_path)
# if allow_no_storage == True:
# assert for_duplicate_bugdir == True
# if self.sync_with_disk == False and for_duplicate_bugdir == False:
# # duplicates can ignore this bugdir's .sync_with_disk status
# raise DiskAccessRequired("_get settings")
# try:
# settings = mapfile.map_load(self.storage, settings_path, allow_no_storage)
# except storage.NoSuchFile:
# settings = {"storage_name": "None"}
# return settings
# def _save_settings(self, settings_path, settings,
# for_duplicate_bugdir=False):
# allow_no_storage = not self.storage.path_in_root(settings_path)
# if allow_no_storage == True:
# assert for_duplicate_bugdir == True
# if self.sync_with_disk == False and for_duplicate_bugdir == False:
# # duplicates can ignore this bugdir's .sync_with_disk status
# raise DiskAccessRequired("_save settings")
# self.storage.mkdir(self.get_path(), allow_no_storage)
# mapfile.map_save(self.storage, settings_path, settings, allow_no_storage) | PypiClean |
/Cooker-1.0.tar.gz/Cooker-1.0/cooker/sites.py | import urllib2
from cooker.util import alert
from cooker.parser import HTMLTagsParser
class Site():
"""
Base class for all sites
"""
def __init__(self, *args, **kwargs):
pass
def get_data(self, *args, **kwargs):
"""Entry point for fetching data from a site
"""
url = kwargs.get('url', '')
if not url:
url = self.create_url(*args, **kwargs)
html = self.fetch_content(url)
parser = HTMLTagsParser(kwargs.get('tags', []))
parser.feed(html)
parsed_content = parser.get_data()
title, inp, out = self.get_cases(parsed_content)
return { 'name': title,
'path': kwargs.get('path', '.'),
'inp': inp,
'out': out
}
@classmethod
def create_url(cls, *args, **kwargs):
"""Constructs the url for given problem and contest"""
if args or kwargs:
raise NotImplementedError
raise ValueError
@classmethod
def get_cases(cls, parsed_content):
"""Abstract method for getting problem title and test cases
"""
if parsed_content:
raise NotImplementedError
raise ValueError
@classmethod
def submit(cls, *args, **kwargs):
"""Abstract method for submitting a problem
"""
raise NotImplementedError
@classmethod
def fetch_content(cls, url):
"""Fetch contents of the given url
@return String
"""
# Get a file-like object for the url
if not url:
raise ValueError
try:
resp = urllib2.urlopen(url)
return resp.read()
except urllib2.URLError:
# Raise a custom error
alert('Unable to connect to the internet.')
return ''
class CodeChef(Site):
"""
Handles communication with www.codechef.com
"""
def get_data(self, *args, **kwargs):
"""Extend the base class method. Set a few defaults
for the particular site.
"""
kwargs['tags'] = ['title', 'pre']
return Site.get_data(self, *args, **kwargs)
def get_cases(self, parsed_content):
"""Extract the input / output test cases
Needs testing since CodeChef has no fixed format for cases
"""
title = parsed_content['title'][0].split(' |')[0]
title = title.replace(' ', '-').lower()
pre = parsed_content['pre']
inp, out = '', ''
# Hack for one format
if len(pre) == 2:
inp = pre[0]
out = pre[1]
elif len(pre) == 4:
inp = pre[1]
out = pre[3]
return (title, inp, out)
def create_url(self, *args, **kwargs):
"""Constructs the url for given problem and contest
"""
contest = kwargs.get('contest', '').upper()
problem = kwargs.get('problem', '').upper()
if contest:
contest += '/'
base = "http://www.codechef.com"
return "%s/%sproblems/%s" % (base, contest, problem)
def submit(self, *args, **kwargs):
"""Submit a problem to CodeChef"""
raise NotImplementedError | PypiClean |
/LingTree-0.7.tar.gz/LingTree-0.7/py_src/lingtree/tree.py | from builtins import hex
from builtins import str
from builtins import object
import sys
import re
atom_pl = re.compile(r"[a-z][a-zA-Z_0-9]*$")
unwanted_pl = re.compile(r"([\\'])")
def escape_prolog(string):
if not string:
return "''"
if atom_pl.match(string):
return string
res = unwanted_pl.sub(r"\\\1", string)
return "'%s'" % (res,)
unwanted_mrg = re.compile(r"([^A-Za-z0-9\x80-\xff\-_])")
def escape_mrg(string):
if not string:
return ''
return unwanted_mrg.sub(r"\\\1", string)
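# Worked examples for the two escape helpers (inputs chosen for illustration):
#
#     escape_prolog("np1")    -> np1         (already a valid Prolog atom)
#     escape_prolog("Foo's")  -> 'Foo\'s'    (quoted, apostrophe backslash-escaped)
#     escape_mrg("NP(SB)")    -> NP\(SB\)    (non-word characters escaped)
#     escape_mrg("")          -> ""          (empty input returns an empty string)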
def bottomup_enumeration(nodes):
# print "bottomup_enumeration: %s"%(nodes)
for n in nodes:
for n1 in bottomup_enumeration(n.children):
yield n1
yield n
def descendants(node):
for n in node.children:
yield n
for n1 in descendants(n):
yield n1
return
def determine_tokenspan(node):
if not node.isTerminal():
assert node.children, (node.cat, node.id)
node.start = min([x.start for x in node.children])
node.end = max([x.end for x in node.children])
class Tree(object):
    """represents a syntax tree"""
    __slots__ = ['node_table', 'roots', 'terminals', '__dict__']
def __getstate__(self):
return (self.node_table,
self.roots,
self.terminals,
self.__dict__)
def __setstate__(self, state):
self.node_table, self.roots, self.terminals, self.__dict__ = state
"""represents a syntax tree"""
def __init__(self):
self.node_table = {}
self.roots = []
self.terminals = []
def __iter__(self):
return iter(self.roots)
def bottomup_enumeration(self):
return bottomup_enumeration(self.roots)
def topdown_enumeration(self):
for n in self.roots:
yield n
for n1 in descendants(n):
yield n1
return
def determine_tokenspan_all(self):
"determines the tokenspan for all nodes and sorts children accordingly"
for node in self.bottomup_enumeration():
determine_tokenspan(node)
node.children.sort(key=lambda x: x.start)
self.roots.sort(key=lambda x: x.start)
def check_roots(self):
for n in self.roots:
assert n.parent == None
self.check_nodes(n, [])
def check_nodes(self, node, parents):
if node.parent == None:
assert parents == []
else:
assert node.parent == parents[-1]
parents.append(node)
for n in node.children:
assert not n in parents
self.check_nodes(n, parents)
del parents[-1]
def renumber_ids(self, nodes=None, start=500):
"""gives ids to all nonterminal nodes."""
pos = start
if nodes == None:
nodes = self.roots
for n in nodes:
if not n.isTerminal():
# print "Renumber %r: entering %s, pos=%d"%(n.id,pos)
pos = 1+self.renumber_ids(n.children, pos)
#sys.stderr.write("Renumber %r: %s => %d\n"%(n,n.id,pos))
n.id = "%s" % pos
self.node_table[n.id] = n
return pos
def check_nodetable(self):
for key in self.node_table:
if self.node_table[key].id != key:
raise "Nodetable: node %s(%r) has id %s" % (key,
self.node_table[key], self.node_table[key].id)
assert self.node_table[key].id == key
if self.node_table[key].parent == None:
assert self.node_table[key] in self.roots
else:
parent = self.node_table[key].parent
assert self.node_table[parent.id] == parent
def discontinuity(self, nodes, index, sent_node):
"""returns True iff there is a discontinuity between
the Nth and the N+1th member of nodes, ignoring
punctuation and parentheses."""
if nodes[index].end == nodes[index+1].start:
return False
sys.stderr.write('Looking for a discontinuity between %r and %r' % (
self.terminals[nodes[index].end],
self.terminals[nodes[index+1].start]))
for n in self.terminals[nodes[index].end:nodes[index+1].start]:
n1 = n
while n1 != None:
if n1 == sent_node:
return True
n1 = n1.parent
return False
# abstract base class for all nodes
class Node(object):
def __init__(self, cat):
self.id = None
self.start = -1
self.end = -1
self.cat = cat
self.children = []
self.parent = None
def add_at(self, node, pos):
self.children[pos:pos] = [node]
node.set_parent(self)
def append(self, node):
self.children.append(node)
node.set_parent(self)
def insert(self, node):
"inserts a node at the appropriate position"
node.set_parent(self)
for (i, n) in enumerate(self.children):
if (n.start >= node.start):
self.children[i:i] = [node]
return
self.append(node)
def set_parent(self, parent):
self.parent = parent
class NontermNode(Node):
"Node class for nonterminal node"
def __init__(self, cat, edge_label=None):
Node.__init__(self, cat)
self.edge_label = edge_label
self.attr = '--'
def __repr__(self):
stuff = ''
if hasattr(self, 'xml_id'):
stuff += '#'+self.xml_id
stuff += ' at '+hex(id(self))
return '<%s.%s%s>' % (self.cat, self.edge_label, stuff)
def isTerminal(self):
return False
def __str__(self):
return '<NonTerm %s #%s>' % (self.cat, self.id)
def to_penn(self):
if self.edge_label:
a = "(%s.%s " % (self.cat, self.edge_label)
else:
a = "(%s " % (self.cat)
a += ' '.join([x.to_penn() for x in self.children])
a += ")"
return a
def to_full(self, wanted_attrs):
pairs = []
for key in wanted_attrs:
pairs.append('%s=%s' %
(key, escape_mrg(str(getattr(self, key, '--')))))
a = "(%s" % (escape_mrg(self.cat))
if pairs:
a = a+"=#i[%s]" % (' '.join(pairs))
a += " %s)" % (' '.join([x.to_full(wanted_attrs) for x in self.children]),)
return a
class TerminalNode(Node):
"Node class for a preterminal node"
def __init__(self, cat, word, edge_label=None, morph=None):
Node.__init__(self, cat)
self.word = word
self.edge_label = edge_label
self.morph = morph
def __repr__(self):
if hasattr(self, 'xml_id'):
stuff = '#'+self.xml_id
else:
stuff = '(%d)' % (self.start)
return '<%s/%s%s at %s>' % (self.word, self.cat, stuff, hex(id(self)))
def isTerminal(self):
return True
def to_penn(self):
if self.edge_label:
return "(%s.%s %s)" % (self.cat, self.edge_label, self.word)
else:
return "(%s %s)" % (self.cat, self.word)
def to_full(self, wanted_attrs):
pairs = []
for key in wanted_attrs:
pairs.append('%s=%s' %
(key, escape_mrg(str(getattr(self, key, '--')))))
a = "(%s" % (escape_mrg(self.cat),)
if pairs:
a = a+"=#i[%s]" % (' '.join(pairs))
a += " %s)" % (escape_mrg(self.word),)
return a | PypiClean |
/MetaPathways-3.1.6.tar.gz/MetaPathways-3.1.6/metapathways/taxonomy/LCAComputation.py | from __future__ import division
try:
import sys
import traceback
import re
import gzip
import math
from metapathways.utils.utils import *
from metapathways.utils.metapathways_utils import (
fprintf,
printf,
GffFileParser,
getShortORFId,
)
except:
print(""" Could not load some user defined module functions""")
print(traceback.print_exc(10))
sys.exit(3)
def copyList(a, b):
[b.append(x) for x in a]
class LCAComputation:
begin_pattern = re.compile("#")
# initialize with the ncbi tree file
def __init__(self, filenames, megan_map=None):
# a readable taxon name to numeric string id map as ncbi
self.name_to_id = {}
# a readable taxon ncbi tax id to name map
self.id_to_name = {}
# this is the tree structure in a id to parent map, you can traverse it to go to the root
self.taxid_to_ptaxid = {}
self.lca_min_score = 50 # an LCA parameter for min score for a hit to be considered
        self.lca_top_percent = 10  # an LCA param to confine hits to within top_percent% of the top hit score
self.lca_min_support = (
5 # a minimum number of reads in the sample to consider a taxon to be present
)
self.results_dictionary = None
self.tax_dbname = "refseq"
self.megan_map = {} # hash between NCBI ID and taxonomic name name
self.accession_to_taxon_map = {} # hash between gi and taxon name
for filename in filenames:
filename_ = correct_filename_extension(filename)
self.loadtreefile(filename)
if megan_map:
self.load_megan_map(megan_map)
def load_megan_map(self, megan_map_file):
megan_map_file = correct_filename_extension(megan_map_file)
with gzip.open(megan_map_file, 'rt') if megan_map_file.endswith('.gz') \
else open(megan_map_file, 'r') as meganfin:
for line in meganfin:
fields = line.split("\t")
fields = list(map(str.strip, fields))
self.megan_map[fields[0]] = fields[1]
def load_accession_to_taxon_map(self, accession_to_taxon_file):
accession_to_taxon_file = correct_filename_extension(accession_to_taxon_file)
with gzip.open(accession_to_taxon_file, 'rt') if accession_to_taxon_file.endswith('.gz') \
else open(accession_to_taxon_file, 'r') as file:
for line in file:
fields = line.split("\t")
                fields = list(map(str.strip, fields))
self.accession_to_taxon_map[fields[1]] = fields[0]
def get_preferred_taxonomy(self, ncbi_id):
ncbi_id = str(ncbi_id)
if ncbi_id in self.megan_map:
exp_lin = self.get_lineage(ncbi_id)
exp_lin.reverse()
name = ""
for lid in exp_lin:
if lid in self.id_to_name:
name += self.id_to_name[lid] + ";"
            # decommissioned old format
# return self.megan_map[ncbi_id] + " (" + str(ncbi_id) + ")"
return name + " (" + str(ncbi_id) + ")"
# think about this
return None
def loadtreefile(self, tree_filename):
tree_filename = correct_filename_extension(tree_filename)
with gzip.open(tree_filename, 'rt') if tree_filename.endswith('.gz') \
else open(tree_filename, 'r') as taxonomy_file:
lines = taxonomy_file.readlines()
for line in lines:
if self.begin_pattern.search(line):
continue
fields = [x.strip() for x in line.rstrip().split("\t")]
if len(fields) != 3:
continue
if str(fields[0]) not in self.id_to_name:
self.name_to_id[str(fields[0])] = str(fields[1])
self.id_to_name[str(fields[1])] = str(fields[0])
# the taxid to ptax map has for each taxid a corresponding 3-tuple
# the first location is the pid, the second is used as a counter for
# lca while a search is traversed up the tree and the third is used for
# the min support
self.taxid_to_ptaxid[str(fields[1])] = [str(fields[2]), 0, 0]
def setParameters(self, min_score, top_percent, min_support):
self.lca_min_score = min_score
self.lca_top_percent = top_percent
self.lca_min_support = min_support
def sizeTaxnames(self):
return len(self.name_to_id)
def sizeTaxids(self):
return len(self.taxid_to_ptaxid)
def get_a_Valid_ID(self, name_group):
for name in name_group:
if name in self.name_to_id:
return self.name_to_id[name]
return -1
    # given a taxon name it returns the corresponding unique ncbi tax id
def translateNameToID(self, name):
if not name in self.name_to_id:
return None
return self.name_to_id[name]
# given a taxon id to taxon name map
def translateIdToName(self, id):
if not id in self.id_to_name:
return None
return self.id_to_name[id]
    # given a name it returns the parent's name
def getParentName(self, name):
if not name in self.name_to_id:
return None
id = self.name_to_id[name]
pid = self.getParentTaxId(id)
return self.translateIdToName(pid)
    # given an ncbi tax id it returns the parent's tax id
def getParentTaxId(self, ID):
if not ID in self.taxid_to_ptaxid:
return None
return self.taxid_to_ptaxid[ID][0]
    # given a set of ids it returns the lowest common ancestor
    # without caring about min support
    # here the LCA for a set of ids is computed as follows:
    # first we consider one ID at a time
    # for each id we traverse up the ncbi tree using the id to parent id map,
    # at the same time increasing the count on the second value of the 3-tuple
    # note that at the node where all of the individual ids (limit in number)
    # converge, the counter matches the limit for the first time while climbing up.
    # This also enables us to make the selection of id arbitrary
def get_lca(self, IDs, return_id=False):
limit = len(IDs)
for id in IDs:
tid = id
while tid in self.taxid_to_ptaxid and tid != "1":
self.taxid_to_ptaxid[tid][1] += 1
if self.taxid_to_ptaxid[tid][1] == limit:
if return_id:
return tid
else:
return self.id_to_name[tid]
tid = self.taxid_to_ptaxid[tid][0]
if return_id:
return 1
return "root"
def update_taxon_support_count(self, taxonomy):
id = self.get_a_Valid_ID([taxonomy])
tid = id
while tid in self.taxid_to_ptaxid and tid != "1":
self.taxid_to_ptaxid[tid][2] += 1
tid = self.taxid_to_ptaxid[tid][0]
def get_supported_taxon(self, taxonomy, return_id=False):
id = self.get_a_Valid_ID([taxonomy])
tid = id
# i =0
while tid in self.taxid_to_ptaxid and tid != "1":
# print str(i) + ' ' + self.translateIdToName(tid)
if self.lca_min_support > self.taxid_to_ptaxid[tid][2]:
tid = self.taxid_to_ptaxid[tid][0]
else:
if return_id:
return tid
else:
return self.translateIdToName(tid)
# i+=1
if return_id:
return tid
else:
return self.translateIdToName(tid)
# need to call this to clear the counts of reads at every node
def clear_cells(self, IDs):
limit = len(IDs)
for id in IDs:
tid = id
while tid in self.taxid_to_ptaxid and tid != "1":
# if self.taxid_to_ptaxid[tid][1]==0:
# return self.id_to_name[tid]
self.taxid_to_ptaxid[tid][1] = 0
tid = self.taxid_to_ptaxid[tid][0]
return ""
# given a set of sets of names it computes an lca
# in the format [ [name1, name2], [name3, name4,....namex] ...]
# here name1 and name2 are synonyms and so are name3 through namex
def getTaxonomy(self, name_groups, return_id=False):
IDs = []
for name_group in name_groups:
id = self.get_a_Valid_ID(name_group)
if id != -1:
IDs.append(id)
consensus = self.get_lca(IDs, return_id)
self.clear_cells(IDs)
return consensus
# extracts taxon names for a refseq annotation
def get_species(self, hit):
accession_PATT = re.compile(r"ref\|(.*)\|")
if not "product" in hit and not "target" in hit:
return None
species = []
try:
# extracting taxon names here
# if 'target' in hit:
# gires = accession_PATT.search(hit['target'])
# if gires:
# gi = gires.group(1)
# if gi in self.accession_to_taxon_map:
# species.append(self.accession_to_taxon_map[gi])
# else:
m = re.findall(r"\[([^\[]+?)\]", hit["product"])
if m != None:
copyList(m, species)
# print hit['product']
# print species
except:
return None
if species and species != "":
return species
else:
return None
# used for optimization
def set_results_dictionary(self, results_dictionary):
self.results_dictionary = results_dictionary
# this returns the megan taxonomy, i.e., it computes the lca but at the same time
# takes into consideration the parameters, min score, min support and top percent
def getMeganTaxonomy(self, orfid):
# compute the top hit wrt score
names = []
species = []
if self.tax_dbname in self.results_dictionary:
if orfid in self.results_dictionary[self.tax_dbname]:
top_score = 0
for hit in self.results_dictionary[self.tax_dbname][orfid]:
if (
hit["bitscore"] >= self.lca_min_score
and hit["bitscore"] >= top_score
):
top_score = hit["bitscore"]
for hit in self.results_dictionary[self.tax_dbname][orfid]:
if (100 - self.lca_top_percent) * top_score / 100 < hit["bitscore"]:
names = self.get_species(hit)
if names:
species.append(names)
taxonomy = self.getTaxonomy(species)
meganTaxonomy = self.get_supported_taxon(taxonomy)
return meganTaxonomy
# this is use to compute the min support for each taxon in the tree
# this is called before the getMeganTaxonomy
def compute_min_support_tree(self, annotate_gff_file, pickorfs, dbname="refseq"):
# print 'dbname' , dbname
self.tax_dbname = dbname
gffreader = GffFileParser(annotate_gff_file)
# print 'done'
try:
# if dbname=='refseq-nr-2014-01-18':
# print 'refseq', len(pickorfs)
for contig in gffreader:
# if dbname=='refseq-nr-2014-01-18':
# print 'refseq', contig
for orf in gffreader.orf_dictionary[contig]:
shortORFId = getShortORFId(orf["id"])
if re.search(r"Xrefseq", dbname):
print("refseq", contig, shortORFId, self.tax_dbname)
# print shortORFId, orf['id']
if not shortORFId in pickorfs:
continue
# if dbname=='refseq-nr-2014-01-18':
# print 'refseq', contig , shortORFId
# print ">", shortORFId, orf['id']
taxonomy = None
species = []
if self.tax_dbname in self.results_dictionary:
if re.search(r"Xrefseq", dbname):
print("hit", len(self.results_dictionary[self.tax_dbname]))
print(self.results_dictionary[self.tax_dbname].keys())
if shortORFId in self.results_dictionary[self.tax_dbname]:
# compute the top hit wrt score
top_score = 0
for hit in self.results_dictionary[self.tax_dbname][
shortORFId
]:
# print hit #,hit['bitscore'], self.lca_min_score, top_score
if (
hit["bitscore"] >= self.lca_min_score
and hit["bitscore"] >= top_score
):
top_score = hit["bitscore"]
# if dbname=='refseq-nr-2014-01-18':
# print 'hit', hit
for hit in self.results_dictionary[self.tax_dbname][
shortORFId
]:
if (100 - self.lca_top_percent) * top_score / 100 < hit[
"bitscore"
]:
names = self.get_species(hit)
if names:
species.append(names)
# print self.results_dictionary[dbname][shortORFId][0]['product']
# print orf['id']
# print orf['id'], species
# print orf['id'], len(self.results_dictionary[dbname][shortORFId]), species
taxonomy = self.getTaxonomy(species)
# taxonomy_id = self.getTaxonomy(species, return_id=True)
# print taxonomy
# print taxonomy_id
# print taxonomy, orf['id'], species
self.update_taxon_support_count(taxonomy)
# preferred_taxonomy = self.get_preferred_taxonomy(taxonomy_id)
# print taxonomy
# print preferred_taxonomy
pickorfs[shortORFId] = taxonomy
except:
import traceback
traceback.print_exc()
print("ERROR : Cannot read annotated gff file ")
    ## Weighted Taxonomic Distance (WTD)
# Implementation of the weighted taxonomic distance as described in
# Metabolic pathways for the whole community. Hanson et al. (2014)
    # monotonically decreasing function of the depth of divergence d
def step_cost(self, d):
return 1 / math.pow(2, d)
# weighted taxonomic distance between observed and expected taxa
def wtd(self, exp, obs):
exp_id = exp
obs_id = obs
exp_lin = self.get_lineage(exp_id)
obs_lin = self.get_lineage(obs_id)
sign = -1
# check to see if expected in observed lineage
# if so distance sign is positive
if exp_id in obs_lin:
sign = 1
large = None
if len(obs_lin) <= len(exp_lin):
            # expected lineage is at least as long as the observed one
large = exp_lin
small = obs_lin
else:
large = obs_lin
small = exp_lin
# calculate cost
a_cost = 0
b_cost = 0
for i in range(len(large)):
if i > 0:
a_cost += self.step_cost(len(large) - i - 1)
b_cost = 0
for j in range(len(small)):
if j > 0:
b_cost += self.step_cost(len(small) - j - 1)
if large[i] == small[j]:
return (a_cost + b_cost) * sign
return None # did not find lineages
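    # Illustrative sketch with a toy taxonomy (assumed taxid_to_ptaxid mapping
    # 'B' -> 'A' -> '1', i.e. 'A' is the parent of 'B' under root '1'):
    #   wtd(exp='A', obs='B') returns +0.5  (expected taxon is an ancestor of
    #                                        the observed one, so the sign is +)
    #   wtd(exp='B', obs='A') returns -0.5  (observed taxon is more general
    #                                        than expected, so the sign is -)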
# given an ID gets the lineage
def get_lineage(self, id):
tid = str(id)
lineage = []
lineage.append(tid)
while tid in self.taxid_to_ptaxid and tid != "1":
lineage.append(self.taxid_to_ptaxid[tid][0])
tid = self.taxid_to_ptaxid[tid][0]
return lineage | PypiClean |
# /Clinamen2-2023.5.1.tar.gz/Clinamen2-2023.5.1/clinamen2/cmaes/termination_criterion.py
from abc import ABC, abstractmethod
from typing import NamedTuple, Sequence
import numpy.typing as npt
from clinamen2.cmaes.params_and_state import (
AlgorithmParameters,
AlgorithmState,
)
class StaleLossState(NamedTuple):
"""NamedTuple to keep track of the state of a criterion.
Args:
counter: A variable to keep track of relevant steps.
compare_to: A reference value to compare to.
"""
counter: int = 0
compare_to: float = None
class CriteriaCombinationState(NamedTuple):
"""NamedTuple to keep track of a tuple of criterion states.
Args:
criteria_states: Tuple containing criteria states.
"""
criteria_states: tuple
# derived state NamedTuples
StaleStepState = StaleLossState
StaleStdState = StaleLossState
CriteriaAndState = CriteriaCombinationState
CriteriaOrState = CriteriaCombinationState
class Criterion(ABC):
"""Abstract base class for termination criteria.
Args:
parameters: Initial, immutable parameters of the CMA-ES run.
"""
def __init__(self, parameters: AlgorithmParameters):
"""Constructor"""
self.parameters = parameters
@abstractmethod
def init(self) -> NamedTuple:
"""
Initialize the associated state.
Returns:
The initial state of the Criterion.
"""
@abstractmethod
def update(
self,
criterion_state: NamedTuple,
state: AlgorithmState,
population: npt.ArrayLike,
loss: npt.ArrayLike,
) -> NamedTuple:
"""Function to update and return the Criterion
Args:
criterion_state: Current state of the Criterion.
state: Current state of the evolution.
population: Current generation of individuals.
loss: Loss of each of the current individuals.
Returns:
The updated state of the Criterion.
"""
@abstractmethod
def met(self, criterion_state: NamedTuple) -> bool:
"""
Args:
criterion_state: State of criterion to base decision on.
Returns:
True if the Criterion is fulfilled, False if not.
"""
class StaleLossCriterion(Criterion):
"""Class that implements a termination criterion of the CMA-ES.
Takes the loss trajectory into account. If the loss is stale for a
given number of generations, the criterion is met.
Args:
parameters: Initial, immutable parameters of the CMA-ES run.
threshold: Difference up to which two different loss values are
considered equal.
generations: Number of generations for which the loss has to be
stale for the criterion to be met.
"""
def __init__(
self,
parameters: AlgorithmParameters,
threshold: float,
generations: int,
):
self.threshold = threshold
self.generations = generations
super().__init__(parameters=parameters)
def init(self) -> StaleLossState:
"""Initialize the associated CriterionState.
Use base CriterionState and set counter to zero.
"""
return StaleLossState(counter=0, compare_to=None)
def update(
self,
criterion_state: StaleLossState,
state: AlgorithmState,
population: npt.ArrayLike,
loss: npt.ArrayLike,
) -> StaleLossState:
"""Function to update and return the Criterion
Args:
criterion_state: Current state of the Criterion.
state: Current state of the evolution.
population: Current generation of individuals.
loss: Loss of each of the current individuals.
Returns:
The updated state of the Criterion.
"""
compare_to = loss.min()
if (
criterion_state.compare_to is not None
and abs(criterion_state.compare_to - loss.min()) < self.threshold
):
counter = criterion_state.counter + 1
else:
counter = 0
return StaleLossState(counter=counter, compare_to=compare_to)
def met(self, criterion_state: StaleLossState) -> bool:
"""
Args:
criterion_state: State of criterion to base decision on.
Returns:
True if the Criterion is fulfilled, False if not.
"""
return criterion_state.counter >= self.generations
class StaleStepCriterion(Criterion):
"""Class that implements a termination criterion of the CMA-ES.
Takes the step size trajectory into account. If the step size is stale for
a given number of generations, the criterion is met.
Args:
parameters: Initial, immutable parameters of the CMA-ES run.
threshold: Difference up to which two different step sizes are
considered equal.
generations: Number of generations for which the step size needs to
be stale for the criterion to be met.
"""
def __init__(
self,
parameters: AlgorithmParameters,
threshold: float,
generations: int,
):
self.threshold = threshold
self.generations = generations
super().__init__(parameters=parameters)
def init(self) -> StaleStepState:
"""Initialize the associated CriterionState.
Use base CriterionState and set counter to zero.
"""
return StaleStepState(counter=0, compare_to=None)
def update(
self,
criterion_state: StaleStepState,
state: AlgorithmState,
population: npt.ArrayLike,
loss: npt.ArrayLike,
) -> StaleStepState:
"""Function to update and return the Criterion
Args:
criterion_state: Current state of the Criterion.
state: Current state of the evolution.
population: Current generation of individuals.
loss: Loss of each of the current individuals.
Returns:
The updated state of the Criterion.
"""
compare_to = state.step_size
if (
criterion_state.compare_to is not None
and abs(criterion_state.compare_to - state.step_size)
< self.threshold
):
counter = criterion_state.counter + 1
else:
counter = 0
return StaleStepState(counter=counter, compare_to=compare_to)
def met(self, criterion_state: StaleStepState) -> bool:
"""
Args:
criterion_state: State of criterion to base decision on.
Returns:
True if the Criterion is fulfilled, False if not.
"""
return criterion_state.counter >= self.generations
class StaleStdCriterion(Criterion):
"""Class that implements a termination criterion of the CMA-ES.
Takes the standard deviation within generations into account. If the std
is below a threshold for a given number of generations, the criterion is
met.
Args:
parameters: Initial, immutable parameters of the CMA-ES run.
threshold: Threshold value for std to fall below.
generations: Number of generations for which std needs to remain below
threshold for the criterion to be met.
"""
def __init__(
self,
parameters: AlgorithmParameters,
threshold: float,
generations: int,
):
self.threshold = threshold
self.generations = generations
super().__init__(parameters=parameters)
def init(self) -> StaleStdState:
"""Initialize the associated CriterionState.
Use base CriterionState and set counter to zero.
"""
return StaleStdState(counter=0, compare_to=None)
def update(
self,
criterion_state: StaleStdState,
state: AlgorithmState,
population: npt.ArrayLike,
loss: npt.ArrayLike,
    ) -> StaleStdState:
"""Function to update and return the Criterion
Args:
criterion_state: Current state of the Criterion.
state: Current state of the evolution.
population: Current generation of individuals.
loss: Loss of each of the current individuals.
Returns:
The updated state of the Criterion.
"""
if loss.std() < self.threshold:
counter = criterion_state.counter + 1
else:
counter = 0
# this criterion does not use `compare_to`
        return StaleStdState(counter=counter, compare_to=0.0)
def met(self, criterion_state: StaleStdState) -> bool:
"""
Args:
criterion_state: State of criterion to base decision on.
Returns:
True if the Criterion is fulfilled, False if not.
"""
return criterion_state.counter >= self.generations
class CriteriaCombination(Criterion, ABC):
"""Abstract class that combines criteria.
Args:
parameters: Initial, immutable parameters of the CMA-ES run.
criteria: Sequence of criteria to be combined.
"""
def __init__(
self,
parameters: AlgorithmParameters,
criteria: Sequence,
):
self.criteria = criteria
super().__init__(parameters=parameters)
    def init(self) -> CriteriaCombinationState:
"""Initialize the associated CriterionState instances."""
criteria_states = []
for criterion in self.criteria:
criteria_states.append(criterion.init())
return CriteriaCombinationState(criteria_states=tuple(criteria_states))
def update(
self,
criterion_state: CriteriaCombinationState,
state: AlgorithmState,
population: npt.ArrayLike,
loss: npt.ArrayLike,
) -> CriteriaCombinationState:
"""Function to update and return the criteria
Args:
criterion_state: NamedTuple containing tuple of criteria states.
state: Current state of the evolution.
population: Current generation of individuals.
loss: Loss of each of the current individuals.
Returns:
NamedTuple with tuple of the updated states of the criteria.
"""
criteria_states = [
criterion.update(
criterion_state=criterion_state.criteria_states[c],
state=state,
population=population,
loss=loss,
)
for c, criterion in enumerate(self.criteria)
]
return CriteriaCombinationState(criteria_states=tuple(criteria_states))
@abstractmethod
def met(self, criterion_state: CriteriaCombinationState) -> bool:
"""
Args:
criterion_state: NamedTuple containing tuple of criteria states.
Returns:
True if criteria combination is fulfilled, False otherwise.
"""
class CriteriaAnd(CriteriaCombination):
"""Class that combines criteria that all have to be fulfilled.
Evaluates multiple criteria (instances of Criterion). All have to meet
    their respective conditions for the CriteriaAnd to be met.
Args:
parameters: Initial, immutable parameters of the CMA-ES run.
criteria: Sequence of criteria to be combined.
"""
def met(self, criterion_state: CriteriaAndState) -> bool:
"""
Args:
criterion_state: NamedTuple containing tuple of criteria states.
Returns:
True if all criteria are fulfilled, False if any is not.
"""
for c, criterion in enumerate(self.criteria):
if not criterion.met(criterion_state.criteria_states[c]):
return False
return True
class CriteriaOr(CriteriaCombination):
"""Class that combines criteria were one has to be fulfilled.
Evaluates multiple criteria (instances of Criterion). Any one has to meet
their respective parameters for the CriterionOr to be met.
Args:
parameters: Initial, immutable parameters of the CMA-ES run.
criteria: Sequence of criteria to be combined.
"""
def met(self, criterion_state: CriteriaOrState) -> bool:
"""
Args:
criterion_state: NamedTuple containing tuple of criteria states.
Returns:
True if any criteria is fulfilled, False if none are.
"""
for c, criterion in enumerate(self.criteria):
if criterion.met(criterion_state.criteria_states[c]):
return True
return False | PypiClean |
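# Hedged usage sketch (not part of this module): combine two criteria with
# CriteriaOr and feed them per-generation losses. `params` stands for the
# AlgorithmParameters object of the run; `state` and `population` are unused
# by these two criteria, so None is passed purely for illustration.
#
#     import numpy as np
#     stop = CriteriaOr(
#         parameters=params,
#         criteria=[
#             StaleLossCriterion(params, threshold=1e-8, generations=20),
#             StaleStdCriterion(params, threshold=1e-10, generations=20),
#         ],
#     )
#     stop_state = stop.init()
#     for generation_loss in loss_per_generation:
#         stop_state = stop.update(stop_state, state=None, population=None,
#                                  loss=np.asarray(generation_loss))
#         if stop.met(stop_state):
#             break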
# /Hosein_Tabr-4.7.5.tar.gz/Hosein_Tabr-4.7.5/Hosein_Tabr/Robino.py
from requests import get, post
from pyrubika.encryption import encryption
from random import randint, choice
from Hosein_Tabr.Copyright import copyright
class rubino:
def __init__(self, auth):
self.auth = auth
self.print = copyright.CopyRight
    @staticmethod
    def _getUrl():
return f'https://rubino{randint(1,30)}.iranlms.ir/'
def _request(self,inData,method):
data = {"api_version": "0","auth": self.auth,"client": {"app_name": "Main","app_version": "3.0.2","lang_code": "fa","package": "app.rbmain.a","platform": "Android"},"data": inData,"method": method}
while True:
try:
return post(rubino._getUrl(),json=data).json()
except:
continue
def follow(self,followee_id,profile_id=None):
inData = {"f_type": "Follow","followee_id": followee_id,"profile_id": profile_id}
method = 'requestFollow'
while True:
try:
return self._request(inData,method)
except:continue
def getPostByShareLink(self,link,profile_id=None):
inData = {"share_string":link,"profile_id":profile_id}
method = "getPostByShareLink"
while True:
try:
return self._request(inData,method).get('data')
except:continue
def addPostViewCount(self,post_id,post_target_id):
inData = {"post_id":post_id,"post_profile_id":post_target_id}
method = "addPostViewCount"
while True:
try:
return self._request(inData,method)
except:continue
def getStoryIds(self,target_profile_id,profile_id=None):
inData = {"profile_id":profile_id,"target_profile_id":target_profile_id}
method = 'getStoryIds'
while True:
try:
return self._request(inData,method)
except:continue
def updateProfile(self,profile_id=None):
inData = {"profile_id":profile_id,"profile_status":"Public"}
method = 'updateProfile'
while True:
try:
return self._request(inData,method)
except:continue
def getRecentFollowingPosts(self,profile_id=None):
inData = {"equal":False,"limit":30,"sort":"FromMax","profile_id":profile_id}
method = 'getRecentFollowingPosts'
while True:
try:
return self._request(inData,method)
except:continue
def getProfileList(self):
inData = {"equal":False,"limit":10,"sort":"FromMax"}
method = 'getProfileList'
while True:
try:
return self._request(inData,method)
except:continue
def getMyProfileInfo(self,profile_id=None):
inData = {"profile_id":profile_id}
method = 'getMyProfileInfo'
while True:
try:
return self._request(inData,method)
except:continue
def Like(self,post_id,target_post,prof=None):
inData ={"action_type":"Like","post_id":post_id,"post_profile_id":target_post,"profile_id":prof}
method = 'likePostAction'
while True:
try:
return self._request(inData,method)
except:continue
    def getShareLink(self, post_id, post_profile, prof=None):
        inData = {"post_id":post_id,"post_profile_id":post_profile,"profile_id":prof}
method = 'getShareLink'
while True:
try:
return self._request(inData,method)
except:continue
def addViewStory(self,story,ids,prof=None):
indata = {"profile_id":prof,"story_ids":[ids],"story_profile_id":story}
method = 'addViewStory'
while True:
try:
return self._request(indata,method)
except:continue
def createPage(self,name,username,bio=None):
inData = {"bio": bio,"name": name,"username": username}
method = 'createPage'
while True:
try:
return self._request(inData,method)
except:continue
    def comment(self,text,post_id,post_target,prof=None):
        inData = {"content": text,"post_id": post_id,"post_profile_id": post_target,"rnd":f"{randint(100000,999999999)}" ,"profile_id":prof}
method = 'addComment'
while True:
try:
return self._request(inData,method)
except:continue
def UnLike(self,post_id,post_profile_id,prof=None):
inData = {"action_type":"Unlike","post_id":post_id,"post_profile_id":post_profile_id,"profile_id":prof}
method ='likePostAction'
while True:
try:
return self._request(inData,method)
except:continue
    def savePost(self,post_id,post_profile_id,prof=None):
inData = {"action_type":"Bookmark","post_id":post_id,"post_profile_id":post_profile_id,"profile_id":prof}
method ='postBookmarkAction'
while True:
try:
return self._request(inData,method)
except:continue | PypiClean |
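# Hedged usage sketch (the token and profile id below are placeholders, not
# real credentials):
#
#     bot = rubino("AUTH_TOKEN")
#     print(bot.getMyProfileInfo())
#     bot.follow(followee_id="TARGET_PROFILE_ID")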
# /CoDocParser-0.2.49.tar.gz/CoDocParser-0.2.49/docparser/implements/html_document_parser.py
import os
from lxml import etree
from docparser.core.document_parser_base import DocumentParserBase
class HtmlDocumentParser(DocumentParserBase):
"""
    HTML document parser.
"""
def __init__(self, file, configs):
"""
        Initialize the parser.
        :param file: file path
        :param configs: extraction configuration
"""
self._file = file
self._configs = configs
if not os.path.exists(file):
raise FileNotFoundError
self._html = etree.parse(file, etree.HTMLParser())
def parse(self):
"""
        Parse data according to the configuration.
        :return: the extracted data and a dict of errors
"""
data = {}
errors = {}
for key in self._configs.keys():
item_config = self._configs[key]
if item_config["type"] == 'table':
errs = self._pre_check_and_process_table_config(item_config)
if len(errs.keys()) > 0:
errors[key] = errs
else:
data[key] = self._extract_table(item_config)
else:
text, errs = self._extract_text(item_config)
if errs and len(errs.keys()) > 0:
errors[key] = errs
else:
data[key] = text
return data, errors
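# Note on the configuration schema (inferred from the example in __main__
# below): each top-level key selects an extractor via its "type" -- "table"
# entries carry a rect plus column definitions, while any other type is
# handled as a text extractor with a keyword-bounded rect and a regex pattern.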
if __name__ == '__main__':
converter = HtmlDocumentParser(
r"C:\Users\86134\Desktop\projects\email\653\cmacgm_noticeofarrival_csclyellowsea_0bh9ew1ma-at210727102222_769469_000019.xlsx",
{
# "vessel_name": {
# "type": "text",
# "rect": {
# "left_keyword": "VESSEL:",
# "right_keyword": "VOYAGE:",
# "bottom_keyword": "OPERATIONAL DISCH. PORT: PLACE"
# },
# "pattern": ".*"
# },
# "pod_eta": {
# "type": "text",
# "rect": {
# "left_keyword": "POD ETA:",
# "bottom_keyword": "FPD ETA:"
# },
# "pattern": ".*"
# },
# "it_number:": {
# "type": "text",
# "rect": {
# "left_keyword": "IT NUMBER:",
# "right_keyword": "PLACE OF ISSUE:",
# },
# "pattern": ".*"
# },
# "it_issued_date:": {
# "type": "text",
# "rect": {
# "left_keyword": "IT ISSUED DATE:",
# },
# "pattern": ".*"
# },
# "firms_code": {
# "type": "text",
# "rect": {
# "left_keyword": "FIRMS CODE:",
# },
# "pattern": ".*"
# },
# "shipper": {
# "type": "text",
# "rect": {
# "top_keyword": "SHIPPER",
# "bottom_keyword": "PLEASE NOTE :",
# },
# "pattern": ".*"
# }
"containers": {
"type": "table",
"extractor": "mixed",
"max_rows": 1,
"row_split_ref_col_name": "container_no",
"col_split_chars": " ",
"rect": {
"top": {
"keyword": "CONTAINER # ",
"include": True
},
"bottom": {
"keyword": "PLEASE NOTE :",
}
},
"columns": [
{
"name": "container_no",
"title": "CONTAINER #",
"title_h_align": "center",
"title_v_align": "middle",
"content_pattern": "\\w{0,20}",
}, {
"name": "seal_no",
"title": "SEAL #",
"title_h_align": "center",
"title_v_align": "middle",
"content_pattern": "\\w{0,20}",
}, {
"name": "container_size_type",
"title": "SIZE/TYPE #",
"title_h_align": "center",
"title_v_align": "middle",
"content_pattern": "\\d{1,10}\\s{1,2}\\[a-z|A-Z]{2,5}",
}, {
"name": "weight",
"title": "WEIGHT",
"title_h_align": "center",
"title_v_align": "middle",
"content_pattern": "\\d{0,10}",
}, {
"name": "measure",
"title": "MEASURE",
"title_h_align": "center",
"title_v_align": "middle",
"content_pattern": "\\w{0,5}",
}, {
"name": "free_business_last_free",
"title": "FREE BUSINESS LAST FREE",
"title_h_align": "center",
"title_v_align": "middle",
"childrens": [
{
"name": "day_at_port",
"title": "DAYS AT PORT",
"title_h_align": "center",
"title_v_align": "middle",
"content_pattern": "\\w{0,20}",
},
{
"name": "day_at_ramp",
"title": "DAY AT RAMP",
"title_h_align": "center",
"title_v_align": "middle",
"content_pattern": "\\d{1,2}/\\d{1,2}/\\d{1,2}",
}
]
}, {
"name": "pickup_no",
"title": "PICKUP #",
"title_h_align": "center",
"title_v_align": "middle",
"content_pattern": "\\w{0,20}",
},
]
}
})
    data, errors = converter.parse()
print(data) | PypiClean |
# /Bitcoin_Price-0.3.tar.gz/Bitcoin_Price-0.3/BitcoinPrice/Bitcoin_Price.py
import requests
import time
import sys
from datetime import datetime
def help():
print("Format is : Bitcoin_Price command price")
print("The following command is acceptable:")
print("\tn : if you do not want to set any bound")
print("\tl : if you want to set only Lower Bound.")
print("\tu : if you want to set only Upper Bound.")
print("\tb : if you want to set both bounds.")
print("\t Lower Bound should come before Upper Bound")
# Notification function if the bitcoin price falls below the given lower bound.
def emergency(value):
data = {"value1": value}
url = 'https://maker.ifttt.com/trigger/emergency_message/with/key/jQMPZa_jNCtD7FIGhD9uohInvAIVB7-_XI1H_GCh2xD'
requests.post(url, json=data)
# Notification function if the bitcoin price crosses the upper bound.
def bounce(value):
data = {"value1": value}
url = 'https://maker.ifttt.com/trigger/bounce/with/key/jQMPZa_jNCtD7FIGhD9uohInvAIVB7-_XI1H_GCh2xD'
requests.post(url, json=data)
# Regular updating of price of bitcoin.
def update(price_list):
price_list = "<br>".join(price_list)
data = {"value1": price_list}
url = 'https://maker.ifttt.com/trigger/bitcoin_price/with/key/jQMPZa_jNCtD7FIGhD9uohInvAIVB7-_XI1H_GCh2xD'
requests.post(url, json=data)
# Function for fetching the current bitcoin price from the CoinDesk API.
def getting_price():
response = (requests.get('https://api.coindesk.com/v1/bpi/currentprice.json')).json()
value = response['bpi']['USD']['rate']
value = float(value.replace(",", ""))
date_time = datetime.now().strftime("%D %H:%M")
return [date_time, value]
# Function for governing all types of notifications.
def notify(Lower_bound, Upper_bound):
    # Two variables for tracking the time since the last out-of-range notification.
time_lower = 0
time_upper = 0
count1 = 0
count2 = 0
    price_list = [] # For saving 5 bitcoin prices taken at different times.
    # Infinite loop
while True:
        price = getting_price() # Get the bitcoin price; the function returns a list [date_time, price].
if price[1] < Lower_bound and Lower_bound != 0: # Checking if price is lower than lower bound and if user set the lower bound or not.
price[1] = "<i>{}</i>".format(price[1]) # The price of bitcoin will shows in Italic.
if count1 == 0 or time_lower >= 3600: # If time for last calling of emergency function is more than or equal to 1 hour, it will call again.
emergency(price[1])
count1 = 1
time_lower = 0 # Setting time again to 0.
elif price[1] > Upper_bound and Upper_bound != 0: # Checking if price is more than upper bound and if user set upper bound or not.
price[1] = "<b>{}</b>".format(price[1]) # The price of bitcoin will shows in Bold.
if count2 == 0 or time_upper >= 3600: # If time for last calling of bounce function is more than or equal to 1 hour, it will call again.
bounce(price[1])
count2 = 1
time_upper = 0 # Setting time again to 0.
price = "{}: ${}".format(price[0], price[1]) # Making format in "Date Time: $Price".
price_list.append(price)
if len(price_list) >= 5: # If we get 5 values then we will call update function.
update(price_list)
price_list = [] # Emptying List.
# keeping track of time.
time_lower += 60*5
time_upper += 60*5
        time.sleep(60*5) # Stopping the program for 5 minutes.
# We take upper and lower bounds, so check that the lower bound is below the upper bound.
def main():
if 2 <= len(sys.argv) <= 4:
if sys.argv[1] == "n":
Lower_bound = 0
Upper_bound = 0
notify(Lower_bound, Upper_bound)
elif sys.argv[1] == "l":
Lower_bound = int(sys.argv[2])
Upper_bound = 0
notify(Lower_bound, Upper_bound)
elif sys.argv[1] == "u":
Lower_bound = 0
Upper_bound = int(sys.argv[2])
notify(Lower_bound, Upper_bound)
elif sys.argv[1] == "b":
Lower_bound = int(sys.argv[2])
Upper_bound = int(sys.argv[3])
if Lower_bound > Upper_bound and Upper_bound != 0:
print("Lower Bound should come before than Upper Bound.")
else:
notify(Lower_bound, Upper_bound)
elif sys.argv[1] == "h":
help()
else:
help()
else:
help()
if __name__ == "__main__":
main() | PypiClean |
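# Example invocations (a sketch mirroring the help() text above):
#   Bitcoin_Price n               -> price updates only, no bounds
#   Bitcoin_Price l 25000         -> also alert when the price drops below 25000
#   Bitcoin_Price u 30000         -> also alert when the price rises above 30000
#   Bitcoin_Price b 25000 30000   -> alert whenever the price leaves the 25000-30000 range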
// /B9gemyaeix-4.14.1.tar.gz/B9gemyaeix-4.14.1/weblate/static/editor/zen.js
(function () {
var EditorBase = WLT.Editor.Base;
var $window = $(window);
var $document = $(document);
function ZenEditor() {
EditorBase.call(this);
$window.scroll(() => {
var $loadingNext = $("#loading-next");
var loader = $("#zen-load");
if ($window.scrollTop() >= $document.height() - 2 * $window.height()) {
if (
$("#last-section").length > 0 ||
$loadingNext.css("display") !== "none"
) {
return;
}
$loadingNext.show();
loader.data("offset", 20 + parseInt(loader.data("offset"), 10));
$.get(
loader.attr("href") + "&offset=" + loader.data("offset"),
(data) => {
$loadingNext.hide();
$(".zen tfoot").before(data);
this.init();
initHighlight(document);
}
);
}
});
/*
* Ensure current editor is reasonably located in the window
* - show whole element if moving back
* - scroll down if in bottom half of the window
*/
$document.on("focus", ".zen .translation-editor", function () {
var current = $window.scrollTop();
var rowOffset = $(this).closest("tbody").offset().top;
if (rowOffset < current || rowOffset - current > $window.height() / 2) {
$([document.documentElement, document.body]).animate(
{
scrollTop: rowOffset,
},
100
);
}
});
$document.on("change", ".translation-editor", handleTranslationChange);
$document.on("change", ".fuzzy_checkbox", handleTranslationChange);
$document.on("change", ".review_radio", handleTranslationChange);
Mousetrap.bindGlobal("mod+end", function (e) {
$(".zen-unit:last").find(".translation-editor:first").focus();
return false;
});
Mousetrap.bindGlobal("mod+home", function (e) {
$(".zen-unit:first").find(".translation-editor:first").focus();
return false;
});
Mousetrap.bindGlobal("mod+pagedown", function (e) {
var focus = $(":focus");
if (focus.length === 0) {
$(".zen-unit:first").find(".translation-editor:first").focus();
} else {
focus
.closest(".zen-unit")
.next()
.find(".translation-editor:first")
.focus();
}
return false;
});
Mousetrap.bindGlobal("mod+pageup", function (e) {
var focus = $(":focus");
if (focus.length === 0) {
$(".zen-unit:last").find(".translation-editor:first").focus();
} else {
focus
.closest(".zen-unit")
.prev()
.find(".translation-editor:first")
.focus();
}
return false;
});
$window.on("beforeunload", function () {
if ($(".translation-modified").length > 0) {
return gettext(
"There are some unsaved changes, are you sure you want to leave?"
);
}
});
}
ZenEditor.prototype = Object.create(EditorBase.prototype);
ZenEditor.prototype.constructor = ZenEditor;
ZenEditor.prototype.init = function () {
EditorBase.prototype.init.call(this);
/* Minimal height for side-by-side editor */
$(".zen-horizontal .translator").each(function () {
var $this = $(this);
var tdHeight = $this.height();
var editorHeight = 0;
var contentHeight = $this.find("form").height();
var $editors = $this.find(".translation-editor");
$editors.each(function () {
var $editor = $(this);
editorHeight += $editor.height();
});
/* There is 10px padding */
$editors.css(
"min-height",
(tdHeight - (contentHeight - editorHeight - 10)) / $editors.length +
"px"
);
});
};
/* Handlers */
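  /*
   * handleTranslationChange (below) serializes the unit's form and POSTs it:
   * it retries shortly if a previous save of the same unit is still in
   * flight, skips the request when the payload is unchanged since the last
   * save, and on success updates the unit state cell and shows any messages.
   */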
function handleTranslationChange() {
var $this = $(this);
var $row = $this.closest("tr");
var checksum = $row.find("[name=checksum]").val();
var statusdiv = $("#status-" + checksum);
/* Wait until previous operation on this field is completed */
if (statusdiv.hasClass("unit-state-saving")) {
setTimeout(function () {
$this.trigger("change");
}, 100);
return;
}
$row.addClass("translation-modified");
var form = $row.find("form");
statusdiv.addClass("unit-state-saving");
var payload = form.serialize();
if (payload == statusdiv.data("last-payload")) {
return;
}
statusdiv.data("last-payload", payload);
$.ajax({
type: "POST",
url: form.attr("action"),
data: payload,
dataType: "json",
error: function (jqXHR, textStatus, errorThrown) {
addAlert(errorThrown);
},
success: function (data) {
statusdiv.attr("class", "unit-state-cell " + data.unit_state_class);
statusdiv.attr("title", data.unit_state_title);
$.each(data.messages, function (i, val) {
addAlert(val.text, val.kind);
});
$row.removeClass("translation-modified").addClass("translation-saved");
if (data.translationsum !== "") {
$row.find("input[name=translationsum]").val(data.translationsum);
}
},
});
}
document.addEventListener("DOMContentLoaded", function () {
new ZenEditor();
});
})(); | PypiClean |
// /Flask-Statics-Helper-1.0.0.tar.gz/Flask-Statics-Helper-1.0.0/flask_statics/static/angular/i18n/angular-locale_zh-hk.js
'use strict';
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": [
"\u4e0a\u5348",
"\u4e0b\u5348"
],
"DAY": [
"\u661f\u671f\u65e5",
"\u661f\u671f\u4e00",
"\u661f\u671f\u4e8c",
"\u661f\u671f\u4e09",
"\u661f\u671f\u56db",
"\u661f\u671f\u4e94",
"\u661f\u671f\u516d"
],
"MONTH": [
"1\u6708",
"2\u6708",
"3\u6708",
"4\u6708",
"5\u6708",
"6\u6708",
"7\u6708",
"8\u6708",
"9\u6708",
"10\u6708",
"11\u6708",
"12\u6708"
],
"SHORTDAY": [
"\u9031\u65e5",
"\u9031\u4e00",
"\u9031\u4e8c",
"\u9031\u4e09",
"\u9031\u56db",
"\u9031\u4e94",
"\u9031\u516d"
],
"SHORTMONTH": [
"1\u6708",
"2\u6708",
"3\u6708",
"4\u6708",
"5\u6708",
"6\u6708",
"7\u6708",
"8\u6708",
"9\u6708",
"10\u6708",
"11\u6708",
"12\u6708"
],
"fullDate": "y\u5e74M\u6708d\u65e5EEEE",
"longDate": "y\u5e74M\u6708d\u65e5",
"medium": "y\u5e74M\u6708d\u65e5 ah:mm:ss",
"mediumDate": "y\u5e74M\u6708d\u65e5",
"mediumTime": "ah:mm:ss",
"short": "d/M/yy ah:mm",
"shortDate": "d/M/yy",
"shortTime": "ah:mm"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "$",
"DECIMAL_SEP": ".",
"GROUP_SEP": ",",
"PATTERNS": [
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "\u00a4-",
"negSuf": "",
"posPre": "\u00a4",
"posSuf": ""
}
]
},
"id": "zh-hk",
"pluralCat": function(n, opt_precision) { return PLURAL_CATEGORY.OTHER;}
});
}]); | PypiClean |
/CheatPy-0.0.4.tar.gz/CheatPy-0.0.4/README.md

Example Code:
```py
from CheatPy import *
import time
def MENUCLI(dz:Cheat,cheat1:AddressView,cheat2:AddressView,cheat3: AddressView,cheat4: AddressView,cheat5: AddressView):
def ToStr(v: bool):
if v:
return "ON"
else:
return "OFF"
enable1 = enable2 = enable3 = enable4 = False
event1 = event2 = event3 = event4 = event5 = event6 = False
cheatbuff = {
False:[0xF7, 0xDA],
True:[0x31, 0xD2]
}
cheatbuff2 = {
False:[0x29, 0xF8 ],
True: [0x90,0x90]
}
cheatbuff3 = {
False:[ 0x39, 0x83, 0x40, 0x01, 0x00, 0x00 ],
True :[ 0x39, 0xC0, 0x90, 0x90, 0x90, 0x90 ]
}
offset = [0x60,0x98,0x40,0xA8]
print("1. No Consume Tower")
print("2. No Consume Update Tower")
print("3. No Damage")
print("4. Fast Update Tower")
print("5. Add Money")
print("6. Close")
while True:
event1 = GetKey(KeyCode.Keypad1)
if event1:
enable1 = not enable1
cheat1.SetBytes(cheatbuff[enable1])
event2 = GetKey(KeyCode.Keypad2)
if event2:
enable2 = not enable2
cheat2.SetBytes(cheatbuff[enable2])
event3 = GetKey(KeyCode.Keypad3)
if event3:
enable3 = not enable3
cheat3.SetBytes(cheatbuff2[enable3])
event4 = GetKey(KeyCode.Keypad4)
if event4:
enable4 = not enable4
cheat4.SetBytes(cheatbuff3[enable4])
event5 = GetKey(KeyCode.Keypad5)
if event5:
cheat6 = dz.GetAddressView(cheat5.Address,offset)
cheat6.SetInt(cheat6.ToInt() + 1000)
event6 = GetKey(KeyCode.Keypad6)
if event6:
dz.Close()
break
time.sleep(1)
dz = GetProcess("Defense Zone - Original.exe")
if dz:
gameassembly = dz.GetModule("GameAssembly.dll")
gameassembly_addr = gameassembly.Address
unityengine = dz.GetModule("UnityPlayer.dll").Address
test = dz.ScannerModule([0x8B, 0x90, 0x14, 0x01, 0x00, 0x00, 0x45, 0x33, 0xC0, 0xF7, 0xDA, 0x48, 0x8B, 0xCB],gameassembly,1)
if len(test) > 0:
print(hex(test[0].Address))
cheat5 = dz.GetAddressView(unityengine + 0x01614508)
if gameassembly_addr != 0:
cheat1 = dz.GetAddressView(gameassembly_addr + 0x18288C)
cheat2 = dz.GetAddressView(gameassembly_addr + 0x17BCB1)
cheat3 = dz.GetAddressView(gameassembly_addr + 0x181797)
cheat4 = dz.GetAddressView(gameassembly_addr + 0x24EAB9)
MENUCLI(dz,cheat1,cheat2,cheat3,cheat4,cheat5)
else:
Alert("Not found","Maury Dev")
``` | PypiClean |
/Findex_GUI-0.2.18-py3-none-any.whl/findex_gui/static/js/bootstrap.min.js | if("undefined"==typeof jQuery)throw new Error("Bootstrap's JavaScript requires jQuery");+function(a){"use strict";var b=a.fn.jquery.split(" ")[0].split(".");if(b[0]<2&&b[1]<9||1==b[0]&&9==b[1]&&b[2]<1||b[0]>3)throw new Error("Bootstrap's JavaScript requires jQuery version 1.9.1 or higher, but lower than version 4")}(jQuery),+function(a){"use strict";function b(){var a=document.createElement("bootstrap"),b={WebkitTransition:"webkitTransitionEnd",MozTransition:"transitionend",OTransition:"oTransitionEnd otransitionend",transition:"transitionend"};for(var c in b)if(void 0!==a.style[c])return{end:b[c]};return!1}a.fn.emulateTransitionEnd=function(b){var c=!1,d=this;a(this).one("bsTransitionEnd",function(){c=!0});var e=function(){c||a(d).trigger(a.support.transition.end)};return setTimeout(e,b),this},a(function(){a.support.transition=b(),a.support.transition&&(a.event.special.bsTransitionEnd={bindType:a.support.transition.end,delegateType:a.support.transition.end,handle:function(b){if(a(b.target).is(this))return b.handleObj.handler.apply(this,arguments)}})})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var c=a(this),e=c.data("bs.alert");e||c.data("bs.alert",e=new d(this)),"string"==typeof b&&e[b].call(c)})}var c='[data-dismiss="alert"]',d=function(b){a(b).on("click",c,this.close)};d.VERSION="3.3.7",d.TRANSITION_DURATION=150,d.prototype.close=function(b){function c(){g.detach().trigger("closed.bs.alert").remove()}var e=a(this),f=e.attr("data-target");f||(f=e.attr("href"),f=f&&f.replace(/.*(?=#[^\s]*$)/,""));var g=a("#"===f?[]:f);b&&b.preventDefault(),g.length||(g=e.closest(".alert")),g.trigger(b=a.Event("close.bs.alert")),b.isDefaultPrevented()||(g.removeClass("in"),a.support.transition&&g.hasClass("fade")?g.one("bsTransitionEnd",c).emulateTransitionEnd(d.TRANSITION_DURATION):c())};var e=a.fn.alert;a.fn.alert=b,a.fn.alert.Constructor=d,a.fn.alert.noConflict=function(){return a.fn.alert=e,this},a(document).on("click.bs.alert.data-api",c,d.prototype.close)}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.button"),f="object"==typeof b&&b;e||d.data("bs.button",e=new c(this,f)),"toggle"==b?e.toggle():b&&e.setState(b)})}var c=function(b,d){this.$element=a(b),this.options=a.extend({},c.DEFAULTS,d),this.isLoading=!1};c.VERSION="3.3.7",c.DEFAULTS={loadingText:"loading..."},c.prototype.setState=function(b){var c="disabled",d=this.$element,e=d.is("input")?"val":"html",f=d.data();b+="Text",null==f.resetText&&d.data("resetText",d[e]()),setTimeout(a.proxy(function(){d[e](null==f[b]?this.options[b]:f[b]),"loadingText"==b?(this.isLoading=!0,d.addClass(c).attr(c,c).prop(c,!0)):this.isLoading&&(this.isLoading=!1,d.removeClass(c).removeAttr(c).prop(c,!1))},this),0)},c.prototype.toggle=function(){var a=!0,b=this.$element.closest('[data-toggle="buttons"]');if(b.length){var c=this.$element.find("input");"radio"==c.prop("type")?(c.prop("checked")&&(a=!1),b.find(".active").removeClass("active"),this.$element.addClass("active")):"checkbox"==c.prop("type")&&(c.prop("checked")!==this.$element.hasClass("active")&&(a=!1),this.$element.toggleClass("active")),c.prop("checked",this.$element.hasClass("active")),a&&c.trigger("change")}else this.$element.attr("aria-pressed",!this.$element.hasClass("active")),this.$element.toggleClass("active")};var d=a.fn.button;a.fn.button=b,a.fn.button.Constructor=c,a.fn.button.noConflict=function(){return 
a.fn.button=d,this},a(document).on("click.bs.button.data-api",'[data-toggle^="button"]',function(c){var d=a(c.target).closest(".btn");b.call(d,"toggle"),a(c.target).is('input[type="radio"], input[type="checkbox"]')||(c.preventDefault(),d.is("input,button")?d.trigger("focus"):d.find("input:visible,button:visible").first().trigger("focus"))}).on("focus.bs.button.data-api blur.bs.button.data-api",'[data-toggle^="button"]',function(b){a(b.target).closest(".btn").toggleClass("focus",/^focus(in)?$/.test(b.type))})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.carousel"),f=a.extend({},c.DEFAULTS,d.data(),"object"==typeof b&&b),g="string"==typeof b?b:f.slide;e||d.data("bs.carousel",e=new c(this,f)),"number"==typeof b?e.to(b):g?e[g]():f.interval&&e.pause().cycle()})}var c=function(b,c){this.$element=a(b),this.$indicators=this.$element.find(".carousel-indicators"),this.options=c,this.paused=null,this.sliding=null,this.interval=null,this.$active=null,this.$items=null,this.options.keyboard&&this.$element.on("keydown.bs.carousel",a.proxy(this.keydown,this)),"hover"==this.options.pause&&!("ontouchstart"in document.documentElement)&&this.$element.on("mouseenter.bs.carousel",a.proxy(this.pause,this)).on("mouseleave.bs.carousel",a.proxy(this.cycle,this))};c.VERSION="3.3.7",c.TRANSITION_DURATION=600,c.DEFAULTS={interval:5e3,pause:"hover",wrap:!0,keyboard:!0},c.prototype.keydown=function(a){if(!/input|textarea/i.test(a.target.tagName)){switch(a.which){case 37:this.prev();break;case 39:this.next();break;default:return}a.preventDefault()}},c.prototype.cycle=function(b){return b||(this.paused=!1),this.interval&&clearInterval(this.interval),this.options.interval&&!this.paused&&(this.interval=setInterval(a.proxy(this.next,this),this.options.interval)),this},c.prototype.getItemIndex=function(a){return this.$items=a.parent().children(".item"),this.$items.index(a||this.$active)},c.prototype.getItemForDirection=function(a,b){var c=this.getItemIndex(b),d="prev"==a&&0===c||"next"==a&&c==this.$items.length-1;if(d&&!this.options.wrap)return b;var e="prev"==a?-1:1,f=(c+e)%this.$items.length;return this.$items.eq(f)},c.prototype.to=function(a){var b=this,c=this.getItemIndex(this.$active=this.$element.find(".item.active"));if(!(a>this.$items.length-1||a<0))return this.sliding?this.$element.one("slid.bs.carousel",function(){b.to(a)}):c==a?this.pause().cycle():this.slide(a>c?"next":"prev",this.$items.eq(a))},c.prototype.pause=function(b){return b||(this.paused=!0),this.$element.find(".next, .prev").length&&a.support.transition&&(this.$element.trigger(a.support.transition.end),this.cycle(!0)),this.interval=clearInterval(this.interval),this},c.prototype.next=function(){if(!this.sliding)return this.slide("next")},c.prototype.prev=function(){if(!this.sliding)return this.slide("prev")},c.prototype.slide=function(b,d){var e=this.$element.find(".item.active"),f=d||this.getItemForDirection(b,e),g=this.interval,h="next"==b?"left":"right",i=this;if(f.hasClass("active"))return this.sliding=!1;var j=f[0],k=a.Event("slide.bs.carousel",{relatedTarget:j,direction:h});if(this.$element.trigger(k),!k.isDefaultPrevented()){if(this.sliding=!0,g&&this.pause(),this.$indicators.length){this.$indicators.find(".active").removeClass("active");var l=a(this.$indicators.children()[this.getItemIndex(f)]);l&&l.addClass("active")}var m=a.Event("slid.bs.carousel",{relatedTarget:j,direction:h});return 
a.support.transition&&this.$element.hasClass("slide")?(f.addClass(b),f[0].offsetWidth,e.addClass(h),f.addClass(h),e.one("bsTransitionEnd",function(){f.removeClass([b,h].join(" ")).addClass("active"),e.removeClass(["active",h].join(" ")),i.sliding=!1,setTimeout(function(){i.$element.trigger(m)},0)}).emulateTransitionEnd(c.TRANSITION_DURATION)):(e.removeClass("active"),f.addClass("active"),this.sliding=!1,this.$element.trigger(m)),g&&this.cycle(),this}};var d=a.fn.carousel;a.fn.carousel=b,a.fn.carousel.Constructor=c,a.fn.carousel.noConflict=function(){return a.fn.carousel=d,this};var e=function(c){var d,e=a(this),f=a(e.attr("data-target")||(d=e.attr("href"))&&d.replace(/.*(?=#[^\s]+$)/,""));if(f.hasClass("carousel")){var g=a.extend({},f.data(),e.data()),h=e.attr("data-slide-to");h&&(g.interval=!1),b.call(f,g),h&&f.data("bs.carousel").to(h),c.preventDefault()}};a(document).on("click.bs.carousel.data-api","[data-slide]",e).on("click.bs.carousel.data-api","[data-slide-to]",e),a(window).on("load",function(){a('[data-ride="carousel"]').each(function(){var c=a(this);b.call(c,c.data())})})}(jQuery),+function(a){"use strict";function b(b){var c,d=b.attr("data-target")||(c=b.attr("href"))&&c.replace(/.*(?=#[^\s]+$)/,"");return a(d)}function c(b){return this.each(function(){var c=a(this),e=c.data("bs.collapse"),f=a.extend({},d.DEFAULTS,c.data(),"object"==typeof b&&b);!e&&f.toggle&&/show|hide/.test(b)&&(f.toggle=!1),e||c.data("bs.collapse",e=new d(this,f)),"string"==typeof b&&e[b]()})}var d=function(b,c){this.$element=a(b),this.options=a.extend({},d.DEFAULTS,c),this.$trigger=a('[data-toggle="collapse"][href="#'+b.id+'"],[data-toggle="collapse"][data-target="#'+b.id+'"]'),this.transitioning=null,this.options.parent?this.$parent=this.getParent():this.addAriaAndCollapsedClass(this.$element,this.$trigger),this.options.toggle&&this.toggle()};d.VERSION="3.3.7",d.TRANSITION_DURATION=350,d.DEFAULTS={toggle:!0},d.prototype.dimension=function(){var a=this.$element.hasClass("width");return a?"width":"height"},d.prototype.show=function(){if(!this.transitioning&&!this.$element.hasClass("in")){var b,e=this.$parent&&this.$parent.children(".panel").children(".in, .collapsing");if(!(e&&e.length&&(b=e.data("bs.collapse"),b&&b.transitioning))){var f=a.Event("show.bs.collapse");if(this.$element.trigger(f),!f.isDefaultPrevented()){e&&e.length&&(c.call(e,"hide"),b||e.data("bs.collapse",null));var g=this.dimension();this.$element.removeClass("collapse").addClass("collapsing")[g](0).attr("aria-expanded",!0),this.$trigger.removeClass("collapsed").attr("aria-expanded",!0),this.transitioning=1;var h=function(){this.$element.removeClass("collapsing").addClass("collapse in")[g](""),this.transitioning=0,this.$element.trigger("shown.bs.collapse")};if(!a.support.transition)return h.call(this);var i=a.camelCase(["scroll",g].join("-"));this.$element.one("bsTransitionEnd",a.proxy(h,this)).emulateTransitionEnd(d.TRANSITION_DURATION)[g](this.$element[0][i])}}}},d.prototype.hide=function(){if(!this.transitioning&&this.$element.hasClass("in")){var b=a.Event("hide.bs.collapse");if(this.$element.trigger(b),!b.isDefaultPrevented()){var c=this.dimension();this.$element[c](this.$element[c]())[0].offsetHeight,this.$element.addClass("collapsing").removeClass("collapse in").attr("aria-expanded",!1),this.$trigger.addClass("collapsed").attr("aria-expanded",!1),this.transitioning=1;var e=function(){this.transitioning=0,this.$element.removeClass("collapsing").addClass("collapse").trigger("hidden.bs.collapse")};return a.support.transition?void 
this.$element[c](0).one("bsTransitionEnd",a.proxy(e,this)).emulateTransitionEnd(d.TRANSITION_DURATION):e.call(this)}}},d.prototype.toggle=function(){this[this.$element.hasClass("in")?"hide":"show"]()},d.prototype.getParent=function(){return a(this.options.parent).find('[data-toggle="collapse"][data-parent="'+this.options.parent+'"]').each(a.proxy(function(c,d){var e=a(d);this.addAriaAndCollapsedClass(b(e),e)},this)).end()},d.prototype.addAriaAndCollapsedClass=function(a,b){var c=a.hasClass("in");a.attr("aria-expanded",c),b.toggleClass("collapsed",!c).attr("aria-expanded",c)};var e=a.fn.collapse;a.fn.collapse=c,a.fn.collapse.Constructor=d,a.fn.collapse.noConflict=function(){return a.fn.collapse=e,this},a(document).on("click.bs.collapse.data-api",'[data-toggle="collapse"]',function(d){var e=a(this);e.attr("data-target")||d.preventDefault();var f=b(e),g=f.data("bs.collapse"),h=g?"toggle":e.data();c.call(f,h)})}(jQuery),+function(a){"use strict";function b(b){var c=b.attr("data-target");c||(c=b.attr("href"),c=c&&/#[A-Za-z]/.test(c)&&c.replace(/.*(?=#[^\s]*$)/,""));var d=c&&a(c);return d&&d.length?d:b.parent()}function c(c){c&&3===c.which||(a(e).remove(),a(f).each(function(){var d=a(this),e=b(d),f={relatedTarget:this};e.hasClass("open")&&(c&&"click"==c.type&&/input|textarea/i.test(c.target.tagName)&&a.contains(e[0],c.target)||(e.trigger(c=a.Event("hide.bs.dropdown",f)),c.isDefaultPrevented()||(d.attr("aria-expanded","false"),e.removeClass("open").trigger(a.Event("hidden.bs.dropdown",f)))))}))}function d(b){return this.each(function(){var c=a(this),d=c.data("bs.dropdown");d||c.data("bs.dropdown",d=new g(this)),"string"==typeof b&&d[b].call(c)})}var e=".dropdown-backdrop",f='[data-toggle="dropdown"]',g=function(b){a(b).on("click.bs.dropdown",this.toggle)};g.VERSION="3.3.7",g.prototype.toggle=function(d){var e=a(this);if(!e.is(".disabled, :disabled")){var f=b(e),g=f.hasClass("open");if(c(),!g){"ontouchstart"in document.documentElement&&!f.closest(".navbar-nav").length&&a(document.createElement("div")).addClass("dropdown-backdrop").insertAfter(a(this)).on("click",c);var h={relatedTarget:this};if(f.trigger(d=a.Event("show.bs.dropdown",h)),d.isDefaultPrevented())return;e.trigger("focus").attr("aria-expanded","true"),f.toggleClass("open").trigger(a.Event("shown.bs.dropdown",h))}return!1}},g.prototype.keydown=function(c){if(/(38|40|27|32)/.test(c.which)&&!/input|textarea/i.test(c.target.tagName)){var d=a(this);if(c.preventDefault(),c.stopPropagation(),!d.is(".disabled, :disabled")){var e=b(d),g=e.hasClass("open");if(!g&&27!=c.which||g&&27==c.which)return 27==c.which&&e.find(f).trigger("focus"),d.trigger("click");var h=" li:not(.disabled):visible a",i=e.find(".dropdown-menu"+h);if(i.length){var j=i.index(c.target);38==c.which&&j>0&&j--,40==c.which&&j<i.length-1&&j++,~j||(j=0),i.eq(j).trigger("focus")}}}};var h=a.fn.dropdown;a.fn.dropdown=d,a.fn.dropdown.Constructor=g,a.fn.dropdown.noConflict=function(){return a.fn.dropdown=h,this},a(document).on("click.bs.dropdown.data-api",c).on("click.bs.dropdown.data-api",".dropdown form",function(a){a.stopPropagation()}).on("click.bs.dropdown.data-api",f,g.prototype.toggle).on("keydown.bs.dropdown.data-api",f,g.prototype.keydown).on("keydown.bs.dropdown.data-api",".dropdown-menu",g.prototype.keydown)}(jQuery),+function(a){"use strict";function b(b,d){return this.each(function(){var e=a(this),f=e.data("bs.modal"),g=a.extend({},c.DEFAULTS,e.data(),"object"==typeof b&&b);f||e.data("bs.modal",f=new c(this,g)),"string"==typeof b?f[b](d):g.show&&f.show(d)})}var 
c=function(b,c){this.options=c,this.$body=a(document.body),this.$element=a(b),this.$dialog=this.$element.find(".modal-dialog"),this.$backdrop=null,this.isShown=null,this.originalBodyPad=null,this.scrollbarWidth=0,this.ignoreBackdropClick=!1,this.options.remote&&this.$element.find(".modal-content").load(this.options.remote,a.proxy(function(){this.$element.trigger("loaded.bs.modal")},this))};c.VERSION="3.3.7",c.TRANSITION_DURATION=300,c.BACKDROP_TRANSITION_DURATION=150,c.DEFAULTS={backdrop:!0,keyboard:!0,show:!0},c.prototype.toggle=function(a){return this.isShown?this.hide():this.show(a)},c.prototype.show=function(b){var d=this,e=a.Event("show.bs.modal",{relatedTarget:b});this.$element.trigger(e),this.isShown||e.isDefaultPrevented()||(this.isShown=!0,this.checkScrollbar(),this.setScrollbar(),this.$body.addClass("modal-open"),this.escape(),this.resize(),this.$element.on("click.dismiss.bs.modal",'[data-dismiss="modal"]',a.proxy(this.hide,this)),this.$dialog.on("mousedown.dismiss.bs.modal",function(){d.$element.one("mouseup.dismiss.bs.modal",function(b){a(b.target).is(d.$element)&&(d.ignoreBackdropClick=!0)})}),this.backdrop(function(){var e=a.support.transition&&d.$element.hasClass("fade");d.$element.parent().length||d.$element.appendTo(d.$body),d.$element.show().scrollTop(0),d.adjustDialog(),e&&d.$element[0].offsetWidth,d.$element.addClass("in"),d.enforceFocus();var f=a.Event("shown.bs.modal",{relatedTarget:b});e?d.$dialog.one("bsTransitionEnd",function(){d.$element.trigger("focus").trigger(f)}).emulateTransitionEnd(c.TRANSITION_DURATION):d.$element.trigger("focus").trigger(f)}))},c.prototype.hide=function(b){b&&b.preventDefault(),b=a.Event("hide.bs.modal"),this.$element.trigger(b),this.isShown&&!b.isDefaultPrevented()&&(this.isShown=!1,this.escape(),this.resize(),a(document).off("focusin.bs.modal"),this.$element.removeClass("in").off("click.dismiss.bs.modal").off("mouseup.dismiss.bs.modal"),this.$dialog.off("mousedown.dismiss.bs.modal"),a.support.transition&&this.$element.hasClass("fade")?this.$element.one("bsTransitionEnd",a.proxy(this.hideModal,this)).emulateTransitionEnd(c.TRANSITION_DURATION):this.hideModal())},c.prototype.enforceFocus=function(){a(document).off("focusin.bs.modal").on("focusin.bs.modal",a.proxy(function(a){document===a.target||this.$element[0]===a.target||this.$element.has(a.target).length||this.$element.trigger("focus")},this))},c.prototype.escape=function(){this.isShown&&this.options.keyboard?this.$element.on("keydown.dismiss.bs.modal",a.proxy(function(a){27==a.which&&this.hide()},this)):this.isShown||this.$element.off("keydown.dismiss.bs.modal")},c.prototype.resize=function(){this.isShown?a(window).on("resize.bs.modal",a.proxy(this.handleUpdate,this)):a(window).off("resize.bs.modal")},c.prototype.hideModal=function(){var a=this;this.$element.hide(),this.backdrop(function(){a.$body.removeClass("modal-open"),a.resetAdjustments(),a.resetScrollbar(),a.$element.trigger("hidden.bs.modal")})},c.prototype.removeBackdrop=function(){this.$backdrop&&this.$backdrop.remove(),this.$backdrop=null},c.prototype.backdrop=function(b){var d=this,e=this.$element.hasClass("fade")?"fade":"";if(this.isShown&&this.options.backdrop){var f=a.support.transition&&e;if(this.$backdrop=a(document.createElement("div")).addClass("modal-backdrop "+e).appendTo(this.$body),this.$element.on("click.dismiss.bs.modal",a.proxy(function(a){return 
this.ignoreBackdropClick?void(this.ignoreBackdropClick=!1):void(a.target===a.currentTarget&&("static"==this.options.backdrop?this.$element[0].focus():this.hide()))},this)),f&&this.$backdrop[0].offsetWidth,this.$backdrop.addClass("in"),!b)return;f?this.$backdrop.one("bsTransitionEnd",b).emulateTransitionEnd(c.BACKDROP_TRANSITION_DURATION):b()}else if(!this.isShown&&this.$backdrop){this.$backdrop.removeClass("in");var g=function(){d.removeBackdrop(),b&&b()};a.support.transition&&this.$element.hasClass("fade")?this.$backdrop.one("bsTransitionEnd",g).emulateTransitionEnd(c.BACKDROP_TRANSITION_DURATION):g()}else b&&b()},c.prototype.handleUpdate=function(){this.adjustDialog()},c.prototype.adjustDialog=function(){var a=this.$element[0].scrollHeight>document.documentElement.clientHeight;this.$element.css({paddingLeft:!this.bodyIsOverflowing&&a?this.scrollbarWidth:"",paddingRight:this.bodyIsOverflowing&&!a?this.scrollbarWidth:""})},c.prototype.resetAdjustments=function(){this.$element.css({paddingLeft:"",paddingRight:""})},c.prototype.checkScrollbar=function(){var a=window.innerWidth;if(!a){var b=document.documentElement.getBoundingClientRect();a=b.right-Math.abs(b.left)}this.bodyIsOverflowing=document.body.clientWidth<a,this.scrollbarWidth=this.measureScrollbar()},c.prototype.setScrollbar=function(){var a=parseInt(this.$body.css("padding-right")||0,10);this.originalBodyPad=document.body.style.paddingRight||"",this.bodyIsOverflowing&&this.$body.css("padding-right",a+this.scrollbarWidth)},c.prototype.resetScrollbar=function(){this.$body.css("padding-right",this.originalBodyPad)},c.prototype.measureScrollbar=function(){var a=document.createElement("div");a.className="modal-scrollbar-measure",this.$body.append(a);var b=a.offsetWidth-a.clientWidth;return this.$body[0].removeChild(a),b};var d=a.fn.modal;a.fn.modal=b,a.fn.modal.Constructor=c,a.fn.modal.noConflict=function(){return a.fn.modal=d,this},a(document).on("click.bs.modal.data-api",'[data-toggle="modal"]',function(c){var d=a(this),e=d.attr("href"),f=a(d.attr("data-target")||e&&e.replace(/.*(?=#[^\s]+$)/,"")),g=f.data("bs.modal")?"toggle":a.extend({remote:!/#/.test(e)&&e},f.data(),d.data());d.is("a")&&c.preventDefault(),f.one("show.bs.modal",function(a){a.isDefaultPrevented()||f.one("hidden.bs.modal",function(){d.is(":visible")&&d.trigger("focus")})}),b.call(f,g,this)})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.tooltip"),f="object"==typeof b&&b;!e&&/destroy|hide/.test(b)||(e||d.data("bs.tooltip",e=new c(this,f)),"string"==typeof b&&e[b]())})}var c=function(a,b){this.type=null,this.options=null,this.enabled=null,this.timeout=null,this.hoverState=null,this.$element=null,this.inState=null,this.init("tooltip",a,b)};c.VERSION="3.3.7",c.TRANSITION_DURATION=150,c.DEFAULTS={animation:!0,placement:"top",selector:!1,template:'<div class="tooltip" role="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>',trigger:"hover focus",title:"",delay:0,html:!1,container:!1,viewport:{selector:"body",padding:0}},c.prototype.init=function(b,c,d){if(this.enabled=!0,this.type=b,this.$element=a(c),this.options=this.getOptions(d),this.$viewport=this.options.viewport&&a(a.isFunction(this.options.viewport)?this.options.viewport.call(this,this.$element):this.options.viewport.selector||this.options.viewport),this.inState={click:!1,hover:!1,focus:!1},this.$element[0]instanceof document.constructor&&!this.options.selector)throw new Error("`selector` option must be specified when 
initializing "+this.type+" on the window.document object!");for(var e=this.options.trigger.split(" "),f=e.length;f--;){var g=e[f];if("click"==g)this.$element.on("click."+this.type,this.options.selector,a.proxy(this.toggle,this));else if("manual"!=g){var h="hover"==g?"mouseenter":"focusin",i="hover"==g?"mouseleave":"focusout";this.$element.on(h+"."+this.type,this.options.selector,a.proxy(this.enter,this)),this.$element.on(i+"."+this.type,this.options.selector,a.proxy(this.leave,this))}}this.options.selector?this._options=a.extend({},this.options,{trigger:"manual",selector:""}):this.fixTitle()},c.prototype.getDefaults=function(){return c.DEFAULTS},c.prototype.getOptions=function(b){return b=a.extend({},this.getDefaults(),this.$element.data(),b),b.delay&&"number"==typeof b.delay&&(b.delay={show:b.delay,hide:b.delay}),b},c.prototype.getDelegateOptions=function(){var b={},c=this.getDefaults();return this._options&&a.each(this._options,function(a,d){c[a]!=d&&(b[a]=d)}),b},c.prototype.enter=function(b){var c=b instanceof this.constructor?b:a(b.currentTarget).data("bs."+this.type);return c||(c=new this.constructor(b.currentTarget,this.getDelegateOptions()),a(b.currentTarget).data("bs."+this.type,c)),b instanceof a.Event&&(c.inState["focusin"==b.type?"focus":"hover"]=!0),c.tip().hasClass("in")||"in"==c.hoverState?void(c.hoverState="in"):(clearTimeout(c.timeout),c.hoverState="in",c.options.delay&&c.options.delay.show?void(c.timeout=setTimeout(function(){"in"==c.hoverState&&c.show()},c.options.delay.show)):c.show())},c.prototype.isInStateTrue=function(){for(var a in this.inState)if(this.inState[a])return!0;return!1},c.prototype.leave=function(b){var c=b instanceof this.constructor?b:a(b.currentTarget).data("bs."+this.type);if(c||(c=new this.constructor(b.currentTarget,this.getDelegateOptions()),a(b.currentTarget).data("bs."+this.type,c)),b instanceof a.Event&&(c.inState["focusout"==b.type?"focus":"hover"]=!1),!c.isInStateTrue())return clearTimeout(c.timeout),c.hoverState="out",c.options.delay&&c.options.delay.hide?void(c.timeout=setTimeout(function(){"out"==c.hoverState&&c.hide()},c.options.delay.hide)):c.hide()},c.prototype.show=function(){var b=a.Event("show.bs."+this.type);if(this.hasContent()&&this.enabled){this.$element.trigger(b);var d=a.contains(this.$element[0].ownerDocument.documentElement,this.$element[0]);if(b.isDefaultPrevented()||!d)return;var e=this,f=this.tip(),g=this.getUID(this.type);this.setContent(),f.attr("id",g),this.$element.attr("aria-describedby",g),this.options.animation&&f.addClass("fade");var h="function"==typeof this.options.placement?this.options.placement.call(this,f[0],this.$element[0]):this.options.placement,i=/\s?auto?\s?/i,j=i.test(h);j&&(h=h.replace(i,"")||"top"),f.detach().css({top:0,left:0,display:"block"}).addClass(h).data("bs."+this.type,this),this.options.container?f.appendTo(this.options.container):f.insertAfter(this.$element),this.$element.trigger("inserted.bs."+this.type);var k=this.getPosition(),l=f[0].offsetWidth,m=f[0].offsetHeight;if(j){var n=h,o=this.getPosition(this.$viewport);h="bottom"==h&&k.bottom+m>o.bottom?"top":"top"==h&&k.top-m<o.top?"bottom":"right"==h&&k.right+l>o.width?"left":"left"==h&&k.left-l<o.left?"right":h,f.removeClass(n).addClass(h)}var p=this.getCalculatedOffset(h,k,l,m);this.applyPlacement(p,h);var q=function(){var 
a=e.hoverState;e.$element.trigger("shown.bs."+e.type),e.hoverState=null,"out"==a&&e.leave(e)};a.support.transition&&this.$tip.hasClass("fade")?f.one("bsTransitionEnd",q).emulateTransitionEnd(c.TRANSITION_DURATION):q()}},c.prototype.applyPlacement=function(b,c){var d=this.tip(),e=d[0].offsetWidth,f=d[0].offsetHeight,g=parseInt(d.css("margin-top"),10),h=parseInt(d.css("margin-left"),10);isNaN(g)&&(g=0),isNaN(h)&&(h=0),b.top+=g,b.left+=h,a.offset.setOffset(d[0],a.extend({using:function(a){d.css({top:Math.round(a.top),left:Math.round(a.left)})}},b),0),d.addClass("in");var i=d[0].offsetWidth,j=d[0].offsetHeight;"top"==c&&j!=f&&(b.top=b.top+f-j);var k=this.getViewportAdjustedDelta(c,b,i,j);k.left?b.left+=k.left:b.top+=k.top;var l=/top|bottom/.test(c),m=l?2*k.left-e+i:2*k.top-f+j,n=l?"offsetWidth":"offsetHeight";d.offset(b),this.replaceArrow(m,d[0][n],l)},c.prototype.replaceArrow=function(a,b,c){this.arrow().css(c?"left":"top",50*(1-a/b)+"%").css(c?"top":"left","")},c.prototype.setContent=function(){var a=this.tip(),b=this.getTitle();a.find(".tooltip-inner")[this.options.html?"html":"text"](b),a.removeClass("fade in top bottom left right")},c.prototype.hide=function(b){function d(){"in"!=e.hoverState&&f.detach(),e.$element&&e.$element.removeAttr("aria-describedby").trigger("hidden.bs."+e.type),b&&b()}var e=this,f=a(this.$tip),g=a.Event("hide.bs."+this.type);if(this.$element.trigger(g),!g.isDefaultPrevented())return f.removeClass("in"),a.support.transition&&f.hasClass("fade")?f.one("bsTransitionEnd",d).emulateTransitionEnd(c.TRANSITION_DURATION):d(),this.hoverState=null,this},c.prototype.fixTitle=function(){var a=this.$element;(a.attr("title")||"string"!=typeof a.attr("data-original-title"))&&a.attr("data-original-title",a.attr("title")||"").attr("title","")},c.prototype.hasContent=function(){return this.getTitle()},c.prototype.getPosition=function(b){b=b||this.$element;var c=b[0],d="BODY"==c.tagName,e=c.getBoundingClientRect();null==e.width&&(e=a.extend({},e,{width:e.right-e.left,height:e.bottom-e.top}));var f=window.SVGElement&&c instanceof window.SVGElement,g=d?{top:0,left:0}:f?null:b.offset(),h={scroll:d?document.documentElement.scrollTop||document.body.scrollTop:b.scrollTop()},i=d?{width:a(window).width(),height:a(window).height()}:null;return a.extend({},e,h,i,g)},c.prototype.getCalculatedOffset=function(a,b,c,d){return"bottom"==a?{top:b.top+b.height,left:b.left+b.width/2-c/2}:"top"==a?{top:b.top-d,left:b.left+b.width/2-c/2}:"left"==a?{top:b.top+b.height/2-d/2,left:b.left-c}:{top:b.top+b.height/2-d/2,left:b.left+b.width}},c.prototype.getViewportAdjustedDelta=function(a,b,c,d){var e={top:0,left:0};if(!this.$viewport)return e;var f=this.options.viewport&&this.options.viewport.padding||0,g=this.getPosition(this.$viewport);if(/right|left/.test(a)){var h=b.top-f-g.scroll,i=b.top+f-g.scroll+d;h<g.top?e.top=g.top-h:i>g.top+g.height&&(e.top=g.top+g.height-i)}else{var j=b.left-f,k=b.left+f+c;j<g.left?e.left=g.left-j:k>g.right&&(e.left=g.left+g.width-k)}return e},c.prototype.getTitle=function(){var a,b=this.$element,c=this.options;return a=b.attr("data-original-title")||("function"==typeof c.title?c.title.call(b[0]):c.title)},c.prototype.getUID=function(a){do a+=~~(1e6*Math.random());while(document.getElementById(a));return a},c.prototype.tip=function(){if(!this.$tip&&(this.$tip=a(this.options.template),1!=this.$tip.length))throw new Error(this.type+" `template` option must consist of exactly 1 top-level element!");return this.$tip},c.prototype.arrow=function(){return 
this.$arrow=this.$arrow||this.tip().find(".tooltip-arrow")},c.prototype.enable=function(){this.enabled=!0},c.prototype.disable=function(){this.enabled=!1},c.prototype.toggleEnabled=function(){this.enabled=!this.enabled},c.prototype.toggle=function(b){var c=this;b&&(c=a(b.currentTarget).data("bs."+this.type),c||(c=new this.constructor(b.currentTarget,this.getDelegateOptions()),a(b.currentTarget).data("bs."+this.type,c))),b?(c.inState.click=!c.inState.click,c.isInStateTrue()?c.enter(c):c.leave(c)):c.tip().hasClass("in")?c.leave(c):c.enter(c)},c.prototype.destroy=function(){var a=this;clearTimeout(this.timeout),this.hide(function(){a.$element.off("."+a.type).removeData("bs."+a.type),a.$tip&&a.$tip.detach(),a.$tip=null,a.$arrow=null,a.$viewport=null,a.$element=null})};var d=a.fn.tooltip;a.fn.tooltip=b,a.fn.tooltip.Constructor=c,a.fn.tooltip.noConflict=function(){return a.fn.tooltip=d,this}}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.popover"),f="object"==typeof b&&b;!e&&/destroy|hide/.test(b)||(e||d.data("bs.popover",e=new c(this,f)),"string"==typeof b&&e[b]())})}var c=function(a,b){this.init("popover",a,b)};if(!a.fn.tooltip)throw new Error("Popover requires tooltip.js");c.VERSION="3.3.7",c.DEFAULTS=a.extend({},a.fn.tooltip.Constructor.DEFAULTS,{placement:"right",trigger:"click",content:"",template:'<div class="popover" role="tooltip"><div class="arrow"></div><h3 class="popover-title"></h3><div class="popover-content"></div></div>'}),c.prototype=a.extend({},a.fn.tooltip.Constructor.prototype),c.prototype.constructor=c,c.prototype.getDefaults=function(){return c.DEFAULTS},c.prototype.setContent=function(){var a=this.tip(),b=this.getTitle(),c=this.getContent();a.find(".popover-title")[this.options.html?"html":"text"](b),a.find(".popover-content").children().detach().end()[this.options.html?"string"==typeof c?"html":"append":"text"](c),a.removeClass("fade top bottom left right in"),a.find(".popover-title").html()||a.find(".popover-title").hide()},c.prototype.hasContent=function(){return this.getTitle()||this.getContent()},c.prototype.getContent=function(){var a=this.$element,b=this.options;return a.attr("data-content")||("function"==typeof b.content?b.content.call(a[0]):b.content)},c.prototype.arrow=function(){return this.$arrow=this.$arrow||this.tip().find(".arrow")};var d=a.fn.popover;a.fn.popover=b,a.fn.popover.Constructor=c,a.fn.popover.noConflict=function(){return a.fn.popover=d,this}}(jQuery),+function(a){"use strict";function b(c,d){this.$body=a(document.body),this.$scrollElement=a(a(c).is(document.body)?window:c),this.options=a.extend({},b.DEFAULTS,d),this.selector=(this.options.target||"")+" .nav li > a",this.offsets=[],this.targets=[],this.activeTarget=null,this.scrollHeight=0,this.$scrollElement.on("scroll.bs.scrollspy",a.proxy(this.process,this)),this.refresh(),this.process()}function c(c){return this.each(function(){var d=a(this),e=d.data("bs.scrollspy"),f="object"==typeof c&&c;e||d.data("bs.scrollspy",e=new b(this,f)),"string"==typeof c&&e[c]()})}b.VERSION="3.3.7",b.DEFAULTS={offset:10},b.prototype.getScrollHeight=function(){return this.$scrollElement[0].scrollHeight||Math.max(this.$body[0].scrollHeight,document.documentElement.scrollHeight)},b.prototype.refresh=function(){var b=this,c="offset",d=0;this.offsets=[],this.targets=[],this.scrollHeight=this.getScrollHeight(),a.isWindow(this.$scrollElement[0])||(c="position",d=this.$scrollElement.scrollTop()),this.$body.find(this.selector).map(function(){var 
b=a(this),e=b.data("target")||b.attr("href"),f=/^#./.test(e)&&a(e);return f&&f.length&&f.is(":visible")&&[[f[c]().top+d,e]]||null}).sort(function(a,b){return a[0]-b[0]}).each(function(){b.offsets.push(this[0]),b.targets.push(this[1])})},b.prototype.process=function(){var a,b=this.$scrollElement.scrollTop()+this.options.offset,c=this.getScrollHeight(),d=this.options.offset+c-this.$scrollElement.height(),e=this.offsets,f=this.targets,g=this.activeTarget;if(this.scrollHeight!=c&&this.refresh(),b>=d)return g!=(a=f[f.length-1])&&this.activate(a);if(g&&b<e[0])return this.activeTarget=null,this.clear();for(a=e.length;a--;)g!=f[a]&&b>=e[a]&&(void 0===e[a+1]||b<e[a+1])&&this.activate(f[a])},b.prototype.activate=function(b){
this.activeTarget=b,this.clear();var c=this.selector+'[data-target="'+b+'"],'+this.selector+'[href="'+b+'"]',d=a(c).parents("li").addClass("active");d.parent(".dropdown-menu").length&&(d=d.closest("li.dropdown").addClass("active")),d.trigger("activate.bs.scrollspy")},b.prototype.clear=function(){a(this.selector).parentsUntil(this.options.target,".active").removeClass("active")};var d=a.fn.scrollspy;a.fn.scrollspy=c,a.fn.scrollspy.Constructor=b,a.fn.scrollspy.noConflict=function(){return a.fn.scrollspy=d,this},a(window).on("load.bs.scrollspy.data-api",function(){a('[data-spy="scroll"]').each(function(){var b=a(this);c.call(b,b.data())})})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.tab");e||d.data("bs.tab",e=new c(this)),"string"==typeof b&&e[b]()})}var c=function(b){this.element=a(b)};c.VERSION="3.3.7",c.TRANSITION_DURATION=150,c.prototype.show=function(){var b=this.element,c=b.closest("ul:not(.dropdown-menu)"),d=b.data("target");if(d||(d=b.attr("href"),d=d&&d.replace(/.*(?=#[^\s]*$)/,"")),!b.parent("li").hasClass("active")){var e=c.find(".active:last a"),f=a.Event("hide.bs.tab",{relatedTarget:b[0]}),g=a.Event("show.bs.tab",{relatedTarget:e[0]});if(e.trigger(f),b.trigger(g),!g.isDefaultPrevented()&&!f.isDefaultPrevented()){var h=a(d);this.activate(b.closest("li"),c),this.activate(h,h.parent(),function(){e.trigger({type:"hidden.bs.tab",relatedTarget:b[0]}),b.trigger({type:"shown.bs.tab",relatedTarget:e[0]})})}}},c.prototype.activate=function(b,d,e){function f(){g.removeClass("active").find("> .dropdown-menu > .active").removeClass("active").end().find('[data-toggle="tab"]').attr("aria-expanded",!1),b.addClass("active").find('[data-toggle="tab"]').attr("aria-expanded",!0),h?(b[0].offsetWidth,b.addClass("in")):b.removeClass("fade"),b.parent(".dropdown-menu").length&&b.closest("li.dropdown").addClass("active").end().find('[data-toggle="tab"]').attr("aria-expanded",!0),e&&e()}var g=d.find("> .active"),h=e&&a.support.transition&&(g.length&&g.hasClass("fade")||!!d.find("> .fade").length);g.length&&h?g.one("bsTransitionEnd",f).emulateTransitionEnd(c.TRANSITION_DURATION):f(),g.removeClass("in")};var d=a.fn.tab;a.fn.tab=b,a.fn.tab.Constructor=c,a.fn.tab.noConflict=function(){return a.fn.tab=d,this};var e=function(c){c.preventDefault(),b.call(a(this),"show")};a(document).on("click.bs.tab.data-api",'[data-toggle="tab"]',e).on("click.bs.tab.data-api",'[data-toggle="pill"]',e)}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.affix"),f="object"==typeof b&&b;e||d.data("bs.affix",e=new c(this,f)),"string"==typeof b&&e[b]()})}var c=function(b,d){this.options=a.extend({},c.DEFAULTS,d),this.$target=a(this.options.target).on("scroll.bs.affix.data-api",a.proxy(this.checkPosition,this)).on("click.bs.affix.data-api",a.proxy(this.checkPositionWithEventLoop,this)),this.$element=a(b),this.affixed=null,this.unpin=null,this.pinnedOffset=null,this.checkPosition()};c.VERSION="3.3.7",c.RESET="affix affix-top affix-bottom",c.DEFAULTS={offset:0,target:window},c.prototype.getState=function(a,b,c,d){var e=this.$target.scrollTop(),f=this.$element.offset(),g=this.$target.height();if(null!=c&&"top"==this.affixed)return e<c&&"top";if("bottom"==this.affixed)return null!=c?!(e+this.unpin<=f.top)&&"bottom":!(e+g<=a-d)&&"bottom";var h=null==this.affixed,i=h?e:f.top,j=h?g:b;return null!=c&&e<=c?"top":null!=d&&i+j>=a-d&&"bottom"},c.prototype.getPinnedOffset=function(){if(this.pinnedOffset)return 
this.pinnedOffset;this.$element.removeClass(c.RESET).addClass("affix");var a=this.$target.scrollTop(),b=this.$element.offset();return this.pinnedOffset=b.top-a},c.prototype.checkPositionWithEventLoop=function(){setTimeout(a.proxy(this.checkPosition,this),1)},c.prototype.checkPosition=function(){if(this.$element.is(":visible")){var b=this.$element.height(),d=this.options.offset,e=d.top,f=d.bottom,g=Math.max(a(document).height(),a(document.body).height());"object"!=typeof d&&(f=e=d),"function"==typeof e&&(e=d.top(this.$element)),"function"==typeof f&&(f=d.bottom(this.$element));var h=this.getState(g,b,e,f);if(this.affixed!=h){null!=this.unpin&&this.$element.css("top","");var i="affix"+(h?"-"+h:""),j=a.Event(i+".bs.affix");if(this.$element.trigger(j),j.isDefaultPrevented())return;this.affixed=h,this.unpin="bottom"==h?this.getPinnedOffset():null,this.$element.removeClass(c.RESET).addClass(i).trigger(i.replace("affix","affixed")+".bs.affix")}"bottom"==h&&this.$element.offset({top:g-b-f})}};var d=a.fn.affix;a.fn.affix=b,a.fn.affix.Constructor=c,a.fn.affix.noConflict=function(){return a.fn.affix=d,this},a(window).on("load",function(){a('[data-spy="affix"]').each(function(){var c=a(this),d=c.data();d.offset=d.offset||{},null!=d.offsetBottom&&(d.offset.bottom=d.offsetBottom),null!=d.offsetTop&&(d.offset.top=d.offsetTop),b.call(c,d)})})}(jQuery); | PypiClean |
/Euphorie-15.0.2.tar.gz/Euphorie-15.0.2/src/euphorie/client/resources/oira/script/chunks/87489.d6fb40ef0db3305f3489.min.js | (self.webpackChunk_patternslib_patternslib=self.webpackChunk_patternslib_patternslib||[]).push([[87489],{87489:function(e){e.exports=function(e){const n="[a-z'][a-zA-Z0-9_']*",i="("+n+":"+n+"|"+n+")",a={keyword:"after and andalso|10 band begin bnot bor bsl bzr bxor case catch cond div end fun if let not of orelse|10 query receive rem try when xor",literal:"false true"},r=e.COMMENT("%","$"),t={className:"number",begin:"\\b(\\d+(_\\d+)*#[a-fA-F0-9]+(_[a-fA-F0-9]+)*|\\d+(_\\d+)*(\\.\\d+(_\\d+)*)?([eE][-+]?\\d+)?)",relevance:0},s={begin:"fun\\s+"+n+"/\\d+"},c={begin:i+"\\(",end:"\\)",returnBegin:!0,relevance:0,contains:[{begin:i,relevance:0},{begin:"\\(",end:"\\)",endsWithParent:!0,returnEnd:!0,relevance:0}]},o={begin:/\{/,end:/\}/,relevance:0},d={begin:"\\b_([A-Z][A-Za-z0-9_]*)?",relevance:0},l={begin:"[A-Z][a-zA-Z0-9_]*",relevance:0},b={begin:"#"+e.UNDERSCORE_IDENT_RE,relevance:0,returnBegin:!0,contains:[{begin:"#"+e.UNDERSCORE_IDENT_RE,relevance:0},{begin:/\{/,end:/\}/,relevance:0}]},_={beginKeywords:"fun receive if try case",end:"end",keywords:a};_.contains=[r,s,e.inherit(e.APOS_STRING_MODE,{className:""}),_,c,e.QUOTE_STRING_MODE,t,o,d,l,b];const g=[r,s,_,c,e.QUOTE_STRING_MODE,t,o,d,l,b];c.contains[1].contains=g,o.contains=g,b.contains[1].contains=g;const u={className:"params",begin:"\\(",end:"\\)",contains:g};return{name:"Erlang",aliases:["erl"],keywords:a,illegal:"(</|\\*=|\\+=|-=|/\\*|\\*/|\\(\\*|\\*\\))",contains:[{className:"function",begin:"^"+n+"\\s*\\(",end:"->",returnBegin:!0,illegal:"\\(|#|//|/\\*|\\\\|:|;",contains:[u,e.inherit(e.TITLE_MODE,{begin:n})],starts:{end:";|\\.",keywords:a,contains:g}},r,{begin:"^-",end:"\\.",relevance:0,excludeEnd:!0,returnBegin:!0,keywords:{$pattern:"-"+e.IDENT_RE,keyword:["-module","-record","-undef","-export","-ifdef","-ifndef","-author","-copyright","-doc","-vsn","-import","-include","-include_lib","-compile","-define","-else","-endif","-file","-behaviour","-behavior","-spec"].map((e=>`${e}|1.5`)).join(" ")},contains:[u]},t,e.QUOTE_STRING_MODE,b,d,l,o,{begin:/\.$/}]}}}}]);
//# sourceMappingURL=87489.d6fb40ef0db3305f3489.min.js.map | PypiClean |
/EnvelopesWithSMTPS-0.4.tar.gz/EnvelopesWithSMTPS-0.4/CHANGES.rst | Envelopes Changelog
===================
Version 0.4
-----------
Published on 2013-11-10
* Closes `#10 <https://github.com/tomekwojcik/envelopes/issues/10>`_.
* Closes `#11 <https://github.com/tomekwojcik/envelopes/issues/11>`_.
* Closes `#12 <https://github.com/tomekwojcik/envelopes/issues/12>`_.
* Closes `#13 <https://github.com/tomekwojcik/envelopes/issues/13>`_.
* Closes `#15 <https://github.com/tomekwojcik/envelopes/issues/15>`_.
* Closes `#16 <https://github.com/tomekwojcik/envelopes/issues/16>`_.
Version 0.3
-----------
Published on 2013-08-19
* Closes `#6 <https://github.com/tomekwojcik/envelopes/issues/6>`_.
* Closes `#5 <https://github.com/tomekwojcik/envelopes/issues/5>`_.
Version 0.2
-----------
Published on 2013-08-10
* Closes `#3 <https://github.com/tomekwojcik/envelopes/issues/3>`_.
* Closes `#1 <https://github.com/tomekwojcik/envelopes/issues/1>`_.
Version 0.1.1
-------------
Published on 2013-08-06
* Fixes for PyPI.
Version 0.1
-----------
Published on 2013-08-06
* Initial version.
| PypiClean |
/Bibtex_File_Comparison_and_Update-1.4.tar.gz/Bibtex_File_Comparison_and_Update-1.4/docs/build/html/_static/doctools.js | * select a different prefix for underscore
*/
$u = _.noConflict();
/**
* make the code below compatible with browsers without
* an installed firebug like debugger
if (!window.console || !console.firebug) {
var names = ["log", "debug", "info", "warn", "error", "assert", "dir",
"dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace",
"profile", "profileEnd"];
window.console = {};
for (var i = 0; i < names.length; ++i)
window.console[names[i]] = function() {};
}
*/
/**
* small helper function to urldecode strings
*/
jQuery.urldecode = function(x) {
return decodeURIComponent(x).replace(/\+/g, ' ');
};
/**
* small helper function to urlencode strings
*/
jQuery.urlencode = encodeURIComponent;
/**
* This function returns the parsed url parameters of the
* current request. Multiple values per key are supported,
* it will always return arrays of strings for the value parts.
*/
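// For example (illustrative): getQueryParameters('?q=foo&q=bar&page=2')
// would return {q: ['foo', 'bar'], page: ['2']}.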
jQuery.getQueryParameters = function(s) {
if (typeof s == 'undefined')
s = document.location.search;
var parts = s.substr(s.indexOf('?') + 1).split('&');
var result = {};
for (var i = 0; i < parts.length; i++) {
var tmp = parts[i].split('=', 2);
var key = jQuery.urldecode(tmp[0]);
var value = jQuery.urldecode(tmp[1]);
if (key in result)
result[key].push(value);
else
result[key] = [value];
}
return result;
};
/**
* highlight a given string on a jquery object by wrapping it in
* span elements with the given class name.
*/
jQuery.fn.highlightText = function(text, className) {
function highlight(node) {
if (node.nodeType == 3) {
var val = node.nodeValue;
var pos = val.toLowerCase().indexOf(text);
if (pos >= 0 && !jQuery(node.parentNode).hasClass(className)) {
var span = document.createElement("span");
span.className = className;
span.appendChild(document.createTextNode(val.substr(pos, text.length)));
node.parentNode.insertBefore(span, node.parentNode.insertBefore(
document.createTextNode(val.substr(pos + text.length)),
node.nextSibling));
node.nodeValue = val.substr(0, pos);
}
}
else if (!jQuery(node).is("button, select, textarea")) {
jQuery.each(node.childNodes, function() {
highlight(this);
});
}
}
return this.each(function() {
highlight(this);
});
};
/*
* backward compatibility for jQuery.browser
* This will be supported until firefox bug is fixed.
*/
if (!jQuery.browser) {
jQuery.uaMatch = function(ua) {
ua = ua.toLowerCase();
var match = /(chrome)[ \/]([\w.]+)/.exec(ua) ||
/(webkit)[ \/]([\w.]+)/.exec(ua) ||
/(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) ||
/(msie) ([\w.]+)/.exec(ua) ||
ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) ||
[];
return {
browser: match[ 1 ] || "",
version: match[ 2 ] || "0"
};
};
jQuery.browser = {};
jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true;
}
/**
* Small JavaScript module for the documentation.
*/
var Documentation = {
init : function() {
this.fixFirefoxAnchorBug();
this.highlightSearchWords();
this.initIndexTable();
},
/**
* i18n support
*/
TRANSLATIONS : {},
PLURAL_EXPR : function(n) { return n == 1 ? 0 : 1; },
LOCALE : 'unknown',
// gettext and ngettext don't access this so that the functions
// can safely bound to a different name (_ = Documentation.gettext)
gettext : function(string) {
var translated = Documentation.TRANSLATIONS[string];
if (typeof translated == 'undefined')
return string;
return (typeof translated == 'string') ? translated : translated[0];
},
ngettext : function(singular, plural, n) {
var translated = Documentation.TRANSLATIONS[singular];
if (typeof translated == 'undefined')
return (n == 1) ? singular : plural;
    return translated[Documentation.PLURAL_EXPR(n)];
},
addTranslations : function(catalog) {
for (var key in catalog.messages)
this.TRANSLATIONS[key] = catalog.messages[key];
this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')');
this.LOCALE = catalog.locale;
},
/**
* add context elements like header anchor links
*/
addContextElements : function() {
$('div[id] > :header:first').each(function() {
$('<a class="headerlink">\u00B6</a>').
attr('href', '#' + this.id).
attr('title', _('Permalink to this headline')).
appendTo(this);
});
$('dt[id]').each(function() {
$('<a class="headerlink">\u00B6</a>').
attr('href', '#' + this.id).
attr('title', _('Permalink to this definition')).
appendTo(this);
});
},
/**
* workaround a firefox stupidity
* see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075
*/
fixFirefoxAnchorBug : function() {
if (document.location.hash)
window.setTimeout(function() {
document.location.href += '';
}, 10);
},
/**
* highlight the search words provided in the url in the text
*/
highlightSearchWords : function() {
var params = $.getQueryParameters();
var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : [];
if (terms.length) {
var body = $('div.body');
if (!body.length) {
body = $('body');
}
window.setTimeout(function() {
$.each(terms, function() {
body.highlightText(this.toLowerCase(), 'highlighted');
});
}, 10);
$('<p class="highlight-link"><a href="javascript:Documentation.' +
'hideSearchWords()">' + _('Hide Search Matches') + '</a></p>')
.appendTo($('#searchbox'));
}
},
/**
* init the domain index toggle buttons
*/
initIndexTable : function() {
var togglers = $('img.toggler').click(function() {
var src = $(this).attr('src');
var idnum = $(this).attr('id').substr(7);
$('tr.cg-' + idnum).toggle();
if (src.substr(-9) == 'minus.png')
$(this).attr('src', src.substr(0, src.length-9) + 'plus.png');
else
$(this).attr('src', src.substr(0, src.length-8) + 'minus.png');
}).css('display', '');
if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) {
togglers.click();
}
},
/**
* helper function to hide the search marks again
*/
hideSearchWords : function() {
$('#searchbox .highlight-link').fadeOut(300);
$('span.highlighted').removeClass('highlighted');
},
/**
* make the url absolute
*/
makeURL : function(relativeURL) {
return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL;
},
/**
* get the current relative url
*/
getCurrentURL : function() {
var path = document.location.pathname;
var parts = path.split(/\//);
$.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() {
if (this == '..')
parts.pop();
});
var url = parts.join('/');
return path.substring(url.lastIndexOf('/') + 1, path.length - 1);
}
};
// quick alias for translations
_ = Documentation.gettext;
$(document).ready(function() {
Documentation.init();
}); | PypiClean |
/DuHast-1.0.7-py3-none-any.whl/duHast/APISamples/RevitGrids.py |
import clr
import System
# import common library modules
import RevitCommonAPI as com
import RevitWorksets as rWork
import Result as res
import Utility as util
import RevitFamilyUtils as rFamU
# import Autodesk
import Autodesk.Revit.DB as rdb
clr.ImportExtensions(System.Linq)
# -------------------------------------------- common variables --------------------
#: header used in reports
REPORT_GRIDS_HEADER = ['HOSTFILE','ID', 'NAME', 'WORKSETNAME', 'EXTENTMIN', 'EXTENTMAX']
# --------------------------------------------- utility functions ------------------
def GetAllGridHeadsByCategory(doc):
'''
Gets all grid head types in the model.
:param doc: Current Revit model document.
:type doc: Autodesk.Revit.DB.Document
:return: A filtered element collector with grid head types
:rtype: Autodesk.Revit.DB.FilteredElementCollector
'''
collector = rdb.FilteredElementCollector(doc).OfCategory(rdb.BuiltInCategory.OST_GridHeads).WhereElementIsElementType()
return collector
def GetAllGridTypesByCategory(doc):
'''
Gets all grid types in the model
:param doc: Current Revit model document.
:type doc: Autodesk.Revit.DB.Document
:return: A filtered element collector with grid types
:rtype: Autodesk.Revit.DB.FilteredElementCollector
'''
collector = rdb.FilteredElementCollector(doc).OfCategory(rdb.BuiltInCategory.OST_Grids).WhereElementIsElementType()
return collector
def GetAllGridTypeIdsByCategory(doc):
'''
    Gets all grid type ids in the model.
:param doc: Current Revit model document.
:type doc: Autodesk.Revit.DB.Document
:return: A filtered element collector with grid type ids
:rtype: Autodesk.Revit.DB.FilteredElementCollector
'''
collector = GetAllGridTypesByCategory(doc)
ids = com.GetIdsFromElementCollector(collector)
return ids
def GetGridTypeNames (doc, g):
'''
    Gets all valid grid types, based on a passed in grid, available in the model.
Uses grid.GetValidTypes() to get the grid types.
:param doc: Current Revit model document.
:type doc: Autodesk.Revit.DB.Document
:param g: A grid
:type g: Autodesk.Revit.DB.Grid
:return: A nested set of lists containing grid type id and grid type name
:rtype: list of lists [[GridTypeId as Revit ElementId, grid type name as string],[...]]
'''
validGridTypes = []
validGridTypeIds = g.GetValidTypes()
for validGridTypeId in validGridTypeIds:
gridData = []
gtypeT = doc.GetElement(validGridTypeId)
gridData.append(validGridTypeId)
gridData.append(rdb.Element.Name.GetValue(gtypeT))
validGridTypes.append(gridData)
return validGridTypes
def GetGridTypeName (doc, g):
'''
Gets the grid type name of a grid.
:param doc: Current Revit model document.
:type doc: Autodesk.Revit.DB.Document
:param g: A grid.
:type g: Autodesk.Revit.DB.Grid
:return: The grid type name.
:rtype: str
'''
value = 'unknown'
gtypeT = doc.GetElement(g.GetTypeId())
value = rdb.Element.Name.GetValue(gtypeT)
return value
def GetGridTypeIdByName (doc, gridTypeName):
'''
    Gets the grid type Id based on its name; if no match is found it returns Autodesk.Revit.DB.ElementId.InvalidElementId.
:param doc: Current Revit model document.
:type doc: Autodesk.Revit.DB.Document
:param gridTypeName: The grid type name.
:type gridTypeName: str
    :return: The grid type Id or, if no match is found, Autodesk.Revit.DB.ElementId.InvalidElementId
:rtype: Autodesk.Revit.DB.ElementId
'''
id = rdb.ElementId.InvalidElementId
grids = rdb.FilteredElementCollector(doc).OfClass(rdb.Grid).ToList()
if(len(grids) > 0):
g = grids[0]
validGridTypeIds = g.GetValidTypes()
for gridTypId in validGridTypeIds:
gtypeTName = rdb.Element.Name.GetValue(doc.GetElement(gridTypId))
if(gtypeTName == gridTypeName):
id = gridTypId
break
return id
def GridCheckParameterValue(g, paraName, paraCondition, conditionValue):
'''
Returns true if a given parameter on a grid has a value meeting the parameter condition.
:param g: A grid.
:type g: Autodesk.Revit.DB.Grid
:param paraName: A parameter Name.
:type paraName: str
:param paraCondition: A function evaluating the parameter value. First argument is the value to be checked against. Second argument is the actual parameter value.
:type paraCondition: func(arg1,arg2)
:param conditionValue: The value to be checked against.
:type conditionValue: var
:return: True if parameter value is evaluated to True otherwise False.
:rtype: bool
'''
ruleMatch = False
pValue = com.GetParameterValueByName(g, paraName)
if (pValue != None):
ruleMatch = com.CheckParameterValue(g, paraCondition, conditionValue)
return ruleMatch
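# Illustrative example (not part of the module): a parameter condition is any two
# argument function where the first argument is the condition value and the second
# the actual parameter value, e.g. one of the condition helpers in the Utility module
# or a simple lambda. The grid instance and values below are assumptions only.
#
#   nameStartsWithA = lambda conditionValue, parameterValue: parameterValue.startswith(conditionValue)
#   isMatch = GridCheckParameterValue(grid, 'Name', nameStartsWithA, 'A')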
def GetMaxExtentAsString(g):
'''
    Gets the extents of a grid as a string.
    :param g: A grid.
    :type g: Autodesk.Revit.DB.Grid
    :return: A string in format [minX,minY,minZ]<tab>[maxX,maxY,maxZ]
:rtype: str
'''
ex = g.GetExtents()
max = '['+ ','.join([str(ex.MaximumPoint.X), str(ex.MaximumPoint.Y), str(ex.MaximumPoint.Z)]) + ']'
min = '['+ ','.join([str(ex.MinimumPoint.X), str(ex.MinimumPoint.Y), str(ex.MinimumPoint.Z)]) + ']'
return '\t'.join([min, max])
# ------------------------------------------------------ grids workset modifiers ------------------------------------------
def ModifyGridWorkSetsDefault(doc, worksetRules):
    '''
    Workset modifier method. Moves all grids in the model to a single default workset.
    Typically called through ModifyGridsWorksets. Expects its rules in format (one workset name per model):
    [['default workset name']]
    :param doc: Current Revit model document.
    :type doc: Autodesk.Revit.DB.Document
    :param worksetRules: List of lists, each inner list containing a default workset name.
    :type worksetRules: [[str]]
    :return: A result object with the outcome of the workset changes.
    :rtype: Result
    '''
gridsResults = res.Result()
collectorGrids = rdb.FilteredElementCollector(doc).OfClass(rdb.Grid)
for rule in worksetRules:
for defaultWorksetName in rule:
grids = rWork.ModifyElementWorkset(doc, defaultWorksetName, collectorGrids, 'grids')
gridsResults.Update(grids)
return gridsResults
def ModifyGridWorkSetsByTypeName(doc, worksetRules):
    '''
    Workset modifier method. Moves grids matching a grid type name condition to a particular workset.
    Typically called through ModifyGridsWorksets. Expects its rules in format:
    [
        ['workset name', util.ConDoesNotEqual, 'grid type name'],
        ['workset name', util.ConDoesEqual, 'grid type name']
    ]
    :param doc: Current Revit model document.
    :type doc: Autodesk.Revit.DB.Document
    :param worksetRules: List of rules, each containing a workset name, a type name condition and a grid type name.
    :type worksetRules: [[str, func, str]]
    :return: A result object with the outcome of the workset changes.
    :rtype: Result
    '''
gridsResults = res.Result()
# loop over grid type filter and address one at the time
# get all grids matching type name filter
for defaultWorksetName, typeNameCondition, typeName, in worksetRules:
# get the grid type id from the type name
typeId = GetGridTypeIdByName(doc, typeName)
collectorGrids = rdb.FilteredElementCollector(doc).OfClass(rdb.Grid).Where(lambda e: typeNameCondition(e.GetTypeId(), typeId))
grids = rWork.ModifyElementWorkset(doc, defaultWorksetName, collectorGrids, 'grids')
gridsResults.Update(grids)
return gridsResults
def ModifyGridWorkSetsByParameterValue(doc, worksetRules):
    '''
    Workset modifier method. Moves grids matching a parameter condition to a particular workset.
    Typically called through ModifyGridsWorksets. Expects its rules in format:
    [
        ['workset name', util.ConTwoStartWithOne, 'Name', 'name starts with value']
    ]
    :param doc: Current Revit model document.
    :type doc: Autodesk.Revit.DB.Document
    :param worksetRules: List of rules, each containing a workset name, a parameter condition, a parameter name and a condition value.
    :type worksetRules: [[str, func, str, var]]
    :return: A result object with the outcome of the workset changes.
    :rtype: Result
    '''
gridsResults = res.Result()
# loop over grid parameter filter and address one at the time
# get all grids matching filter
for defaultWorksetName, paraCondition, paraName, conditionValue in worksetRules:
collectorGrids = rdb.FilteredElementCollector(doc).OfClass(rdb.Grid).Where(lambda e: GridCheckParameterValue(e, paraName, paraCondition, conditionValue))
grids = rWork.ModifyElementWorkset(doc, defaultWorksetName, collectorGrids, 'grids')
gridsResults.Update(grids)
return gridsResults
def ModifyGridsWorksets(doc, revitFileName, worksetRules):
    '''
    Modifies the worksets of grids as per the workset rules. Rules are matched against the Revit file name.
    Rules format:
    [
        ['model name',[
            [ModifyGridWorkSetsDefault,[
                ['default workset name'] (there should only be one per model)
            ]]
        ]]
    ]
    :param doc: Current Revit model document.
    :type doc: Autodesk.Revit.DB.Document
    :param revitFileName: The file name of the current model, used to find the applicable rules.
    :type revitFileName: str
    :param worksetRules: Nested list of file names, workset modifier methods and their rules.
    :type worksetRules: [[str, [[func, [rules]]]]]
    :return: A result object with the outcome of the workset changes.
    :rtype: Result
    '''
gridsResults = res.Result()
foundMatch = False
for fileName, worksetModifierList in worksetRules:
if (revitFileName.startswith(fileName)):
foundMatch = True
for worksetModifier, rules in worksetModifierList:
grids = worksetModifier(doc, rules)
gridsResults.Update(grids)
break
if foundMatch == False:
gridsResults.UpdateSep(False, 'No grid rules found for file: ' + revitFileName)
return gridsResults
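# Illustrative example (not part of the module): a combined rule set covering all three
# modifier methods. The model, workset and grid type names below are assumptions only.
#
#   worksetRules_ = [
#       ['ModelA', [
#           [ModifyGridWorkSetsDefault, [['SHARED LEVELS AND GRIDS']]]
#       ]],
#       ['ModelB', [
#           [ModifyGridWorkSetsByTypeName, [['WORKSET GRIDS', util.ConDoesEqual, '6.5mm Bubble']]],
#           [ModifyGridWorkSetsByParameterValue, [['WORKSET GRIDS', util.ConTwoStartWithOne, 'Name', 'A']]]
#       ]]
#   ]
#   result_ = ModifyGridsWorksets(doc, 'ModelA_Central.rvt', worksetRules_)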
# ------------------------------------------------------- Grid reporting --------------------------------------------------------------------
def GetGridReportData(doc, revitFilePath):
'''
Gets grid data ready for being printed to file
:param doc: Current Revit model document.
:type doc: Autodesk.Revit.DB.Document
:param revitFilePath: fully qualified file path of Revit file
:type revitFilePath: str
    :return: A list of lists of Revit grid properties.
:rtype: [[str]]
'''
data = []
for p in rdb.FilteredElementCollector(doc).OfClass(rdb.Grid):
data.append([
util.GetFileNameWithoutExt(revitFilePath),
str(p.Id.IntegerValue),
util.EncodeAscii(p.Name),
rWork.GetWorksetNameById(doc, p.WorksetId.IntegerValue),
GetMaxExtentAsString(p)])
return data
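# Illustrative example (not part of the module): writing the report rows to a tab
# separated text file together with the header. The output file name is an assumption.
#
#   with open('grid_report.txt', 'w') as f:
#       f.write('\t'.join(REPORT_GRIDS_HEADER) + '\n')
#       for row in GetGridReportData(doc, revitFilePath):
#           f.write('\t'.join(row) + '\n')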
# ------------------------------------------------- purge --------------------------------------------------------------------
def GetUnusedGridTypesForPurge(doc):
    '''
    Returns the ids of all unused grid types in the model, ready to be purged.
    :param doc: Current Revit model document.
    :type doc: Autodesk.Revit.DB.Document
    '''
return com.GetUsedUnusedTypeIds(doc, GetAllGridTypeIdsByCategory, 0, 8)
def GetAllGridHeadFamilyTypeIds(doc):
    '''
    Returns the ids of all grid head family types in the model.
    :param doc: Current Revit model document.
    :type doc: Autodesk.Revit.DB.Document
    '''
ids = []
filter = rdb.ElementCategoryFilter(rdb.BuiltInCategory.OST_GridHeads)
col = rdb.FilteredElementCollector(doc).OfClass(rdb.FamilySymbol).WherePasses(filter)
ids = com.GetIdsFromElementCollector(col)
return ids
def GetUnusedGridHeadFamilies(doc):
    '''
    Returns the ids of all unused grid head family symbols (types) in the model.
    :param doc: Current Revit model document.
    :type doc: Autodesk.Revit.DB.Document
    '''
usedTypes = com.GetUsedUnusedTypeIds(doc, GetAllGridTypeIdsByCategory, 1, 8)
headsInUseIds = []
for Id in usedTypes:
type = doc.GetElement(Id)
id = com.GetBuiltInParameterValue(type, rdb.BuiltInParameter.GRID_HEAD_TAG)
if (id != None and id not in headsInUseIds):
headsInUseIds.append(id)
allSymbolsInModel = GetAllGridHeadsByCategory(doc)
unusedSymbolIds = []
for symbolInModel in allSymbolsInModel:
if(symbolInModel.Id not in headsInUseIds ):
unusedSymbolIds.append(symbolInModel.Id)
return unusedSymbolIds
def GetUnusedGridHeadFamiliesForPurge(doc):
    '''
    Returns the ids of all unused grid head symbols and families, ready to be purged.
    :param doc: Current Revit model document.
    :type doc: Autodesk.Revit.DB.Document
    '''
return rFamU.GetUnusedInPlaceIdsForPurge(doc, GetUnusedGridHeadFamilies) | PypiClean |
/AnyRobot-1.5.4.tar.gz/AnyRobot-1.5.4/aishu/datafaker/profession/entity/port.py | import random
import time
import requests,json
from aishu.public import urlJoin
from aishu.public.operationJson import OperetionJson
from aishu.setting import header
class date(object):
def getPort(self):
        # Valid system port values: 20010-20099, plus 162, 514 and 5140.
portList = [port for port in range(20010, 20100)]
portList.append(162)
portList.append(514)
portList.append(5140)
port = random.choice(portList)
return port
def getEtlPort(self):
path = "/etl/input/list?start=0&limit=-1"
payload = {}
headers = header
response = requests.request("GET", urlJoin.url(path), headers=headers, data=payload)
date = response.json()
a = OperetionJson(date)
value = a.get_value('port')
if value:
return value
else:
return []
def getEtlPortOld(self):
data = self.getEtlPort()
if len(data) == 0:
port = 0
return port
else:
port = random.choice(data)
return port
def getEtlPortNew(self):
oldNew = self.getEtlPort()
count = 0
flag = True
        # Try up to 10 random ports until one is found that is not already in use.
        while flag and count < 10:
newPort = self.getPort()
count = count + 1
if newPort not in oldNew:
flag = False
return newPort
return ''
def getEtlPortIll(self):
portList = [port for port in range(10000, 20000)]
port = random.choice(portList)
return port
def getOpenlogPort(self):
path = "/etl/input/list?start=0&limit=-1"
payload = {}
headers = header
res = requests.request("GET", urlJoin.url(path), headers=headers, data=payload)
data = res.json()
        # Look for a matching input port in AnyRobot; if none is found, create one and return its port number.
for port_info in data:
if port_info['type'] == 'testtransfer' and port_info['protocol'] == 'tcp' and port_info['status'] == 1:
return port_info['port']
new_port = self.getEtlPortNew()
create_input_data = {
"community": [],
"port": f"{new_port}",
"protocol": "tcp",
"ruleName": None,
"status": 1,
"tagsID": [],
"tags": [],
"timezone": "Asia/Shanghai",
"type": "testtransfer",
"charset": "UTF-8"
}
path1 = "/etl/input"
res1 = requests.request("POST", urlJoin.url(path1), headers=headers, data=json.dumps(create_input_data))
time.sleep(60)
if res1.status_code != 200:
return ''
return new_port
if __name__ == '__main__':
print(date().getOpenlogPort()) | PypiClean |
/Hikka_Pyro-2.0.66-py3-none-any.whl/pyrogram/methods/chats/set_chat_photo.py |
import os
from typing import Union, BinaryIO
import pyrogram
from pyrogram import raw
from pyrogram import utils
from pyrogram.file_id import FileType
class SetChatPhoto:
async def set_chat_photo(
self: "pyrogram.Client",
chat_id: Union[int, str],
*,
photo: Union[str, BinaryIO] = None,
video: Union[str, BinaryIO] = None,
video_start_ts: float = None,
) -> bool:
"""Set a new chat photo or video (H.264/MPEG-4 AVC video, max 5 seconds).
The ``photo`` and ``video`` arguments are mutually exclusive.
Pass either one as named argument (see examples below).
You must be an administrator in the chat for this to work and must have the appropriate admin rights.
.. include:: /_includes/usable-by/users-bots.rst
Parameters:
chat_id (``int`` | ``str``):
Unique identifier (int) or username (str) of the target chat.
photo (``str`` | ``BinaryIO``, *optional*):
New chat photo. You can pass a :obj:`~pyrogram.types.Photo` file_id, a file path to upload a new photo
from your local machine or a binary file-like object with its attribute
".name" set for in-memory uploads.
video (``str`` | ``BinaryIO``, *optional*):
New chat video. You can pass a :obj:`~pyrogram.types.Video` file_id, a file path to upload a new video
from your local machine or a binary file-like object with its attribute
".name" set for in-memory uploads.
video_start_ts (``float``, *optional*):
The timestamp in seconds of the video frame to use as photo profile preview.
Returns:
``bool``: True on success.
Raises:
            ValueError: If the ``chat_id`` belongs to a user.
Example:
.. code-block:: python
# Set chat photo using a local file
await app.set_chat_photo(chat_id, photo="photo.jpg")
# Set chat photo using an existing Photo file_id
await app.set_chat_photo(chat_id, photo=photo.file_id)
# Set chat video using a local file
await app.set_chat_photo(chat_id, video="video.mp4")
# Set chat photo using an existing Video file_id
await app.set_chat_photo(chat_id, video=video.file_id)
"""
peer = await self.resolve_peer(chat_id)
if isinstance(photo, str):
if os.path.isfile(photo):
photo = raw.types.InputChatUploadedPhoto(
file=await self.save_file(photo),
video=await self.save_file(video),
video_start_ts=video_start_ts,
)
else:
photo = utils.get_input_media_from_file_id(photo, FileType.PHOTO)
photo = raw.types.InputChatPhoto(id=photo.id)
else:
photo = raw.types.InputChatUploadedPhoto(
file=await self.save_file(photo),
video=await self.save_file(video),
video_start_ts=video_start_ts,
)
if isinstance(peer, raw.types.InputPeerChat):
await self.invoke(
raw.functions.messages.EditChatPhoto(
chat_id=peer.chat_id,
photo=photo,
)
)
elif isinstance(peer, raw.types.InputPeerChannel):
await self.invoke(
raw.functions.channels.EditPhoto(
channel=peer,
photo=photo
)
)
else:
raise ValueError(f'The chat_id "{chat_id}" belongs to a user')
return True | PypiClean |
/ModelicaLanguage-0.0.0a6-py3-none-any.whl/modelicalang/parsers/syntax/_equations.py | __all__ = (
"equation_section",
"algorithm_section",
"equation",
"statement",
"if_equation",
"if_statement",
"for_equation",
"for_statement",
"for_indices",
"for_index",
"while_statement",
"when_equation",
"when_statement",
"connect_clause",
)
from arpeggio import (
Optional, ZeroOrMore, OneOrMore,
)
from .. import syntax
def equation_section():
"""
equation_section =
INITIAL? EQUATION (equation ";")*
"""
return (
Optional(syntax.INITIAL), syntax.EQUATION,
ZeroOrMore(syntax.equation, ";"),
)
def algorithm_section():
"""
algorithm_section =
INITIAL? ALGORITHM (statement ";")*
"""
return (
Optional(syntax.INITIAL), syntax.ALGORITHM,
ZeroOrMore(syntax.statement, ";"),
)
def equation():
"""
equation =
(
if_equation
/ for_equation
/ connect_clause
/ when_equation
/ simple_expression "=" expression
/ component_reference function_call_args
)
comment
"""
return (
[
syntax.if_equation,
syntax.for_equation,
syntax.connect_clause,
syntax.when_equation,
(syntax.simple_expression, "=", syntax.expression),
(syntax.component_reference, syntax.function_call_args),
],
syntax.comment,
)
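# Illustrative usage sketch (not part of the original module): assuming the full
# ``syntax`` package is importable as above, any rule function in this module can be
# handed to Arpeggio's ParserPython as a root rule. The sample Modelica source below
# is an assumption chosen to exercise ``connect_clause`` through the ``equation`` rule:
#
#   from arpeggio import ParserPython
#
#   parser = ParserPython(syntax.equation)
#   parse_tree = parser.parse("connect(pin_a.v, pin_b.v)")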
def statement():
"""
statement =
(
BREAK
/ RETURN
/ if_statement
/ for_statement
/ while_statement
/ when_statement
/ "(" output_expression_list ")" ":="
component_reference function_call_args
/ component_reference (":=" expression / function_call_args)
)
comment
"""
return (
[
syntax.BREAK,
syntax.RETURN,
syntax.if_statement,
syntax.for_statement,
syntax.while_statement,
syntax.when_statement,
(
"(", syntax.output_expression_list, ")", ":=",
syntax.component_reference, syntax.function_call_args,
),
(
syntax.component_reference,
[(":=", syntax.expression), syntax.function_call_args],
),
],
syntax.comment,
)
def if_equation():
"""
if_equation =
IF expression THEN (equation ";")*
( ELSEIF expression THEN (equation ";")* )*
( ELSE (equation ";")* )?
END IF
"""
return (
(
syntax.IF, syntax.expression, syntax.THEN,
ZeroOrMore(
syntax.equation, ";",
),
),
ZeroOrMore(
syntax.ELSEIF, syntax.expression, syntax.THEN,
ZeroOrMore(
syntax.equation, ";",
),
),
Optional(
syntax.ELSE,
ZeroOrMore(
syntax.equation, ";",
),
),
syntax.END, syntax.IF,
)
def if_statement():
"""
if_statement =
IF expression THEN (statement ";")*
( ELSEIF expression THEN (statement ";")* )*
( ELSE (statement ";")* )?
END IF
"""
return (
(
syntax.IF, syntax.expression, syntax.THEN,
ZeroOrMore(
syntax.statement, ";",
),
),
ZeroOrMore(
syntax.ELSEIF, syntax.expression, syntax.THEN,
ZeroOrMore(
syntax.statement, ";",
),
),
Optional(
syntax.ELSE,
ZeroOrMore(
syntax.statement, ";",
),
),
syntax.END, syntax.IF,
)
def for_equation():
"""
for_equation =
FOR for_indices LOOP
(equation ";")*
END FOR
"""
return (
syntax.FOR, syntax.for_indices, syntax.LOOP,
ZeroOrMore(
syntax.equation, ";",
),
syntax.END, syntax.FOR,
)
def for_statement():
"""
for_statement =
FOR for_indices LOOP
(statement ";")*
END FOR
"""
return (
syntax.FOR, syntax.for_indices, syntax.LOOP,
ZeroOrMore(
syntax.statement, ";",
),
syntax.END, syntax.FOR,
)
def for_indices():
"""
for_indices =
for_index ("," for_index)*
"""
return OneOrMore(syntax.for_index, sep=",")
def for_index():
"""
for_index =
IDENT (IN expression)?
"""
return syntax.IDENT, Optional(syntax.IN, syntax.expression)
def while_statement():
"""
while_statement =
WHILE expression LOOP
(statement ";")*
END WHILE
"""
return (
syntax.WHILE, syntax.expression, syntax.LOOP,
ZeroOrMore(
syntax.statement, ";",
),
syntax.END, syntax.WHILE,
)
def when_equation():
"""
when_equation =
WHEN expression THEN (equation ";")*
( ELSEWHEN expression THEN (equation ";")* )*
END WHEN
"""
return (
(
syntax.WHEN, syntax.expression, syntax.THEN,
ZeroOrMore(
syntax.equation, ";",
),
),
ZeroOrMore(
syntax.ELSEWHEN, syntax.expression, syntax.THEN,
ZeroOrMore(
syntax.equation, ";",
),
),
syntax.END, syntax.WHEN,
)
def when_statement():
"""
when_statement =
WHEN expression THEN (statement ";")*
( ELSEWHEN expression THEN (statement ";")* )*
END WHEN
"""
return (
(
syntax.WHEN, syntax.expression, syntax.THEN,
ZeroOrMore(
syntax.statement, ";",
),
),
ZeroOrMore(
syntax.ELSEWHEN, syntax.expression, syntax.THEN,
ZeroOrMore(
syntax.statement, ";",
),
),
syntax.END, syntax.WHEN,
)
def connect_clause():
"""
connect_clause =
CONNECT "(" component_reference "," component_reference ")"
"""
return (
syntax.CONNECT,
"(", syntax.component_reference, ",", syntax.component_reference, ")",
) | PypiClean |