# /NREL_reV-0.8.1-py3-none-any.whl/reV/hybrids/hybrids.py
import logging
import numpy as np
import re
import pandas as pd
from string import ascii_letters
from warnings import warn
from collections import namedtuple
from reV.handlers.outputs import Outputs
from reV.utilities.exceptions import (FileInputError, InputError,
InputWarning, OutputWarning)
from reV.hybrids.hybrid_methods import HYBRID_METHODS
from rex.resource import Resource
from rex.utilities.utilities import to_records_array
logger = logging.getLogger(__name__)
MERGE_COLUMN = 'sc_point_gid'
PROFILE_DSET_REGEX = 'rep_profiles_[0-9]+$'
SOLAR_PREFIX = 'solar_'
WIND_PREFIX = 'wind_'
NON_DUPLICATE_COLS = {
'latitude', 'longitude', 'country', 'state', 'county', 'elevation',
'timezone', 'sc_point_gid', 'sc_row_ind', 'sc_col_ind'
}
DROPPED_COLUMNS = ['gid']
DEFAULT_FILL_VALUES = {'solar_capacity': 0, 'wind_capacity': 0,
'solar_mean_cf': 0, 'wind_mean_cf': 0}
OUTPUT_PROFILE_NAMES = ['hybrid_profile',
'hybrid_solar_profile',
'hybrid_wind_profile']
RatioColumns = namedtuple('RatioColumns', ['num', 'denom', 'fixed'],
defaults=(None, None, None))
class ColNameFormatter:
"""Column name formatting helper class. """
ALLOWED = set(ascii_letters)
@classmethod
def fmt(cls, n):
"""Format an input column name to remove excess chars and whitespace.
This method should help facilitate the merging of column names
between two DataFrames.
Parameters
----------
n : str
Input column name.
Returns
-------
str
The column name with all characters except ascii stripped
and all lowercase.
"""
return ''.join(c for c in n if c in cls.ALLOWED).lower()
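# A minimal usage sketch of ColNameFormatter.fmt (values are illustrative):
# it keeps only ASCII letters and lowercases the result, so differently
# formatted column names from the two meta DataFrames compare equal.
# >>> ColNameFormatter.fmt('Sc_Point Gid')
# 'scpointgid'
# >>> ColNameFormatter.fmt('latitude')
# 'latitude'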
class HybridsData:
"""Hybrids input data container. """
def __init__(self, solar_fpath, wind_fpath):
"""
Parameters
----------
solar_fpath : str
Filepath to rep profile output file to extract solar profiles and
summaries from.
wind_fpath : str
Filepath to rep profile output file to extract wind profiles and
summaries from.
"""
self.solar_fpath = solar_fpath
self.wind_fpath = wind_fpath
self.profile_dset_names = []
self.merge_col_overlap_values = set()
self._solar_meta = None
self._wind_meta = None
self._solar_time_index = None
self._wind_time_index = None
self._hybrid_time_index = None
self.__profile_reg_check = re.compile(PROFILE_DSET_REGEX)
self.__solar_cols = self.solar_meta.columns.map(ColNameFormatter.fmt)
self.__wind_cols = self.wind_meta.columns.map(ColNameFormatter.fmt)
@property
def solar_meta(self):
"""Summary for the solar representative profiles.
Returns
-------
solar_meta : pd.DataFrame
Summary for the solar representative profiles.
"""
if self._solar_meta is None:
with Resource(self.solar_fpath) as res:
self._solar_meta = res.meta
return self._solar_meta
@property
def wind_meta(self):
"""Summary for the wind representative profiles.
Returns
-------
wind_meta : pd.DataFrame
Summary for the wind representative profiles.
"""
if self._wind_meta is None:
with Resource(self.wind_fpath) as res:
self._wind_meta = res.meta
return self._wind_meta
@property
def solar_time_index(self):
"""Get the time index for the solar rep profiles.
Returns
-------
solar_time_index : pd.datetimeindex
Time index sourced from the solar reV gen file.
"""
if self._solar_time_index is None:
with Resource(self.solar_fpath) as res:
self._solar_time_index = res.time_index
return self._solar_time_index
@property
def wind_time_index(self):
"""Get the time index for the wind rep profiles.
Returns
-------
wind_time_index : pd.datetimeindex
Time index sourced from the wind reV gen file.
"""
if self._wind_time_index is None:
with Resource(self.wind_fpath) as res:
self._wind_time_index = res.time_index
return self._wind_time_index
@property
def hybrid_time_index(self):
"""Get the time index for the hybrid rep profiles.
Returns
-------
hybrid_time_index : pd.datetimeindex
Time index for the hybrid rep profiles.
"""
if self._hybrid_time_index is None:
self._hybrid_time_index = self.solar_time_index.join(
self.wind_time_index, how='inner')
return self._hybrid_time_index
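# Hedged illustration of the hybrid time index: it is the inner join
# (intersection) of the solar and wind time indexes, so only timestamps
# present in both rep profile files are kept. The example values below
# are made up, not taken from any reV file.
# >>> solar_ti = pd.date_range('2012-01-01', periods=8784, freq='H')
# >>> wind_ti = pd.date_range('2012-01-01', periods=8760, freq='H')
# >>> len(solar_ti.join(wind_ti, how='inner'))
# 8760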
def contains_col(self, col_name):
"""Check if input column name exists in either meta data set.
Parameters
----------
col_name : str
Name of column to check for.
Returns
-------
bool
Whether or not the column is found in either meta data set.
"""
fmt_name = ColNameFormatter.fmt(col_name)
col_in_solar = fmt_name in self.__solar_cols
col_in_wind = fmt_name in self.__wind_cols
return col_in_solar or col_in_wind
def validate(self):
"""Validate the input data.
This method checks for a minimum time index length, a single
profile dataset per input file, and a merge column with unique
values that overlap between both data sets.
"""
self._validate_time_index()
self._validate_num_profiles()
self._validate_merge_col_exists()
self._validate_unique_merge_col()
self._validate_merge_col_overlaps()
def _validate_time_index(self):
"""Validate the hybrid time index to be of len >= 8760.
Raises
------
FileInputError
If len(time_index) < 8760 for the hybrid profile.
"""
if len(self.hybrid_time_index) < 8760:
msg = ("The length of the merged time index ({}) is less than "
"8760. Please ensure that the input profiles have a "
"time index that overlaps >= 8760 times.")
e = msg.format(len(self.hybrid_time_index))
logger.error(e)
raise FileInputError(e)
def _validate_num_profiles(self):
"""Validate the number of input profiles.
Raises
------
FileInputError
If zero or more than one profile dataset is found in an input file.
"""
for fp in [self.solar_fpath, self.wind_fpath]:
with Resource(fp) as res:
profile_dset_names = [
n for n in res.dsets
if self.__profile_reg_check.match(n)
]
if not profile_dset_names:
msg = ("Did not find any data sets matching the regex: "
"{!r} in {!r}. Please ensure that the profile data "
"exists and that the data set is named correctly.")
e = msg.format(PROFILE_DSET_REGEX, fp)
logger.error(e)
raise FileInputError(e)
elif len(profile_dset_names) > 1:
msg = ("Found more than one profile in {!r}: {}. "
"This module is not intended for hybridization of "
"multiple representative profiles. Please re-run "
"on a single aggregated profile.")
e = msg.format(fp, profile_dset_names)
logger.error(e)
raise FileInputError(e)
else:
self.profile_dset_names += profile_dset_names
def _validate_merge_col_exists(self):
"""Validate the existence of the merge column.
Raises
------
FileInputError
If merge column is missing from either the solar or
the wind meta data.
"""
msg = ("Cannot hybridize: merge column {!r} missing from the "
"{} meta data! ({!r})")
mc = ColNameFormatter.fmt(MERGE_COLUMN)
for cols, fp, res in zip([self.__solar_cols, self.__wind_cols],
[self.solar_fpath, self.wind_fpath],
['solar', 'wind']):
if mc not in cols:
e = msg.format(MERGE_COLUMN, res, fp)
logger.error(e)
raise FileInputError(e)
def _validate_unique_merge_col(self):
"""Validate the existence of unique values in the merge column.
Raises
------
FileInputError
If merge column contains duplicate values in either the solar or
the wind meta data.
"""
msg = ("Duplicate {}s were found. This is likely due to resource "
"class binning, which is not supported at this time. "
"Please re-run supply curve aggregation without "
"resource class binning and ensure there are no duplicate "
"values in {!r}. File: {!r}")
mc = ColNameFormatter.fmt(MERGE_COLUMN)
for ds, cols, fp in zip([self.solar_meta, self.wind_meta],
[self.__solar_cols, self.__wind_cols],
[self.solar_fpath, self.wind_fpath]):
merge_col = ds.columns[cols == mc].item()
if not ds[merge_col].is_unique:
e = msg.format(merge_col, merge_col, fp)
logger.error(e)
raise FileInputError(e)
def _validate_merge_col_overlaps(self):
"""Validate the existence of overlap in the merge column values.
Raises
------
FileInputError
If merge column values do not overlap between the two input files.
"""
mc = ColNameFormatter.fmt(MERGE_COLUMN)
merge_col = self.solar_meta.columns[self.__solar_cols == mc].item()
solar_vals = set(self.solar_meta[merge_col].values)
merge_col = self.wind_meta.columns[self.__wind_cols == mc].item()
wind_vals = set(self.wind_meta[merge_col].values)
self.merge_col_overlap_values = solar_vals & wind_vals
if not self.merge_col_overlap_values:
msg = ("No overlap detected in the values of {!r} across the "
"input files. Please ensure that at least one of the "
"{!r} values is the same for input files {!r} and {!r}")
e = msg.format(merge_col, merge_col, self.solar_fpath,
self.wind_fpath)
logger.error(e)
raise FileInputError(e)
class MetaHybridizer:
"""Framework to handle hybridization of meta data."""
_INTERNAL_COL_PREFIX = '_h_internal'
def __init__(self, data, allow_solar_only=False,
allow_wind_only=False, fillna=None,
limits=None, ratio_bounds=None,
ratio='solar_capacity/wind_capacity'):
"""
Parameters
----------
data : `HybridsData`
Instance of `HybridsData` containing input data to
hybridize.
allow_solar_only : bool, optional
Option to allow SC points with only solar capacity
(no wind). By default, ``False``.
allow_wind_only : bool, optional
Option to allow SC points with only wind capacity
(no solar). By default, ``False``.
fillna : dict, optional
Dictionary containing column_name, fill_value pairs
representing any fill values that should be applied after
merging the wind and solar meta. Note that column names will
likely have to be prefixed with ``solar`` or ``wind``.
By default, ``None``.
limits : dict, optional
Option to specify mapping (in the form of a dictionary) of
{column_name: max_value} representing the upper limit
(maximum value) for the values of a column in the merged
meta. For example, `limits={'solar_capacity': 100}` would
limit all the values of the solar capacity in the merged
meta to a maximum value of 100. This limit is applied
*BEFORE* ratio calculations. The names of the columns should
match the column names in the merged meta, so they are
likely prefixed with ``solar`` or ``wind``. By default,
``None`` (no limits applied).
ratio_bounds : tuple, optional
Option to set ratio bounds (in two-tuple form) on the
columns of the `ratio` input. For example,
`ratio_bounds=(0.5, 1.5)` would adjust the values of both of
the `ratio` columns such that their ratio is always between
half and double (e.g., no value would be more than double
the other). To specify a single ratio value, use the same
value as the upper and lower bound. For example,
`ratio_bounds=(1, 1)` would adjust the values of both of the
`ratio` columns such that their ratio is always equal.
By default, ``None`` (no limit on the ratio).
ratio : str, optional
Option to specify the columns used to calculate the ratio
that is limited by the `ratio_bounds` input. This input is a
string in the form
"numerator_column_name/denominator_column_name".
For example, `ratio='solar_capacity/wind_capacity'` would
limit the ratio of the solar to wind capacities as specified
by the `ratio_bounds` input. If `ratio_bounds` is ``None``,
this input does nothing. The names of the columns should be
prefixed with one of the prefixes defined as class
variables. By default ``'solar_capacity/wind_capacity'``.
"""
self.data = data
self._allow_solar_only = allow_solar_only
self._allow_wind_only = allow_wind_only
self._fillna = {**DEFAULT_FILL_VALUES, **(fillna or {})}
self._limits = limits or {}
self._ratio_bounds = ratio_bounds
self._ratio = ratio
self._hybrid_meta = None
self.__hybrid_meta_cols = None
self.__col_name_map = None
self.__solar_rpi_n = '{}_solar_rpidx'.format(self._INTERNAL_COL_PREFIX)
self.__wind_rpi_n = '{}_wind_rpidx'.format(self._INTERNAL_COL_PREFIX)
@property
def hybrid_meta(self):
"""Hybridized summary for the representative profiles.
Returns
-------
hybrid_meta : pd.DataFrame
Summary for the hybridized representative profiles.
At the very least, this has a column that the data was merged on.
"""
if self._hybrid_meta is None or self.__hybrid_meta_cols is None:
return self._hybrid_meta
else:
return self._hybrid_meta[self.__hybrid_meta_cols]
def validate_input(self):
"""Validate the input parameters.
This method validates that the input limit, fill, and ratio columns
are formatted correctly.
"""
self._validate_limits_cols_prefixed()
self._validate_fillna_cols_prefixed()
self._validate_ratio_input()
def _validate_limits_cols_prefixed(self):
"""Ensure the limits columns are formatted correctly.
This check is important because the limiting happens
after the meta has been merged (so columns are already prefixed),
but before the hybrid columns are computed. As a result, the limits
columns _must_ have a valid prefix.
Raises
------
InputError
If limits columns are not prefixed correctly.
"""
for col in self._limits:
self.__validate_col_prefix(
col, (SOLAR_PREFIX, WIND_PREFIX), input_name='limits'
)
@staticmethod
def __validate_col_prefix(col, prefixes, input_name):
"""Validate the the col starts with the correct prefix. """
missing = [not col.startswith(p) for p in prefixes]
if all(missing):
msg = ("Input {0} column {1!r} does not start with a valid "
"prefix: {2!r}. Please ensure that the {0} column "
"names specify the correct resource prefix.")
e = msg.format(input_name, col, prefixes)
logger.error(e)
raise InputError(e)
def _validate_fillna_cols_prefixed(self):
"""Ensure the fillna columns are formatted correctly.
This check is important because the fillna step happens
after the meta has been merged (so columns are already prefixed),
but before the hybrid columns are computed. As a result, the fillna
columns _must_ have a valid prefix.
Raises
------
InputError
If fillna columns are not prefixed correctly.
"""
for col in self._fillna:
self.__validate_col_prefix(
col, (SOLAR_PREFIX, WIND_PREFIX), input_name='fillna'
)
def _validate_ratio_input(self):
"""Validate the ratio input parameters.
This method validates that the input ratio columns are formatted
correctly and exist in the input data. It also verifies that
the `ratio_bounds` is correctly formatted.
"""
if self._ratio_bounds is None:
return
self._validate_ratio_bounds()
self._validate_ratio_type()
self._validate_ratio_format()
self._validate_ratio_cols_prefixed()
self._validate_ratio_cols_exist()
def _validate_ratio_bounds(self):
"""Ensure the ratio value is input correctly.
Raises
------
InputError
If ratio_bounds is not a len 2 container of floats.
"""
try:
if len(self._ratio_bounds) != 2:
msg = ("Length of input for ratio_bounds is {} - but is "
"required to be of length 2. Please make sure this "
"input is a len 2 container of floats. If you would "
"like to specify a single ratio value, use the same "
"float for both limits (i.e. ratio_bounds=(1, 1)).")
e = msg.format(len(self._ratio_bounds))
logger.error(e)
raise InputError(e)
except TypeError:
msg = ("Input for ratio_bounds not understood: {!r}. "
"Please make sure this value is a len 2 container "
"of floats.")
e = msg.format(self._ratio_bounds)
logger.error(e)
raise InputError(e) from None
def _validate_ratio_type(self):
"""Ensure that the ratio input is a string.
Raises
------
InputError
If `ratio` is not a string.
"""
if not isinstance(self._ratio, str):
msg = ("Ratio input type {} not understood. Please make sure "
"the ratio input is a string in the form "
"'numerator_column_name/denominator_column_name'. Ratio "
"input: {!r}")
e = msg.format(type(self._ratio), self._ratio)
logger.error(e)
raise InputError(e)
def _validate_ratio_format(self):
"""Validate that the ratio input format is correct and can be parsed.
Raises
------
InputError
If the '/' character is missing or if there are too many
'/' characters.
"""
if '/' not in self._ratio:
msg = ("Ratio input {} does not contain the '/' character. "
"Please make sure the ratio input is a string in the form "
"'numerator_column_name/denominator_column_name'")
e = msg.format(self._ratio)
logger.error(e)
raise InputError(e)
if len(self._ratio_cols) != 2:
msg = ("Ratio input {} contains too many '/' characters. Please "
"make sure the ratio input is a string in the form "
"'numerator_column_name/denominator_column_name'.")
e = msg.format(self._ratio)
logger.error(e)
raise InputError(e)
def _validate_ratio_cols_prefixed(self):
"""Ensure the ratio columns are formatted correctly.
This check is important because the ratio limit step happens
after the meta has been merged (so columns are already prefixed),
but before the hybrid columns are computed. As a result, the ratio
columns _must_ have a valid prefix.
Raises
------
InputError
If ratio columns are not prefixed correctly.
"""
for col in self._ratio_cols:
self.__validate_col_prefix(
col, (SOLAR_PREFIX, WIND_PREFIX), input_name='ratios'
)
def _validate_ratio_cols_exist(self):
"""Ensure the ratio columns exist if a ratio is specified.
Raises
------
FileInputError
If ratio columns are not found in the meta data.
"""
for col in self._ratio_cols:
no_prefix_name = "_".join(col.split('_')[1:])
if not self.data.contains_col(no_prefix_name):
msg = ("Input ratios column {!r} not found in either meta "
"data! Please check the input files {!r} and {!r}")
e = msg.format(no_prefix_name, self.data.solar_fpath,
self.data.wind_fpath)
logger.error(e)
raise FileInputError(e)
@property
def _ratio_cols(self):
"""Get the ratio columns from the ratio input. """
if self._ratio is None:
return []
return self._ratio.strip().split('/')
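# Illustrative parsing of the ratio input (the value below is the default,
# any other pair of prefixed column names works the same way):
# >>> 'solar_capacity/wind_capacity'.strip().split('/')
# ['solar_capacity', 'wind_capacity']
# A string with zero or more than one '/' fails _validate_ratio_format.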
def hybridize(self):
"""Combine the solar and wind metas and run hybridize methods."""
self._format_meta_pre_merge()
self._merge_solar_wind_meta()
self._verify_lat_long_match_post_merge()
self._format_meta_post_merge()
self._fillna_meta_cols()
self._apply_limits()
self._limit_by_ratio()
self._add_hybrid_cols()
self._sort_hybrid_meta_cols()
def _format_meta_pre_merge(self):
"""Prepare solar and wind meta for merging. """
self.__col_name_map = {
ColNameFormatter.fmt(c): c
for c in self.data.solar_meta.columns.values
}
self._rename_cols(self.data.solar_meta, prefix=SOLAR_PREFIX)
self._rename_cols(self.data.wind_meta, prefix=WIND_PREFIX)
self._save_rep_prof_index_internally()
@staticmethod
def _rename_cols(df, prefix):
"""Replace column names with the ColNameFormatter.fmt is needed. """
df.columns = [
ColNameFormatter.fmt(col_name)
if col_name in NON_DUPLICATE_COLS
else '{}{}'.format(prefix, col_name)
for col_name in df.columns.values
]
def _save_rep_prof_index_internally(self):
"""Save rep profiles index in hybrid meta for access later. """
self.data.solar_meta[self.__solar_rpi_n] = self.data.solar_meta.index
self.data.wind_meta[self.__wind_rpi_n] = self.data.wind_meta.index
def _merge_solar_wind_meta(self):
"""Merge the wind and solar meta DataFrames. """
self._hybrid_meta = self.data.solar_meta.merge(
self.data.wind_meta,
on=ColNameFormatter.fmt(MERGE_COLUMN),
suffixes=[None, '_x'], how=self._merge_type()
)
def _merge_type(self):
"""Determine the type of merge to use for meta based on user input. """
if self._allow_solar_only and self._allow_wind_only:
return 'outer'
elif self._allow_solar_only and not self._allow_wind_only:
return 'left'
elif not self._allow_solar_only and self._allow_wind_only:
return 'right'
return 'inner'
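# Summary of the merge-type selection above (derived directly from the code):
#   allow_solar_only  allow_wind_only  -> merge type
#   True              True             -> 'outer' (keep all SC points)
#   True              False            -> 'left'  (keep all solar points)
#   False             True             -> 'right' (keep all wind points)
#   False             False            -> 'inner' (keep overlapping points)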
def _format_meta_post_merge(self):
"""Format hybrid meta after merging. """
duplicate_cols = [n for n in self._hybrid_meta.columns if "_x" in n]
self._propagate_duplicate_cols(duplicate_cols)
self._drop_cols(duplicate_cols)
self._hybrid_meta.rename(self.__col_name_map, inplace=True, axis=1)
self._hybrid_meta.index.name = 'gid'
def _propagate_duplicate_cols(self, duplicate_cols):
"""Fill missing column values from outer merge. """
for duplicate in duplicate_cols:
no_suffix = "_".join(duplicate.split("_")[:-1])
null_idx = self._hybrid_meta[no_suffix].isnull()
non_null_vals = self._hybrid_meta.loc[null_idx, duplicate].values
self._hybrid_meta.loc[null_idx, no_suffix] = non_null_vals
def _drop_cols(self, duplicate_cols):
"""Drop any remaning duplicate and 'DROPPED_COLUMNS' columns. """
self._hybrid_meta.drop(
duplicate_cols + DROPPED_COLUMNS,
axis=1, inplace=True, errors='ignore'
)
def _sort_hybrid_meta_cols(self):
"""Sort the columns of the hybrid meta. """
self.__hybrid_meta_cols = sorted(
[c for c in self._hybrid_meta.columns
if not c.startswith(self._INTERNAL_COL_PREFIX)],
key=self._column_sorting_key
)
def _column_sorting_key(self, c):
"""Helper function to sort hybrid meta columns. """
first_index = 0
if c.startswith('hybrid'):
first_index = 1
elif c.startswith('solar'):
first_index = 2
elif c.startswith('wind'):
first_index = 3
elif c == MERGE_COLUMN:
first_index = -1
return first_index, self._hybrid_meta.columns.get_loc(c)
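# Hedged illustration of the sorting key above (column names are examples):
# the merge column sorts first (-1), unprefixed columns next (0), then
# 'hybrid_*' (1), 'solar_*' (2), and 'wind_*' (3) columns, with ties broken
# by the column's original position in the merged meta.
# >>> self._column_sorting_key('sc_point_gid')[0]
# -1
# >>> self._column_sorting_key('hybrid_solar_capacity')[0]
# 1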
def _verify_lat_long_match_post_merge(self):
"""Verify that all the lat/lon values match post merge."""
lat = self._verify_col_match_post_merge(col_name='latitude')
lon = self._verify_col_match_post_merge(col_name='longitude')
if not lat or not lon:
msg = ("Detected mismatched coordinate values (latitude or "
"longitude) post merge. Please ensure that all matching "
"values of {!r} correspond to the same values of latitude "
"and longitude across the input files {!r} and {!r}")
e = msg.format(MERGE_COLUMN, self.data.solar_fpath,
self.data.wind_fpath)
logger.error(e)
raise FileInputError(e)
def _verify_col_match_post_merge(self, col_name):
"""Verify that all (non-null) values in a column match post merge. """
c1, c2 = col_name, '{}_x'.format(col_name)
if c1 in self._hybrid_meta.columns and c2 in self._hybrid_meta.columns:
compare_df = self._hybrid_meta[
(self._hybrid_meta[c1].notnull())
& (self._hybrid_meta[c2].notnull())
]
return np.allclose(compare_df[c1], compare_df[c2])
else:
return True
def _fillna_meta_cols(self):
"""Fill N/A values as specified by user (and internals). """
for col_name, fill_value in self._fillna.items():
if col_name in self._hybrid_meta.columns:
self._hybrid_meta[col_name].fillna(fill_value, inplace=True)
else:
self.__warn_missing_col(col_name, action='fill')
self._hybrid_meta[self.__solar_rpi_n].fillna(-1, inplace=True)
self._hybrid_meta[self.__wind_rpi_n].fillna(-1, inplace=True)
@staticmethod
def __warn_missing_col(col_name, action):
"""Warn that a column the user request an action for is missing. """
msg = ("Skipping {} values for {!r}: Unable to find column "
"in hybrid meta. Did you forget to prefix with "
"{!r} or {!r}? ")
w = msg.format(action, col_name, SOLAR_PREFIX, WIND_PREFIX)
logger.warning(w)
warn(w, InputWarning)
def _apply_limits(self):
"""Clip column values as specified by user. """
for col_name, max_value in self._limits.items():
if col_name in self._hybrid_meta.columns:
self._hybrid_meta[col_name].clip(upper=max_value, inplace=True)
else:
self.__warn_missing_col(col_name, action='limit')
def _limit_by_ratio(self):
""" Limit the given pair of ratio columns based on input ratio. """
if self._ratio_bounds is None:
return
numerator_col, denominator_col = self._ratio_cols
min_ratio, max_ratio = sorted(self._ratio_bounds)
overlap_idx = self._hybrid_meta[MERGE_COLUMN].isin(
self.data.merge_col_overlap_values
)
numerator_vals = self._hybrid_meta[numerator_col].copy()
denominator_vals = self._hybrid_meta[denominator_col].copy()
ratios = (
numerator_vals.loc[overlap_idx]
/ denominator_vals.loc[overlap_idx]
)
ratio_too_low = (ratios < min_ratio) & overlap_idx
ratio_too_high = (ratios > max_ratio) & overlap_idx
numerator_vals.loc[ratio_too_high] = (
denominator_vals.loc[ratio_too_high].values * max_ratio
)
denominator_vals.loc[ratio_too_low] = (
numerator_vals.loc[ratio_too_low].values / min_ratio
)
h_num_name = "hybrid_{}".format(numerator_col)
h_denom_name = "hybrid_{}".format(denominator_col)
self._hybrid_meta[h_num_name] = numerator_vals.values
self._hybrid_meta[h_denom_name] = denominator_vals.values
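# Hedged worked example of the ratio clipping above, assuming
# ratio_bounds=(0.5, 1.5) and ratio='solar_capacity/wind_capacity'
# (the numbers are illustrative only):
#   solar=200, wind=100 -> ratio 2.0 > 1.5, so hybrid_solar_capacity
#       is reduced to 100 * 1.5 = 150 (wind is unchanged).
#   solar=40,  wind=100 -> ratio 0.4 < 0.5, so hybrid_wind_capacity
#       is reduced to 40 / 0.5 = 80 (solar is unchanged).
# Rows outside the merge-column overlap keep their original values.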
def _add_hybrid_cols(self):
"""Add new hybrid columns using registered hybrid methods. """
for new_col_name, method in HYBRID_METHODS.items():
out = method(self)
if out is not None:
try:
self._hybrid_meta[new_col_name] = out
except ValueError as e:
msg = ("Unable to add {!r} column to hybrid meta. The "
"following exception was raised when adding "
"the data output by '{}': {!r}.")
w = msg.format(new_col_name, method.__name__, e)
logger.warning(w)
warn(w, OutputWarning)
@property
def solar_profile_indices_map(self):
"""Map hybrid to solar rep indices.
Returns
-------
hybrid_indices : np.ndarray
Index values corresponding to hybrid rep profiles.
solar_indices : np.ndarray
Index values of the solar rep profiles corresponding
to the hybrid rep profile indices.
"""
if self._hybrid_meta is None:
return np.array([]), np.array([])
idxs = self._hybrid_meta[self.__solar_rpi_n].astype(int)
idxs = idxs[idxs >= 0]
return idxs.index.values, idxs.values
@property
def wind_profile_indices_map(self):
"""Map hybrid to wind rep indices.
Returns
-------
hybrid_indices : np.ndarray
Index values corresponding to hybrid rep profiles.
wind_indices : np.ndarray
Index values of the wind rep profiles corresponding
to the hybrid rep profile indices.
"""
if self._hybrid_meta is None:
return np.array([]), np.array([])
idxs = self._hybrid_meta[self.__wind_rpi_n].astype(int)
idxs = idxs[idxs >= 0]
return idxs.index.values, idxs.values
class Hybridization:
"""Hybridization"""
def __init__(self, solar_fpath, wind_fpath, allow_solar_only=False,
allow_wind_only=False, fillna=None, limits=None,
ratio_bounds=None, ratio='solar_capacity/wind_capacity'):
"""Framework to handle hybridization of SC and corresponding profiles.
``reV`` hybrids computes a "hybrid" wind and solar supply curve,
where each supply curve point contains some wind and some solar
capacity. Various ratio limits on wind-to-solar farm properties
(e.g. wind-to-solar capacity) can be applied during the
hybridization process. Hybrid generation profiles are also
computed during this process.
Parameters
----------
solar_fpath : str
Filepath to rep profile output file to extract solar
profiles and summaries from.
wind_fpath : str
Filepath to rep profile output file to extract wind profiles
and summaries from.
allow_solar_only : bool, optional
Option to allow SC points with only solar capacity
(no wind). By default, ``False``.
allow_wind_only : bool, optional
Option to allow SC points with only wind capacity
(no solar). By default, ``False``.
fillna : dict, optional
Dictionary containing column_name, fill_value pairs
representing any fill values that should be applied after
merging the wind and solar meta. Note that column names will
likely have to be prefixed with ``solar`` or ``wind``.
By default ``None``.
limits : dict, optional
Option to specify mapping (in the form of a dictionary) of
{colum_name: max_value} representing the upper limit
(maximum value) for the values of a column in the merged
meta. For example, ``limits={'solar_capacity': 100}`` would
limit all the values of the solar capacity in the merged
meta to a maximum value of 100. This limit is applied
*BEFORE* ratio calculations. The names of the columns should
match the column names in the merged meta, so they are
likely prefixed with ``solar`` or ``wind``.
By default, ``None`` (no limits applied).
ratio_bounds : tuple, optional
Option to set ratio bounds (in two-tuple form) on the
columns of the ``ratio`` input. For example,
``ratio_bounds=(0.5, 1.5)`` would adjust the values of both
of the ``ratio`` columns such that their ratio is always
between half and double (e.g., no value would be more than
double the other). To specify a single ratio value, use the
same value as the upper and lower bound. For example,
``ratio_bounds=(1, 1)`` would adjust the values of both of
the ``ratio`` columns such that their ratio is always equal.
By default, ``None`` (no limit on the ratio).
ratio : str, optional
Option to specify the columns used to calculate the ratio
that is limited by the `ratio_bounds` input. This input is a
string in the form "{numerator_column}/{denominator_column}".
For example, ``ratio='solar_capacity/wind_capacity'``
would limit the ratio of the solar to wind capacities as
specified by the ``ratio_bounds`` input. If ``ratio_bounds``
is None, this input does nothing. The names of the columns
should be prefixed with one of the prefixes defined as class
variables. By default ``'solar_capacity/wind_capacity'``.
"""
logger.info('Running hybridization of rep profiles with solar_fpath: '
'"{}"'.format(solar_fpath))
logger.info('Running hybridization of rep profiles with wind_fpath: '
'"{}"'.format(wind_fpath))
logger.info('Running hybridization of rep profiles with '
'allow_solar_only: "{}"'.format(allow_solar_only))
logger.info('Running hybridization of rep profiles with '
'allow_wind_only: "{}"'.format(allow_wind_only))
logger.info('Running hybridization of rep profiles with fillna: "{}"'
.format(fillna))
logger.info('Running hybridization of rep profiles with limits: "{}"'
.format(limits))
logger.info('Running hybridization of rep profiles with ratio_bounds: '
'"{}"'.format(ratio_bounds))
logger.info('Running hybridization of rep profiles with ratio: "{}"'
.format(ratio))
self.data = HybridsData(solar_fpath, wind_fpath)
self.meta_hybridizer = MetaHybridizer(
data=self.data, allow_solar_only=allow_solar_only,
allow_wind_only=allow_wind_only, fillna=fillna, limits=limits,
ratio_bounds=ratio_bounds, ratio=ratio
)
self._profiles = None
self._validate_input()
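# A minimal, hedged usage sketch of this class (the file paths and the
# ratio bound values below are hypothetical, not part of the reV source):
# >>> h = Hybridization('./solar_rep_profiles.h5', './wind_rep_profiles.h5',
# ...                   allow_solar_only=True, ratio_bounds=(0.5, 1.5))
# >>> h.run(fout='./hybrid_rep_profiles.h5')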
def _validate_input(self):
"""Validate the user input and input files. """
self.data.validate()
self.meta_hybridizer.validate_input()
@property
def solar_meta(self):
"""Summary for the solar representative profiles.
Returns
-------
solar_meta : pd.DataFrame
Summary for the solar representative profiles.
"""
return self.data.solar_meta
@property
def wind_meta(self):
"""Summary for the wind representative profiles.
Returns
-------
wind_meta : pd.DataFrame
Summary for the wind representative profiles.
"""
return self.data.wind_meta
@property
def hybrid_meta(self):
"""Hybridized summary for the representative profiles.
Returns
-------
hybrid_meta : pd.DataFrame
Summary for the hybridized representative profiles.
At the very least, this has a column that the data was merged on.
"""
return self.meta_hybridizer.hybrid_meta
@property
def solar_time_index(self):
"""Get the time index for the solar rep profiles.
Returns
-------
solar_time_index : pd.Datetimeindex
Time index sourced from the solar rep profile file.
"""
return self.data.solar_time_index
@property
def wind_time_index(self):
"""Get the time index for the wind rep profiles.
Returns
-------
wind_time_index : pd.Datetimeindex
Time index sourced from the wind rep profile file.
"""
return self.data.wind_time_index
@property
def hybrid_time_index(self):
"""Get the time index for the hybrid rep profiles.
Returns
-------
hybrid_time_index : pd.Datetimeindex
Time index for the hybrid rep profiles.
"""
return self.data.hybrid_time_index
@property
def profiles(self):
"""Get the arrays of the hybridized representative profiles.
Returns
-------
profiles : dict
Dict of hybridized representative profiles.
"""
return self._profiles
def run(self, fout=None, save_hybrid_meta=True):
"""Run hybridization of profiles and save to disc.
Parameters
----------
fout : str, optional
Filepath to output HDF5 file. If ``None``, output data are
not written to a file. By default, ``None``.
save_hybrid_meta : bool, optional
Flag to save hybrid SC table to hybrid rep profile output.
By default, ``True``.
Returns
-------
str
Filepath to output h5 file.
"""
self.run_meta()
self.run_profiles()
if fout is not None:
self.save_profiles(fout, save_hybrid_meta=save_hybrid_meta)
logger.info('Hybridization of representative profiles complete!')
return fout
def run_meta(self):
"""Compute the hybridized profiles.
Returns
-------
`Hybridization`
Instance of Hybridization object (itself) containing the
hybridized meta as an attribute.
"""
self.meta_hybridizer.hybridize()
return self
def run_profiles(self):
"""Compute all hybridized profiles.
Returns
-------
`Hybridization`
Instance of Hybridization object (itself) containing the
hybridized profiles as attributes.
"""
logger.info('Running hybrid profile calculations.')
self._init_profiles()
self._compute_hybridized_profile_components()
self._compute_hybridized_profiles_from_components()
logger.info('Profile hybridization complete.')
return self
def _init_profiles(self):
"""Initialize the output rep profiles attribute."""
self._profiles = {
k: np.zeros((len(self.hybrid_time_index), len(self.hybrid_meta)),
dtype=np.float32)
for k in OUTPUT_PROFILE_NAMES}
def _compute_hybridized_profile_components(self):
"""Compute the resource components of the hybridized profiles. """
for params in self.__rep_profile_hybridization_params:
col, (hybrid_idxs, solar_idxs), fpath, p_name, dset_name = params
capacity = self.hybrid_meta.loc[hybrid_idxs, col].values
with Resource(fpath) as res:
data = res[dset_name,
res.time_index.isin(self.hybrid_time_index)]
self._profiles[p_name][:, hybrid_idxs] = (data[:, solar_idxs]
* capacity)
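# Hedged numeric illustration of the component computation above: each
# resource's rep profile (assumed here to be a per-MW or capacity-factor
# style profile) is scaled by that SC point's hybrid capacity. For example,
# a profile value of 0.5 at a point with hybrid_solar_capacity = 20 (MW)
# contributes 10 MW to 'hybrid_solar_profile' at that timestep.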
@property
def __rep_profile_hybridization_params(self):
"""Zip the rep profile hybridization parameters. """
cap_col_names = ['hybrid_solar_capacity', 'hybrid_wind_capacity']
idx_maps = [self.meta_hybridizer.solar_profile_indices_map,
self.meta_hybridizer.wind_profile_indices_map]
fpaths = [self.data.solar_fpath, self.data.wind_fpath]
zipped = zip(cap_col_names, idx_maps, fpaths, OUTPUT_PROFILE_NAMES[1:],
self.data.profile_dset_names)
return zipped
def _compute_hybridized_profiles_from_components(self):
"""Compute the hybridized profiles from the resource components. """
hp_name, sp_name, wp_name = OUTPUT_PROFILE_NAMES
self._profiles[hp_name] = (self._profiles[sp_name]
+ self._profiles[wp_name])
def _init_h5_out(self, fout, save_hybrid_meta=True):
"""Initialize an output h5 file for hybrid profiles.
Parameters
----------
fout : str
Filepath to output h5 file.
save_hybrid_meta : bool
Flag to save hybrid SC table to hybrid rep profile output.
"""
dsets = []
shapes = {}
attrs = {}
chunks = {}
dtypes = {}
for dset, data in self.profiles.items():
dsets.append(dset)
shapes[dset] = data.shape
chunks[dset] = None
attrs[dset] = {Outputs.UNIT_ATTR: "MW"}
dtypes[dset] = data.dtype
meta = self.hybrid_meta.copy()
for c in meta.columns:
try:
meta[c] = pd.to_numeric(meta[c])
except ValueError:
pass
Outputs.init_h5(fout, dsets, shapes, attrs, chunks, dtypes,
meta, time_index=self.hybrid_time_index)
if save_hybrid_meta:
with Outputs(fout, mode='a') as out:
hybrid_meta = to_records_array(self.hybrid_meta)
out._create_dset('meta', hybrid_meta.shape,
hybrid_meta.dtype, data=hybrid_meta)
def _write_h5_out(self, fout, save_hybrid_meta=True):
"""Write hybrid profiles and meta to an output file.
Parameters
----------
fout : str
Filepath to output h5 file.
save_hybrid_meta : bool
Flag to save hybrid SC table to hybrid rep profile output.
"""
with Outputs(fout, mode='a') as out:
if 'meta' in out.datasets and save_hybrid_meta:
hybrid_meta = to_records_array(self.hybrid_meta)
out['meta'] = hybrid_meta
for dset, data in self.profiles.items():
out[dset] = data
def save_profiles(self, fout, save_hybrid_meta=True):
"""Initialize fout and save profiles.
Parameters
----------
fout : str
Filepath to output h5 file.
save_hybrid_meta : bool
Flag to save hybrid SC table to hybrid rep profile output.
"""
self._init_h5_out(fout, save_hybrid_meta=save_hybrid_meta)
self._write_h5_out(fout, save_hybrid_meta=save_hybrid_meta)
# /MergePythonSDK.ticketing-2.2.2-py3-none-any.whl/MergePythonSDK/hris/model/multipart_form_field_request.py
import re # noqa: F401
import sys # noqa: F401
from typing import (
Optional,
Union,
List,
Dict,
)
from MergePythonSDK.shared.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
OpenApiModel,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from MergePythonSDK.shared.exceptions import ApiAttributeError
from MergePythonSDK.shared.model_utils import import_model_by_name
def lazy_import():
from MergePythonSDK.hris.model.encoding_enum import EncodingEnum
globals()['EncodingEnum'] = EncodingEnum
class MultipartFormFieldRequest(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and, for var_name, this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and, for var_name, this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
('name',): {
'min_length': 1,
},
('data',): {
'min_length': 1,
},
('file_name',): {
'min_length': 1,
},
('content_type',): {
'min_length': 1,
},
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self; this must run after the class is loaded
"""
return (bool, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self; this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
defined_types = {
'name': (str,), # noqa: E501
'data': (str,), # noqa: E501
'encoding': (EncodingEnum, str, none_type,),
'file_name': (str, none_type, none_type,), # noqa: E501
'content_type': (str, none_type, none_type,), # noqa: E501
}
return defined_types
@cached_property
def discriminator():
return None
attribute_map = {
'name': 'name', # noqa: E501
'data': 'data', # noqa: E501
'encoding': 'encoding', # noqa: E501
'file_name': 'file_name', # noqa: E501
'content_type': 'content_type', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, name, data, *args, **kwargs): # noqa: E501
"""MultipartFormFieldRequest - a model defined in OpenAPI
Args:
name (str): The name of the form field
data (str): The data for the form field.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
encoding (bool, dict, float, int, list, str, none_type): The encoding of the value of `data`. Defaults to `RAW` if not defined. [optional] # noqa: E501
file_name (str, none_type): The file name of the form field, if the field is for a file. [optional] # noqa: E501
content_type (str, none_type): The MIME type of the file, if the field is for a file. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.name = name
self.data = data
self.encoding = kwargs.get("encoding", None)
self.file_name = kwargs.get("file_name", None)
self.content_type = kwargs.get("content_type", None)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, name, data, *args, **kwargs): # noqa: E501
"""MultipartFormFieldRequest - a model defined in OpenAPI
Args:
name (str): The name of the form field
data (str): The data for the form field.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
encoding (bool, dict, float, int, list, str, none_type): The encoding of the value of `data`. Defaults to `RAW` if not defined. [optional] # noqa: E501
file_name (str, none_type): The file name of the form field, if the field is for a file. [optional] # noqa: E501
content_type (str, none_type): The MIME type of the file, if the field is for a file. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.name: Union[str] = name
self.data: Union[str] = data
self.encoding: Union[bool, dict, float, int, list, str, none_type] = kwargs.get("encoding", None)
self.file_name: Union[str, none_type] = kwargs.get("file_name", None)
self.content_type: Union[str, none_type] = kwargs.get("content_type", None)
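# A minimal, hedged usage sketch of this generated model (the field values
# are illustrative; 'data' is the form field payload, e.g. a base64 string
# when the encoding is set to a base64 variant):
# >>> field = MultipartFormFieldRequest(name='resume', data='SGVsbG8=',
# ...                                   file_name='resume.txt',
# ...                                   content_type='text/plain')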
// /FlaskCms-0.0.4.tar.gz/FlaskCms-0.0.4/flask_cms/static/js/ace/theme-tomorrow_night_eighties.js
ace.define("ace/theme/tomorrow_night_eighties",["require","exports","module","ace/lib/dom"], function(require, exports, module) {
exports.isDark = true;
exports.cssClass = "ace-tomorrow-night-eighties";
exports.cssText = ".ace-tomorrow-night-eighties .ace_gutter {\
background: #272727;\
color: #CCC\
}\
.ace-tomorrow-night-eighties .ace_print-margin {\
width: 1px;\
background: #272727\
}\
.ace-tomorrow-night-eighties {\
background-color: #2D2D2D;\
color: #CCCCCC\
}\
.ace-tomorrow-night-eighties .ace_constant.ace_other,\
.ace-tomorrow-night-eighties .ace_cursor {\
color: #CCCCCC\
}\
.ace-tomorrow-night-eighties .ace_marker-layer .ace_selection {\
background: #515151\
}\
.ace-tomorrow-night-eighties.ace_multiselect .ace_selection.ace_start {\
box-shadow: 0 0 3px 0px #2D2D2D;\
border-radius: 2px\
}\
.ace-tomorrow-night-eighties .ace_marker-layer .ace_step {\
background: rgb(102, 82, 0)\
}\
.ace-tomorrow-night-eighties .ace_marker-layer .ace_bracket {\
margin: -1px 0 0 -1px;\
border: 1px solid #6A6A6A\
}\
.ace-tomorrow-night-bright .ace_stack {\
background: rgb(66, 90, 44)\
}\
.ace-tomorrow-night-eighties .ace_marker-layer .ace_active-line {\
background: #393939\
}\
.ace-tomorrow-night-eighties .ace_gutter-active-line {\
background-color: #393939\
}\
.ace-tomorrow-night-eighties .ace_marker-layer .ace_selected-word {\
border: 1px solid #515151\
}\
.ace-tomorrow-night-eighties .ace_invisible {\
color: #6A6A6A\
}\
.ace-tomorrow-night-eighties .ace_keyword,\
.ace-tomorrow-night-eighties .ace_meta,\
.ace-tomorrow-night-eighties .ace_storage,\
.ace-tomorrow-night-eighties .ace_storage.ace_type,\
.ace-tomorrow-night-eighties .ace_support.ace_type {\
color: #CC99CC\
}\
.ace-tomorrow-night-eighties .ace_keyword.ace_operator {\
color: #66CCCC\
}\
.ace-tomorrow-night-eighties .ace_constant.ace_character,\
.ace-tomorrow-night-eighties .ace_constant.ace_language,\
.ace-tomorrow-night-eighties .ace_constant.ace_numeric,\
.ace-tomorrow-night-eighties .ace_keyword.ace_other.ace_unit,\
.ace-tomorrow-night-eighties .ace_support.ace_constant,\
.ace-tomorrow-night-eighties .ace_variable.ace_parameter {\
color: #F99157\
}\
.ace-tomorrow-night-eighties .ace_invalid {\
color: #CDCDCD;\
background-color: #F2777A\
}\
.ace-tomorrow-night-eighties .ace_invalid.ace_deprecated {\
color: #CDCDCD;\
background-color: #CC99CC\
}\
.ace-tomorrow-night-eighties .ace_fold {\
background-color: #6699CC;\
border-color: #CCCCCC\
}\
.ace-tomorrow-night-eighties .ace_entity.ace_name.ace_function,\
.ace-tomorrow-night-eighties .ace_support.ace_function,\
.ace-tomorrow-night-eighties .ace_variable {\
color: #6699CC\
}\
.ace-tomorrow-night-eighties .ace_support.ace_class,\
.ace-tomorrow-night-eighties .ace_support.ace_type {\
color: #FFCC66\
}\
.ace-tomorrow-night-eighties .ace_heading,\
.ace-tomorrow-night-eighties .ace_markup.ace_heading,\
.ace-tomorrow-night-eighties .ace_string {\
color: #99CC99\
}\
.ace-tomorrow-night-eighties .ace_comment {\
color: #999999\
}\
.ace-tomorrow-night-eighties .ace_entity.ace_name.ace_tag,\
.ace-tomorrow-night-eighties .ace_entity.ace_other.ace_attribute-name,\
.ace-tomorrow-night-eighties .ace_meta.ace_tag,\
.ace-tomorrow-night-eighties .ace_variable {\
color: #F2777A\
}\
.ace-tomorrow-night-eighties .ace_indent-guide {\
background: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAACCAYAAACZgbYnAAAAEklEQVQImWPQ09NrYAgMjP4PAAtGAwchHMyAAAAAAElFTkSuQmCC) right repeat-y\
}";
var dom = require("../lib/dom");
dom.importCssString(exports.cssText, exports.cssClass);
}); | PypiClean |
/GeophPy-0.32.2.tar.gz/GeophPy-0.32.2/docs/_build/html/_static/js/theme.js | require=function r(s,a,l){function c(e,n){if(!a[e]){if(!s[e]){var i="function"==typeof require&&require;if(!n&&i)return i(e,!0);if(u)return u(e,!0);var t=new Error("Cannot find module '"+e+"'");throw t.code="MODULE_NOT_FOUND",t}var o=a[e]={exports:{}};s[e][0].call(o.exports,function(n){return c(s[e][1][n]||n)},o,o.exports,r,s,a,l)}return a[e].exports}for(var u="function"==typeof require&&require,n=0;n<l.length;n++)c(l[n]);return c}({"sphinx-rtd-theme":[function(n,e,i){var jQuery="undefined"!=typeof window?window.jQuery:n("jquery");e.exports.ThemeNav={navBar:null,win:null,winScroll:!1,winResize:!1,linkScroll:!1,winPosition:0,winHeight:null,docHeight:null,isRunning:!1,enable:function(e){var i=this;void 0===e&&(e=!0),i.isRunning||(i.isRunning=!0,jQuery(function(n){i.init(n),i.reset(),i.win.on("hashchange",i.reset),e&&i.win.on("scroll",function(){i.linkScroll||i.winScroll||(i.winScroll=!0,requestAnimationFrame(function(){i.onScroll()}))}),i.win.on("resize",function(){i.winResize||(i.winResize=!0,requestAnimationFrame(function(){i.onResize()}))}),i.onResize()}))},enableSticky:function(){this.enable(!0)},init:function(i){i(document);var t=this;this.navBar=i("div.wy-side-scroll:first"),this.win=i(window),i(document).on("click","[data-toggle='wy-nav-top']",function(){i("[data-toggle='wy-nav-shift']").toggleClass("shift"),i("[data-toggle='rst-versions']").toggleClass("shift")}).on("click",".wy-menu-vertical .current ul li a",function(){var n=i(this);i("[data-toggle='wy-nav-shift']").removeClass("shift"),i("[data-toggle='rst-versions']").toggleClass("shift"),t.toggleCurrent(n),t.hashChange()}).on("click","[data-toggle='rst-current-version']",function(){i("[data-toggle='rst-versions']").toggleClass("shift-up")}),i("table.docutils:not(.field-list,.footnote,.citation)").wrap("<div class='wy-table-responsive'></div>"),i("table.docutils.footnote").wrap("<div class='wy-table-responsive footnote'></div>"),i("table.docutils.citation").wrap("<div class='wy-table-responsive citation'></div>"),i(".wy-menu-vertical ul").not(".simple").siblings("a").each(function(){var e=i(this);expand=i('<span class="toctree-expand"></span>'),expand.on("click",function(n){return t.toggleCurrent(e),n.stopPropagation(),!1}),e.prepend(expand)})},reset:function(){var n=encodeURI(window.location.hash)||"#";try{var e=$(".wy-menu-vertical"),i=e.find('[href="'+n+'"]');if(0===i.length){var t=$('.document [id="'+n.substring(1)+'"]').closest("div.section");0===(i=e.find('[href="#'+t.attr("id")+'"]')).length&&(i=e.find('[href="#"]'))}0<i.length&&($(".wy-menu-vertical .current").removeClass("current"),i.addClass("current"),i.closest("li.toctree-l1").addClass("current"),i.closest("li.toctree-l1").parent().addClass("current"),i.closest("li.toctree-l1").addClass("current"),i.closest("li.toctree-l2").addClass("current"),i.closest("li.toctree-l3").addClass("current"),i.closest("li.toctree-l4").addClass("current"),i[0].scrollIntoView())}catch(o){console.log("Error expanding nav for anchor",o)}},onScroll:function(){this.winScroll=!1;var n=this.win.scrollTop(),e=n+this.winHeight,i=this.navBar.scrollTop()+(n-this.winPosition);n<0||e>this.docHeight||(this.navBar.scrollTop(i),this.winPosition=n)},onResize:function(){this.winResize=!1,this.winHeight=this.win.height(),this.docHeight=$(document).height()},hashChange:function(){this.linkScroll=!0,this.win.one("hashchange",function(){this.linkScroll=!1})},toggleCurrent:function(n){var 
e=n.closest("li");e.siblings("li.current").removeClass("current"),e.siblings().find("li.current").removeClass("current"),e.find("> ul li.current").removeClass("current"),e.toggleClass("current")}},"undefined"!=typeof window&&(window.SphinxRtdTheme={Navigation:e.exports.ThemeNav,StickyNav:e.exports.ThemeNav}),function(){for(var r=0,n=["ms","moz","webkit","o"],e=0;e<n.length&&!window.requestAnimationFrame;++e)window.requestAnimationFrame=window[n[e]+"RequestAnimationFrame"],window.cancelAnimationFrame=window[n[e]+"CancelAnimationFrame"]||window[n[e]+"CancelRequestAnimationFrame"];window.requestAnimationFrame||(window.requestAnimationFrame=function(n,e){var i=(new Date).getTime(),t=Math.max(0,16-(i-r)),o=window.setTimeout(function(){n(i+t)},t);return r=i+t,o}),window.cancelAnimationFrame||(window.cancelAnimationFrame=function(n){clearTimeout(n)})}()},{jquery:"jquery"}]},{},["sphinx-rtd-theme"]); | PypiClean |
# /FastFlask-1.2.32-py3-none-any.whl/werkzeug/datastructures.py
import base64
import codecs
import mimetypes
import re
import warnings
from collections.abc import Collection
from collections.abc import MutableSet
from copy import deepcopy
from io import BytesIO
from itertools import repeat
from os import fspath
from . import exceptions
from ._internal import _make_encode_wrapper
from ._internal import _missing
from .filesystem import get_filesystem_encoding
def is_immutable(self):
raise TypeError(f"{type(self).__name__!r} objects are immutable")
def iter_multi_items(mapping):
"""Iterates over the items of a mapping yielding keys and values
without dropping any from more complex structures.
"""
if isinstance(mapping, MultiDict):
yield from mapping.items(multi=True)
elif isinstance(mapping, dict):
for key, value in mapping.items():
if isinstance(value, (tuple, list)):
for v in value:
yield key, v
else:
yield key, value
else:
yield from mapping
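# Hedged illustration of iter_multi_items (the values are examples only):
# >>> list(iter_multi_items({'a': [1, 2], 'b': 3}))
# [('a', 1), ('a', 2), ('b', 3)]
# >>> list(iter_multi_items(MultiDict([('a', 1), ('a', 2)])))
# [('a', 1), ('a', 2)]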
class ImmutableListMixin:
"""Makes a :class:`list` immutable.
.. versionadded:: 0.5
:private:
"""
_hash_cache = None
def __hash__(self):
if self._hash_cache is not None:
return self._hash_cache
rv = self._hash_cache = hash(tuple(self))
return rv
def __reduce_ex__(self, protocol):
return type(self), (list(self),)
def __delitem__(self, key):
is_immutable(self)
def __iadd__(self, other):
is_immutable(self)
def __imul__(self, other):
is_immutable(self)
def __setitem__(self, key, value):
is_immutable(self)
def append(self, item):
is_immutable(self)
def remove(self, item):
is_immutable(self)
def extend(self, iterable):
is_immutable(self)
def insert(self, pos, value):
is_immutable(self)
def pop(self, index=-1):
is_immutable(self)
def reverse(self):
is_immutable(self)
def sort(self, key=None, reverse=False):
is_immutable(self)
class ImmutableList(ImmutableListMixin, list):
"""An immutable :class:`list`.
.. versionadded:: 0.5
:private:
"""
def __repr__(self):
return f"{type(self).__name__}({list.__repr__(self)})"
class ImmutableDictMixin:
"""Makes a :class:`dict` immutable.
.. versionadded:: 0.5
:private:
"""
_hash_cache = None
@classmethod
def fromkeys(cls, keys, value=None):
instance = super().__new__(cls)
instance.__init__(zip(keys, repeat(value)))
return instance
def __reduce_ex__(self, protocol):
return type(self), (dict(self),)
def _iter_hashitems(self):
return self.items()
def __hash__(self):
if self._hash_cache is not None:
return self._hash_cache
rv = self._hash_cache = hash(frozenset(self._iter_hashitems()))
return rv
def setdefault(self, key, default=None):
is_immutable(self)
def update(self, *args, **kwargs):
is_immutable(self)
def pop(self, key, default=None):
is_immutable(self)
def popitem(self):
is_immutable(self)
def __setitem__(self, key, value):
is_immutable(self)
def __delitem__(self, key):
is_immutable(self)
def clear(self):
is_immutable(self)
class ImmutableMultiDictMixin(ImmutableDictMixin):
"""Makes a :class:`MultiDict` immutable.
.. versionadded:: 0.5
:private:
"""
def __reduce_ex__(self, protocol):
return type(self), (list(self.items(multi=True)),)
def _iter_hashitems(self):
return self.items(multi=True)
def add(self, key, value):
is_immutable(self)
def popitemlist(self):
is_immutable(self)
def poplist(self, key):
is_immutable(self)
def setlist(self, key, new_list):
is_immutable(self)
def setlistdefault(self, key, default_list=None):
is_immutable(self)
def _calls_update(name):
def oncall(self, *args, **kw):
rv = getattr(super(UpdateDictMixin, self), name)(*args, **kw)
if self.on_update is not None:
self.on_update(self)
return rv
oncall.__name__ = name
return oncall
class UpdateDictMixin(dict):
"""Makes dicts call `self.on_update` on modifications.
.. versionadded:: 0.5
:private:
"""
on_update = None
def setdefault(self, key, default=None):
modified = key not in self
rv = super().setdefault(key, default)
if modified and self.on_update is not None:
self.on_update(self)
return rv
def pop(self, key, default=_missing):
modified = key in self
if default is _missing:
rv = super().pop(key)
else:
rv = super().pop(key, default)
if modified and self.on_update is not None:
self.on_update(self)
return rv
__setitem__ = _calls_update("__setitem__")
__delitem__ = _calls_update("__delitem__")
clear = _calls_update("clear")
popitem = _calls_update("popitem")
update = _calls_update("update")
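# A minimal, hedged sketch of how UpdateDictMixin is meant to be used
# (the subclass and callback below are illustrative, not part of Werkzeug):
# >>> class TrackedDict(UpdateDictMixin, dict):
# ...     pass
# >>> d = TrackedDict()
# >>> d.on_update = lambda mapping: print('changed')
# >>> d['a'] = 1
# changed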
class TypeConversionDict(dict):
"""Works like a regular dict but the :meth:`get` method can perform
type conversions. :class:`MultiDict` and :class:`CombinedMultiDict`
are subclasses of this class and provide the same feature.
.. versionadded:: 0.5
"""
def get(self, key, default=None, type=None):
"""Return the default value if the requested data doesn't exist.
If `type` is provided and is a callable it should convert the value,
return it or raise a :exc:`ValueError` if that is not possible. In
this case the function will return the default as if the value was not
found:
>>> d = TypeConversionDict(foo='42', bar='blub')
>>> d.get('foo', type=int)
42
>>> d.get('bar', -1, type=int)
-1
:param key: The key to be looked up.
:param default: The default value to be returned if the key can't
be looked up. If not further specified `None` is
returned.
:param type: A callable that is used to cast the value in the
:class:`MultiDict`. If a :exc:`ValueError` is raised
by this callable the default value is returned.
"""
try:
rv = self[key]
except KeyError:
return default
if type is not None:
try:
rv = type(rv)
except ValueError:
rv = default
return rv
class ImmutableTypeConversionDict(ImmutableDictMixin, TypeConversionDict):
"""Works like a :class:`TypeConversionDict` but does not support
modifications.
.. versionadded:: 0.5
"""
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return TypeConversionDict(self)
def __copy__(self):
return self
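# Illustrative sketch (hypothetical helper): ``get`` with a ``type`` callable
# falls back to the default when the conversion raises ValueError, as described
# in the docstring above.
def _type_conversion_sketch():
    d = TypeConversionDict(port="8080", name="web")
    assert d.get("port", type=int) == 8080
    assert d.get("name", -1, type=int) == -1   # int("web") raises ValueError
    assert d.get("missing", type=int) is None
    return d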
class MultiDict(TypeConversionDict):
"""A :class:`MultiDict` is a dictionary subclass customized to deal with
multiple values for the same key which is for example used by the parsing
functions in the wrappers. This is necessary because some HTML form
elements pass multiple values for the same key.
:class:`MultiDict` implements all standard dictionary methods.
Internally, it saves all values for a key as a list, but the standard dict
access methods will only return the first value for a key. If you want to
gain access to the other values, too, you have to use the `list` methods as
explained below.
Basic Usage:
>>> d = MultiDict([('a', 'b'), ('a', 'c')])
>>> d
MultiDict([('a', 'b'), ('a', 'c')])
>>> d['a']
'b'
>>> d.getlist('a')
['b', 'c']
>>> 'a' in d
True
    It behaves like a normal dict; thus all dict functions will only return the
first value when multiple values for one key are found.
From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
exceptions.
A :class:`MultiDict` can be constructed from an iterable of
``(key, value)`` tuples, a dict, a :class:`MultiDict` or from Werkzeug 0.2
onwards some keyword parameters.
:param mapping: the initial value for the :class:`MultiDict`. Either a
regular dict, an iterable of ``(key, value)`` tuples
or `None`.
"""
def __init__(self, mapping=None):
if isinstance(mapping, MultiDict):
dict.__init__(self, ((k, l[:]) for k, l in mapping.lists()))
elif isinstance(mapping, dict):
tmp = {}
for key, value in mapping.items():
if isinstance(value, (tuple, list)):
if len(value) == 0:
continue
value = list(value)
else:
value = [value]
tmp[key] = value
dict.__init__(self, tmp)
else:
tmp = {}
for key, value in mapping or ():
tmp.setdefault(key, []).append(value)
dict.__init__(self, tmp)
def __getstate__(self):
return dict(self.lists())
def __setstate__(self, value):
dict.clear(self)
dict.update(self, value)
def __iter__(self):
# Work around https://bugs.python.org/issue43246.
# (`return super().__iter__()` also works here, which makes this look
# even more like it should be a no-op, yet it isn't.)
return dict.__iter__(self)
def __getitem__(self, key):
"""Return the first data value for this key;
raises KeyError if not found.
:param key: The key to be looked up.
:raise KeyError: if the key does not exist.
"""
if key in self:
lst = dict.__getitem__(self, key)
if len(lst) > 0:
return lst[0]
raise exceptions.BadRequestKeyError(key)
def __setitem__(self, key, value):
"""Like :meth:`add` but removes an existing key first.
:param key: the key for the value.
:param value: the value to set.
"""
dict.__setitem__(self, key, [value])
def add(self, key, value):
"""Adds a new value for the key.
.. versionadded:: 0.6
:param key: the key for the value.
:param value: the value to add.
"""
dict.setdefault(self, key, []).append(value)
def getlist(self, key, type=None):
"""Return the list of items for a given key. If that key is not in the
`MultiDict`, the return value will be an empty list. Just like `get`,
`getlist` accepts a `type` parameter. All items will be converted
with the callable defined there.
:param key: The key to be looked up.
:param type: A callable that is used to cast the value in the
:class:`MultiDict`. If a :exc:`ValueError` is raised
by this callable the value will be removed from the list.
:return: a :class:`list` of all the values for the key.
"""
try:
rv = dict.__getitem__(self, key)
except KeyError:
return []
if type is None:
return list(rv)
result = []
for item in rv:
try:
result.append(type(item))
except ValueError:
pass
return result
def setlist(self, key, new_list):
"""Remove the old values for a key and add new ones. Note that the list
you pass the values in will be shallow-copied before it is inserted in
the dictionary.
>>> d = MultiDict()
>>> d.setlist('foo', ['1', '2'])
>>> d['foo']
'1'
>>> d.getlist('foo')
['1', '2']
:param key: The key for which the values are set.
:param new_list: An iterable with the new values for the key. Old values
are removed first.
"""
dict.__setitem__(self, key, list(new_list))
def setdefault(self, key, default=None):
"""Returns the value for the key if it is in the dict, otherwise it
returns `default` and sets that value for `key`.
:param key: The key to be looked up.
:param default: The default value to be returned if the key is not
in the dict. If not further specified it's `None`.
"""
if key not in self:
self[key] = default
else:
default = self[key]
return default
def setlistdefault(self, key, default_list=None):
"""Like `setdefault` but sets multiple values. The list returned
is not a copy, but the list that is actually used internally. This
means that you can put new values into the dict by appending items
to the list:
>>> d = MultiDict({"foo": 1})
>>> d.setlistdefault("foo").extend([2, 3])
>>> d.getlist("foo")
[1, 2, 3]
:param key: The key to be looked up.
:param default_list: An iterable of default values. It is either copied
(in case it was a list) or converted into a list
before returned.
:return: a :class:`list`
"""
if key not in self:
default_list = list(default_list or ())
dict.__setitem__(self, key, default_list)
else:
default_list = dict.__getitem__(self, key)
return default_list
def items(self, multi=False):
"""Return an iterator of ``(key, value)`` pairs.
:param multi: If set to `True` the iterator returned will have a pair
for each value of each key. Otherwise it will only
contain pairs for the first value of each key.
"""
for key, values in dict.items(self):
if multi:
for value in values:
yield key, value
else:
yield key, values[0]
def lists(self):
"""Return a iterator of ``(key, values)`` pairs, where values is the list
of all values associated with the key."""
for key, values in dict.items(self):
yield key, list(values)
def values(self):
"""Returns an iterator of the first value on every key's value list."""
for values in dict.values(self):
yield values[0]
def listvalues(self):
"""Return an iterator of all values associated with a key. Zipping
:meth:`keys` and this is the same as calling :meth:`lists`:
>>> d = MultiDict({"foo": [1, 2, 3]})
>>> zip(d.keys(), d.listvalues()) == d.lists()
True
"""
return dict.values(self)
def copy(self):
"""Return a shallow copy of this object."""
return self.__class__(self)
def deepcopy(self, memo=None):
"""Return a deep copy of this object."""
return self.__class__(deepcopy(self.to_dict(flat=False), memo))
def to_dict(self, flat=True):
"""Return the contents as regular dict. If `flat` is `True` the
returned dict will only have the first item present, if `flat` is
`False` all values will be returned as lists.
:param flat: If set to `False` the dict returned will have lists
with all the values in it. Otherwise it will only
contain the first value for each key.
:return: a :class:`dict`
"""
if flat:
return dict(self.items())
return dict(self.lists())
def update(self, mapping):
"""update() extends rather than replaces existing key lists:
>>> a = MultiDict({'x': 1})
>>> b = MultiDict({'x': 2, 'y': 3})
>>> a.update(b)
>>> a
MultiDict([('y', 3), ('x', 1), ('x', 2)])
If the value list for a key in ``other_dict`` is empty, no new values
will be added to the dict and the key will not be created:
>>> x = {'empty_list': []}
>>> y = MultiDict()
>>> y.update(x)
>>> y
MultiDict([])
"""
for key, value in iter_multi_items(mapping):
MultiDict.add(self, key, value)
def pop(self, key, default=_missing):
"""Pop the first item for a list on the dict. Afterwards the
key is removed from the dict, so additional values are discarded:
>>> d = MultiDict({"foo": [1, 2, 3]})
>>> d.pop("foo")
1
>>> "foo" in d
False
:param key: the key to pop.
:param default: if provided the value to return if the key was
not in the dictionary.
"""
try:
lst = dict.pop(self, key)
if len(lst) == 0:
raise exceptions.BadRequestKeyError(key)
return lst[0]
except KeyError:
if default is not _missing:
return default
raise exceptions.BadRequestKeyError(key) from None
def popitem(self):
"""Pop an item from the dict."""
try:
item = dict.popitem(self)
if len(item[1]) == 0:
raise exceptions.BadRequestKeyError(item[0])
return (item[0], item[1][0])
except KeyError as e:
raise exceptions.BadRequestKeyError(e.args[0]) from None
def poplist(self, key):
"""Pop the list for a key from the dict. If the key is not in the dict
an empty list is returned.
.. versionchanged:: 0.5
           If the key no longer exists, an empty list is returned instead of
           raising an error.
"""
return dict.pop(self, key, [])
def popitemlist(self):
"""Pop a ``(key, list)`` tuple from the dict."""
try:
return dict.popitem(self)
except KeyError as e:
raise exceptions.BadRequestKeyError(e.args[0]) from None
def __copy__(self):
return self.copy()
def __deepcopy__(self, memo):
return self.deepcopy(memo=memo)
def __repr__(self):
return f"{type(self).__name__}({list(self.items(multi=True))!r})"
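# Illustrative sketch (hypothetical helper): dict-style access on a MultiDict
# returns the first value for a key, while getlist()/to_dict(flat=False) expose
# every stored value.
def _multi_dict_sketch():
    d = MultiDict([("a", "b")])
    d.add("a", "c")
    assert d["a"] == "b"
    assert d.getlist("a") == ["b", "c"]
    return d.to_dict(flat=False)  # {'a': ['b', 'c']}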
class _omd_bucket:
"""Wraps values in the :class:`OrderedMultiDict`. This makes it
possible to keep an order over multiple different keys. It requires
a lot of extra memory and slows down access a lot, but makes it
possible to access elements in O(1) and iterate in O(n).
"""
__slots__ = ("prev", "key", "value", "next")
def __init__(self, omd, key, value):
self.prev = omd._last_bucket
self.key = key
self.value = value
self.next = None
if omd._first_bucket is None:
omd._first_bucket = self
if omd._last_bucket is not None:
omd._last_bucket.next = self
omd._last_bucket = self
def unlink(self, omd):
if self.prev:
self.prev.next = self.next
if self.next:
self.next.prev = self.prev
if omd._first_bucket is self:
omd._first_bucket = self.next
if omd._last_bucket is self:
omd._last_bucket = self.prev
class OrderedMultiDict(MultiDict):
"""Works like a regular :class:`MultiDict` but preserves the
order of the fields. To convert the ordered multi dict into a
list you can use the :meth:`items` method and pass it ``multi=True``.
In general an :class:`OrderedMultiDict` is an order of magnitude
slower than a :class:`MultiDict`.
.. admonition:: note
Due to a limitation in Python you cannot convert an ordered
multi dict into a regular dict by using ``dict(multidict)``.
Instead you have to use the :meth:`to_dict` method, otherwise
the internal bucket objects are exposed.
"""
def __init__(self, mapping=None):
dict.__init__(self)
self._first_bucket = self._last_bucket = None
if mapping is not None:
OrderedMultiDict.update(self, mapping)
def __eq__(self, other):
if not isinstance(other, MultiDict):
return NotImplemented
if isinstance(other, OrderedMultiDict):
iter1 = iter(self.items(multi=True))
iter2 = iter(other.items(multi=True))
try:
for k1, v1 in iter1:
k2, v2 = next(iter2)
if k1 != k2 or v1 != v2:
return False
except StopIteration:
return False
try:
next(iter2)
except StopIteration:
return True
return False
if len(self) != len(other):
return False
for key, values in self.lists():
if other.getlist(key) != values:
return False
return True
__hash__ = None
def __reduce_ex__(self, protocol):
return type(self), (list(self.items(multi=True)),)
def __getstate__(self):
return list(self.items(multi=True))
def __setstate__(self, values):
dict.clear(self)
for key, value in values:
self.add(key, value)
def __getitem__(self, key):
if key in self:
return dict.__getitem__(self, key)[0].value
raise exceptions.BadRequestKeyError(key)
def __setitem__(self, key, value):
self.poplist(key)
self.add(key, value)
def __delitem__(self, key):
self.pop(key)
def keys(self):
return (key for key, value in self.items())
def __iter__(self):
return iter(self.keys())
def values(self):
return (value for key, value in self.items())
def items(self, multi=False):
ptr = self._first_bucket
if multi:
while ptr is not None:
yield ptr.key, ptr.value
ptr = ptr.next
else:
returned_keys = set()
while ptr is not None:
if ptr.key not in returned_keys:
returned_keys.add(ptr.key)
yield ptr.key, ptr.value
ptr = ptr.next
def lists(self):
returned_keys = set()
ptr = self._first_bucket
while ptr is not None:
if ptr.key not in returned_keys:
yield ptr.key, self.getlist(ptr.key)
returned_keys.add(ptr.key)
ptr = ptr.next
def listvalues(self):
for _key, values in self.lists():
yield values
def add(self, key, value):
dict.setdefault(self, key, []).append(_omd_bucket(self, key, value))
def getlist(self, key, type=None):
try:
rv = dict.__getitem__(self, key)
except KeyError:
return []
if type is None:
return [x.value for x in rv]
result = []
for item in rv:
try:
result.append(type(item.value))
except ValueError:
pass
return result
def setlist(self, key, new_list):
self.poplist(key)
for value in new_list:
self.add(key, value)
def setlistdefault(self, key, default_list=None):
raise TypeError("setlistdefault is unsupported for ordered multi dicts")
def update(self, mapping):
for key, value in iter_multi_items(mapping):
OrderedMultiDict.add(self, key, value)
def poplist(self, key):
buckets = dict.pop(self, key, ())
for bucket in buckets:
bucket.unlink(self)
return [x.value for x in buckets]
def pop(self, key, default=_missing):
try:
buckets = dict.pop(self, key)
except KeyError:
if default is not _missing:
return default
raise exceptions.BadRequestKeyError(key) from None
for bucket in buckets:
bucket.unlink(self)
return buckets[0].value
def popitem(self):
try:
key, buckets = dict.popitem(self)
except KeyError as e:
raise exceptions.BadRequestKeyError(e.args[0]) from None
for bucket in buckets:
bucket.unlink(self)
return key, buckets[0].value
def popitemlist(self):
try:
key, buckets = dict.popitem(self)
except KeyError as e:
raise exceptions.BadRequestKeyError(e.args[0]) from None
for bucket in buckets:
bucket.unlink(self)
return key, [x.value for x in buckets]
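# Illustrative sketch (hypothetical helper): unlike MultiDict, insertion order
# is preserved across different keys when iterating with items(multi=True), at
# the cost of the _omd_bucket bookkeeping above.
def _ordered_multi_dict_sketch():
    d = OrderedMultiDict()
    d.add("a", 1)
    d.add("b", 2)
    d.add("a", 3)
    assert list(d.items(multi=True)) == [("a", 1), ("b", 2), ("a", 3)]
    assert d.getlist("a") == [1, 3]
    return d.to_dict(flat=False)  # {'a': [1, 3], 'b': [2]}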
def _options_header_vkw(value, kw):
return http.dump_options_header(
value, {k.replace("_", "-"): v for k, v in kw.items()}
)
def _unicodify_header_value(value):
if isinstance(value, bytes):
value = value.decode("latin-1")
if not isinstance(value, str):
value = str(value)
return value
class Headers:
"""An object that stores some headers. It has a dict-like interface,
but is ordered, can store the same key multiple times, and iterating
yields ``(key, value)`` pairs instead of only keys.
This data structure is useful if you want a nicer way to handle WSGI
headers which are stored as tuples in a list.
From Werkzeug 0.3 onwards, the :exc:`KeyError` raised by this class is
also a subclass of the :class:`~exceptions.BadRequest` HTTP exception
and will render a page for a ``400 BAD REQUEST`` if caught in a
catch-all for HTTP exceptions.
Headers is mostly compatible with the Python :class:`wsgiref.headers.Headers`
class, with the exception of `__getitem__`. :mod:`wsgiref` will return
`None` for ``headers['missing']``, whereas :class:`Headers` will raise
a :class:`KeyError`.
To create a new :class:`Headers` object pass it a list or dict of headers
which are used as default values. This does not reuse the list passed
to the constructor for internal usage.
:param defaults: The list of default values for the :class:`Headers`.
.. versionchanged:: 0.9
This data structure now stores unicode values similar to how the
multi dicts do it. The main difference is that bytes can be set as
well which will automatically be latin1 decoded.
.. versionchanged:: 0.9
The :meth:`linked` function was removed without replacement as it
was an API that does not support the changes to the encoding model.
"""
def __init__(self, defaults=None):
self._list = []
if defaults is not None:
if isinstance(defaults, (list, Headers)):
self._list.extend(defaults)
else:
self.extend(defaults)
def __getitem__(self, key, _get_mode=False):
if not _get_mode:
if isinstance(key, int):
return self._list[key]
elif isinstance(key, slice):
return self.__class__(self._list[key])
if not isinstance(key, str):
raise exceptions.BadRequestKeyError(key)
ikey = key.lower()
for k, v in self._list:
if k.lower() == ikey:
return v
# micro optimization: if we are in get mode we will catch that
# exception one stack level down so we can raise a standard
# key error instead of our special one.
if _get_mode:
raise KeyError()
raise exceptions.BadRequestKeyError(key)
def __eq__(self, other):
def lowered(item):
return (item[0].lower(),) + item[1:]
return other.__class__ is self.__class__ and set(
map(lowered, other._list)
) == set(map(lowered, self._list))
__hash__ = None
def get(self, key, default=None, type=None, as_bytes=False):
"""Return the default value if the requested data doesn't exist.
If `type` is provided and is a callable it should convert the value,
return it or raise a :exc:`ValueError` if that is not possible. In
this case the function will return the default as if the value was not
found:
>>> d = Headers([('Content-Length', '42')])
>>> d.get('Content-Length', type=int)
42
.. versionadded:: 0.9
Added support for `as_bytes`.
:param key: The key to be looked up.
:param default: The default value to be returned if the key can't
be looked up. If not further specified `None` is
returned.
:param type: A callable that is used to cast the value in the
:class:`Headers`. If a :exc:`ValueError` is raised
by this callable the default value is returned.
:param as_bytes: return bytes instead of strings.
"""
try:
rv = self.__getitem__(key, _get_mode=True)
except KeyError:
return default
if as_bytes:
rv = rv.encode("latin1")
if type is None:
return rv
try:
return type(rv)
except ValueError:
return default
def getlist(self, key, type=None, as_bytes=False):
"""Return the list of items for a given key. If that key is not in the
:class:`Headers`, the return value will be an empty list. Just like
:meth:`get`, :meth:`getlist` accepts a `type` parameter. All items will
be converted with the callable defined there.
.. versionadded:: 0.9
Added support for `as_bytes`.
:param key: The key to be looked up.
:param type: A callable that is used to cast the value in the
:class:`Headers`. If a :exc:`ValueError` is raised
by this callable the value will be removed from the list.
:return: a :class:`list` of all the values for the key.
:param as_bytes: return bytes instead of strings.
"""
ikey = key.lower()
result = []
for k, v in self:
if k.lower() == ikey:
if as_bytes:
v = v.encode("latin1")
if type is not None:
try:
v = type(v)
except ValueError:
continue
result.append(v)
return result
def get_all(self, name):
"""Return a list of all the values for the named field.
This method is compatible with the :mod:`wsgiref`
:meth:`~wsgiref.headers.Headers.get_all` method.
"""
return self.getlist(name)
def items(self, lower=False):
for key, value in self:
if lower:
key = key.lower()
yield key, value
def keys(self, lower=False):
for key, _ in self.items(lower):
yield key
def values(self):
for _, value in self.items():
yield value
def extend(self, *args, **kwargs):
"""Extend headers in this object with items from another object
containing header items as well as keyword arguments.
To replace existing keys instead of extending, use
:meth:`update` instead.
If provided, the first argument can be another :class:`Headers`
object, a :class:`MultiDict`, :class:`dict`, or iterable of
pairs.
.. versionchanged:: 1.0
Support :class:`MultiDict`. Allow passing ``kwargs``.
"""
if len(args) > 1:
raise TypeError(f"update expected at most 1 arguments, got {len(args)}")
if args:
for key, value in iter_multi_items(args[0]):
self.add(key, value)
for key, value in iter_multi_items(kwargs):
self.add(key, value)
def __delitem__(self, key, _index_operation=True):
if _index_operation and isinstance(key, (int, slice)):
del self._list[key]
return
key = key.lower()
new = []
for k, v in self._list:
if k.lower() != key:
new.append((k, v))
self._list[:] = new
def remove(self, key):
"""Remove a key.
:param key: The key to be removed.
"""
return self.__delitem__(key, _index_operation=False)
def pop(self, key=None, default=_missing):
"""Removes and returns a key or index.
:param key: The key to be popped. If this is an integer the item at
that position is removed, if it's a string the value for
that key is. If the key is omitted or `None` the last
item is removed.
:return: an item.
"""
if key is None:
return self._list.pop()
if isinstance(key, int):
return self._list.pop(key)
try:
rv = self[key]
self.remove(key)
except KeyError:
if default is not _missing:
return default
raise
return rv
def popitem(self):
"""Removes a key or index and returns a (key, value) item."""
return self.pop()
def __contains__(self, key):
"""Check if a key is present."""
try:
self.__getitem__(key, _get_mode=True)
except KeyError:
return False
return True
def has_key(self, key):
"""
.. deprecated:: 2.0
Will be removed in Werkzeug 2.1. Use ``key in data``
instead.
"""
warnings.warn(
"'has_key' is deprecated and will be removed in Werkzeug"
" 2.1. Use 'key in data' instead.",
DeprecationWarning,
stacklevel=2,
)
return key in self
def __iter__(self):
"""Yield ``(key, value)`` tuples."""
return iter(self._list)
def __len__(self):
return len(self._list)
def add(self, _key, _value, **kw):
"""Add a new header tuple to the list.
Keyword arguments can specify additional parameters for the header
value, with underscores converted to dashes::
>>> d = Headers()
>>> d.add('Content-Type', 'text/plain')
>>> d.add('Content-Disposition', 'attachment', filename='foo.png')
The keyword argument dumping uses :func:`dump_options_header`
behind the scenes.
.. versionadded:: 0.4.1
keyword arguments were added for :mod:`wsgiref` compatibility.
"""
if kw:
_value = _options_header_vkw(_value, kw)
_key = _unicodify_header_value(_key)
_value = _unicodify_header_value(_value)
self._validate_value(_value)
self._list.append((_key, _value))
def _validate_value(self, value):
if not isinstance(value, str):
raise TypeError("Value should be a string.")
if "\n" in value or "\r" in value:
raise ValueError(
"Detected newline in header value. This is "
"a potential security problem"
)
def add_header(self, _key, _value, **_kw):
"""Add a new header tuple to the list.
An alias for :meth:`add` for compatibility with the :mod:`wsgiref`
:meth:`~wsgiref.headers.Headers.add_header` method.
"""
self.add(_key, _value, **_kw)
def clear(self):
"""Clears all headers."""
del self._list[:]
def set(self, _key, _value, **kw):
"""Remove all header tuples for `key` and add a new one. The newly
added key either appears at the end of the list if there was no
entry or replaces the first one.
Keyword arguments can specify additional parameters for the header
value, with underscores converted to dashes. See :meth:`add` for
more information.
.. versionchanged:: 0.6.1
:meth:`set` now accepts the same arguments as :meth:`add`.
:param key: The key to be inserted.
:param value: The value to be inserted.
"""
if kw:
_value = _options_header_vkw(_value, kw)
_key = _unicodify_header_value(_key)
_value = _unicodify_header_value(_value)
self._validate_value(_value)
if not self._list:
self._list.append((_key, _value))
return
listiter = iter(self._list)
ikey = _key.lower()
for idx, (old_key, _old_value) in enumerate(listiter):
if old_key.lower() == ikey:
# replace first occurrence
self._list[idx] = (_key, _value)
break
else:
self._list.append((_key, _value))
return
self._list[idx + 1 :] = [t for t in listiter if t[0].lower() != ikey]
def setlist(self, key, values):
"""Remove any existing values for a header and add new ones.
:param key: The header key to set.
:param values: An iterable of values to set for the key.
.. versionadded:: 1.0
"""
if values:
values_iter = iter(values)
self.set(key, next(values_iter))
for value in values_iter:
self.add(key, value)
else:
self.remove(key)
def setdefault(self, key, default):
"""Return the first value for the key if it is in the headers,
otherwise set the header to the value given by ``default`` and
return that.
:param key: The header key to get.
:param default: The value to set for the key if it is not in the
headers.
"""
if key in self:
return self[key]
self.set(key, default)
return default
def setlistdefault(self, key, default):
"""Return the list of values for the key if it is in the
headers, otherwise set the header to the list of values given
by ``default`` and return that.
Unlike :meth:`MultiDict.setlistdefault`, modifying the returned
list will not affect the headers.
:param key: The header key to get.
:param default: An iterable of values to set for the key if it
is not in the headers.
.. versionadded:: 1.0
"""
if key not in self:
self.setlist(key, default)
return self.getlist(key)
def __setitem__(self, key, value):
"""Like :meth:`set` but also supports index/slice based setting."""
if isinstance(key, (slice, int)):
if isinstance(key, int):
value = [value]
value = [
(_unicodify_header_value(k), _unicodify_header_value(v))
for (k, v) in value
]
for (_, v) in value:
self._validate_value(v)
if isinstance(key, int):
self._list[key] = value[0]
else:
self._list[key] = value
else:
self.set(key, value)
def update(self, *args, **kwargs):
"""Replace headers in this object with items from another
headers object and keyword arguments.
To extend existing keys instead of replacing, use :meth:`extend`
instead.
If provided, the first argument can be another :class:`Headers`
object, a :class:`MultiDict`, :class:`dict`, or iterable of
pairs.
.. versionadded:: 1.0
"""
if len(args) > 1:
raise TypeError(f"update expected at most 1 arguments, got {len(args)}")
if args:
mapping = args[0]
if isinstance(mapping, (Headers, MultiDict)):
for key in mapping.keys():
self.setlist(key, mapping.getlist(key))
elif isinstance(mapping, dict):
for key, value in mapping.items():
if isinstance(value, (list, tuple)):
self.setlist(key, value)
else:
self.set(key, value)
else:
for key, value in mapping:
self.set(key, value)
for key, value in kwargs.items():
if isinstance(value, (list, tuple)):
self.setlist(key, value)
else:
self.set(key, value)
def to_wsgi_list(self):
"""Convert the headers into a list suitable for WSGI.
:return: list
"""
return list(self)
def copy(self):
return self.__class__(self._list)
def __copy__(self):
return self.copy()
def __str__(self):
"""Returns formatted headers suitable for HTTP transmission."""
strs = []
for key, value in self.to_wsgi_list():
strs.append(f"{key}: {value}")
strs.append("\r\n")
return "\r\n".join(strs)
def __repr__(self):
return f"{type(self).__name__}({list(self)!r})"
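# Illustrative sketch (hypothetical helper): Headers keeps insertion order,
# allows duplicate keys, and matches names case-insensitively on lookup.
def _headers_sketch():
    h = Headers()
    h.add("Content-Type", "text/plain")
    h.add("X-Tag", "a")
    h.add("X-Tag", "b")
    assert h["content-type"] == "text/plain"
    assert h.getlist("X-Tag") == ["a", "b"]
    h.set("X-Tag", "c")      # replaces both previous values with one
    return h.to_wsgi_list()  # [('Content-Type', 'text/plain'), ('X-Tag', 'c')]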
class ImmutableHeadersMixin:
"""Makes a :class:`Headers` immutable. We do not mark them as
    hashable though since the only use case for this data structure
    in Werkzeug is a view on a mutable structure.
.. versionadded:: 0.5
:private:
"""
def __delitem__(self, key, **kwargs):
is_immutable(self)
def __setitem__(self, key, value):
is_immutable(self)
def set(self, _key, _value, **kw):
is_immutable(self)
def setlist(self, key, values):
is_immutable(self)
def add(self, _key, _value, **kw):
is_immutable(self)
def add_header(self, _key, _value, **_kw):
is_immutable(self)
def remove(self, key):
is_immutable(self)
def extend(self, *args, **kwargs):
is_immutable(self)
def update(self, *args, **kwargs):
is_immutable(self)
def insert(self, pos, value):
is_immutable(self)
def pop(self, key=None, default=_missing):
is_immutable(self)
def popitem(self):
is_immutable(self)
def setdefault(self, key, default):
is_immutable(self)
def setlistdefault(self, key, default):
is_immutable(self)
class EnvironHeaders(ImmutableHeadersMixin, Headers):
"""Read only version of the headers from a WSGI environment. This
provides the same interface as `Headers` and is constructed from
a WSGI environment.
From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
render a page for a ``400 BAD REQUEST`` if caught in a catch-all for
HTTP exceptions.
"""
def __init__(self, environ):
self.environ = environ
def __eq__(self, other):
return self.environ is other.environ
__hash__ = None
def __getitem__(self, key, _get_mode=False):
# _get_mode is a no-op for this class as there is no index but
# used because get() calls it.
if not isinstance(key, str):
raise KeyError(key)
key = key.upper().replace("-", "_")
if key in ("CONTENT_TYPE", "CONTENT_LENGTH"):
return _unicodify_header_value(self.environ[key])
return _unicodify_header_value(self.environ[f"HTTP_{key}"])
def __len__(self):
# the iter is necessary because otherwise list calls our
# len which would call list again and so forth.
return len(list(iter(self)))
def __iter__(self):
for key, value in self.environ.items():
if key.startswith("HTTP_") and key not in (
"HTTP_CONTENT_TYPE",
"HTTP_CONTENT_LENGTH",
):
yield (
key[5:].replace("_", "-").title(),
_unicodify_header_value(value),
)
elif key in ("CONTENT_TYPE", "CONTENT_LENGTH") and value:
yield (key.replace("_", "-").title(), _unicodify_header_value(value))
def copy(self):
raise TypeError(f"cannot create {type(self).__name__!r} copies")
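# Illustrative sketch (hypothetical helper and environ values): header names are
# reconstructed from the ``HTTP_*`` WSGI keys plus CONTENT_TYPE/CONTENT_LENGTH,
# and the view is read-only.
def _environ_headers_sketch():
    environ = {
        "CONTENT_TYPE": "text/plain",
        "HTTP_X_FORWARDED_FOR": "127.0.0.1",
    }
    eh = EnvironHeaders(environ)
    assert eh["Content-Type"] == "text/plain"
    assert eh["X-Forwarded-For"] == "127.0.0.1"
    try:
        eh["X-Other"] = "nope"
    except TypeError:
        pass  # ImmutableHeadersMixin rejects modification
    return list(eh)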
class CombinedMultiDict(ImmutableMultiDictMixin, MultiDict):
"""A read only :class:`MultiDict` that you can pass multiple :class:`MultiDict`
instances as sequence and it will combine the return values of all wrapped
dicts:
>>> from werkzeug.datastructures import CombinedMultiDict, MultiDict
>>> post = MultiDict([('foo', 'bar')])
>>> get = MultiDict([('blub', 'blah')])
>>> combined = CombinedMultiDict([get, post])
>>> combined['foo']
'bar'
>>> combined['blub']
'blah'
    This works for all read operations and will raise a `TypeError` for
    methods that usually change data, since modification is not possible.
From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
exceptions.
"""
def __reduce_ex__(self, protocol):
return type(self), (self.dicts,)
def __init__(self, dicts=None):
self.dicts = list(dicts) or []
@classmethod
def fromkeys(cls, keys, value=None):
raise TypeError(f"cannot create {cls.__name__!r} instances by fromkeys")
def __getitem__(self, key):
for d in self.dicts:
if key in d:
return d[key]
raise exceptions.BadRequestKeyError(key)
def get(self, key, default=None, type=None):
for d in self.dicts:
if key in d:
if type is not None:
try:
return type(d[key])
except ValueError:
continue
return d[key]
return default
def getlist(self, key, type=None):
rv = []
for d in self.dicts:
rv.extend(d.getlist(key, type))
return rv
def _keys_impl(self):
"""This function exists so __len__ can be implemented more efficiently,
saving one list creation from an iterator.
"""
rv = set()
rv.update(*self.dicts)
return rv
def keys(self):
return self._keys_impl()
def __iter__(self):
return iter(self.keys())
def items(self, multi=False):
found = set()
for d in self.dicts:
for key, value in d.items(multi):
if multi:
yield key, value
elif key not in found:
found.add(key)
yield key, value
def values(self):
for _key, value in self.items():
yield value
def lists(self):
rv = {}
for d in self.dicts:
for key, values in d.lists():
rv.setdefault(key, []).extend(values)
return list(rv.items())
def listvalues(self):
return (x[1] for x in self.lists())
def copy(self):
"""Return a shallow mutable copy of this object.
This returns a :class:`MultiDict` representing the data at the
time of copying. The copy will no longer reflect changes to the
wrapped dicts.
.. versionchanged:: 0.15
Return a mutable :class:`MultiDict`.
"""
return MultiDict(self)
def to_dict(self, flat=True):
"""Return the contents as regular dict. If `flat` is `True` the
returned dict will only have the first item present, if `flat` is
`False` all values will be returned as lists.
:param flat: If set to `False` the dict returned will have lists
with all the values in it. Otherwise it will only
contain the first item for each key.
:return: a :class:`dict`
"""
if flat:
return dict(self.items())
return dict(self.lists())
def __len__(self):
return len(self._keys_impl())
def __contains__(self, key):
for d in self.dicts:
if key in d:
return True
return False
def has_key(self, key):
"""
.. deprecated:: 2.0
Will be removed in Werkzeug 2.1. Use ``key in data``
instead.
"""
warnings.warn(
"'has_key' is deprecated and will be removed in Werkzeug"
" 2.1. Use 'key in data' instead.",
DeprecationWarning,
stacklevel=2,
)
return key in self
def __repr__(self):
return f"{type(self).__name__}({self.dicts!r})"
class FileMultiDict(MultiDict):
"""A special :class:`MultiDict` that has convenience methods to add
files to it. This is used for :class:`EnvironBuilder` and generally
    useful for unit testing.
.. versionadded:: 0.5
"""
def add_file(self, name, file, filename=None, content_type=None):
"""Adds a new file to the dict. `file` can be a file name or
a :class:`file`-like or a :class:`FileStorage` object.
:param name: the name of the field.
:param file: a filename or :class:`file`-like object
:param filename: an optional filename
:param content_type: an optional content type
"""
if isinstance(file, FileStorage):
value = file
else:
if isinstance(file, str):
if filename is None:
filename = file
file = open(file, "rb")
if filename and content_type is None:
content_type = (
mimetypes.guess_type(filename)[0] or "application/octet-stream"
)
value = FileStorage(file, filename, name, content_type)
self.add(name, value)
class ImmutableDict(ImmutableDictMixin, dict):
"""An immutable :class:`dict`.
.. versionadded:: 0.5
"""
def __repr__(self):
return f"{type(self).__name__}({dict.__repr__(self)})"
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return dict(self)
def __copy__(self):
return self
class ImmutableMultiDict(ImmutableMultiDictMixin, MultiDict):
"""An immutable :class:`MultiDict`.
.. versionadded:: 0.5
"""
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return MultiDict(self)
def __copy__(self):
return self
class ImmutableOrderedMultiDict(ImmutableMultiDictMixin, OrderedMultiDict):
"""An immutable :class:`OrderedMultiDict`.
.. versionadded:: 0.6
"""
def _iter_hashitems(self):
return enumerate(self.items(multi=True))
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return OrderedMultiDict(self)
def __copy__(self):
return self
class Accept(ImmutableList):
"""An :class:`Accept` object is just a list subclass for lists of
``(value, quality)`` tuples. It is automatically sorted by specificity
and quality.
All :class:`Accept` objects work similar to a list but provide extra
functionality for working with the data. Containment checks are
normalized to the rules of that header:
>>> a = CharsetAccept([('ISO-8859-1', 1), ('utf-8', 0.7)])
>>> a.best
'ISO-8859-1'
>>> 'iso-8859-1' in a
True
>>> 'UTF8' in a
True
>>> 'utf7' in a
False
To get the quality for an item you can use normal item lookup:
    >>> print(a['utf-8'])
0.7
>>> a['utf7']
0
.. versionchanged:: 0.5
:class:`Accept` objects are forced immutable now.
.. versionchanged:: 1.0.0
:class:`Accept` internal values are no longer ordered
alphabetically for equal quality tags. Instead the initial
order is preserved.
"""
def __init__(self, values=()):
if values is None:
list.__init__(self)
self.provided = False
elif isinstance(values, Accept):
self.provided = values.provided
list.__init__(self, values)
else:
self.provided = True
values = sorted(
values, key=lambda x: (self._specificity(x[0]), x[1]), reverse=True
)
list.__init__(self, values)
def _specificity(self, value):
"""Returns a tuple describing the value's specificity."""
return (value != "*",)
def _value_matches(self, value, item):
"""Check if a value matches a given accept item."""
return item == "*" or item.lower() == value.lower()
def __getitem__(self, key):
"""Besides index lookup (getting item n) you can also pass it a string
to get the quality for the item. If the item is not in the list, the
returned quality is ``0``.
"""
if isinstance(key, str):
return self.quality(key)
return list.__getitem__(self, key)
def quality(self, key):
"""Returns the quality of the key.
.. versionadded:: 0.6
In previous versions you had to use the item-lookup syntax
(eg: ``obj[key]`` instead of ``obj.quality(key)``)
"""
for item, quality in self:
if self._value_matches(key, item):
return quality
return 0
def __contains__(self, value):
for item, _quality in self:
if self._value_matches(value, item):
return True
return False
def __repr__(self):
pairs_str = ", ".join(f"({x!r}, {y})" for x, y in self)
return f"{type(self).__name__}([{pairs_str}])"
def index(self, key):
"""Get the position of an entry or raise :exc:`ValueError`.
:param key: The key to be looked up.
.. versionchanged:: 0.5
This used to raise :exc:`IndexError`, which was inconsistent
with the list API.
"""
if isinstance(key, str):
for idx, (item, _quality) in enumerate(self):
if self._value_matches(key, item):
return idx
raise ValueError(key)
return list.index(self, key)
def find(self, key):
"""Get the position of an entry or return -1.
:param key: The key to be looked up.
"""
try:
return self.index(key)
except ValueError:
return -1
def values(self):
"""Iterate over all values."""
for item in self:
yield item[0]
def to_header(self):
"""Convert the header set into an HTTP header string."""
result = []
for value, quality in self:
if quality != 1:
value = f"{value};q={quality}"
result.append(value)
return ",".join(result)
def __str__(self):
return self.to_header()
def _best_single_match(self, match):
for client_item, quality in self:
if self._value_matches(match, client_item):
# self is sorted by specificity descending, we can exit
return client_item, quality
return None
def best_match(self, matches, default=None):
"""Returns the best match from a list of possible matches based
on the specificity and quality of the client. If two items have the
        same quality and specificity, the one that comes first is returned.
:param matches: a list of matches to check for
:param default: the value that is returned if none match
"""
result = default
best_quality = -1
best_specificity = (-1,)
for server_item in matches:
match = self._best_single_match(server_item)
if not match:
continue
client_item, quality = match
specificity = self._specificity(client_item)
if quality <= 0 or quality < best_quality:
continue
# better quality or same quality but more specific => better match
if quality > best_quality or specificity > best_specificity:
result = server_item
best_quality = quality
best_specificity = specificity
return result
@property
def best(self):
"""The best match as value."""
if self:
return self[0][0]
_mime_split_re = re.compile(r"/|(?:\s*;\s*)")
def _normalize_mime(value):
return _mime_split_re.split(value.lower())
class MIMEAccept(Accept):
"""Like :class:`Accept` but with special methods and behavior for
mimetypes.
"""
def _specificity(self, value):
return tuple(x != "*" for x in _mime_split_re.split(value))
def _value_matches(self, value, item):
# item comes from the client, can't match if it's invalid.
if "/" not in item:
return False
# value comes from the application, tell the developer when it
# doesn't look valid.
if "/" not in value:
raise ValueError(f"invalid mimetype {value!r}")
# Split the match value into type, subtype, and a sorted list of parameters.
normalized_value = _normalize_mime(value)
value_type, value_subtype = normalized_value[:2]
value_params = sorted(normalized_value[2:])
# "*/*" is the only valid value that can start with "*".
if value_type == "*" and value_subtype != "*":
raise ValueError(f"invalid mimetype {value!r}")
# Split the accept item into type, subtype, and parameters.
normalized_item = _normalize_mime(item)
item_type, item_subtype = normalized_item[:2]
item_params = sorted(normalized_item[2:])
# "*/not-*" from the client is invalid, can't match.
if item_type == "*" and item_subtype != "*":
return False
return (
(item_type == "*" and item_subtype == "*")
or (value_type == "*" and value_subtype == "*")
) or (
item_type == value_type
and (
item_subtype == "*"
or value_subtype == "*"
or (item_subtype == value_subtype and item_params == value_params)
)
)
@property
def accept_html(self):
"""True if this object accepts HTML."""
return (
"text/html" in self or "application/xhtml+xml" in self or self.accept_xhtml
)
@property
def accept_xhtml(self):
"""True if this object accepts XHTML."""
return "application/xhtml+xml" in self or "application/xml" in self
@property
def accept_json(self):
"""True if this object accepts JSON."""
return "application/json" in self
_locale_delim_re = re.compile(r"[_-]")
def _normalize_lang(value):
"""Process a language tag for matching."""
return _locale_delim_re.split(value.lower())
class LanguageAccept(Accept):
"""Like :class:`Accept` but with normalization for language tags."""
def _value_matches(self, value, item):
return item == "*" or _normalize_lang(value) == _normalize_lang(item)
def best_match(self, matches, default=None):
"""Given a list of supported values, finds the best match from
the list of accepted values.
Language tags are normalized for the purpose of matching, but
are returned unchanged.
If no exact match is found, this will fall back to matching
the first subtag (primary language only), first with the
accepted values then with the match values. This partial is not
applied to any other language subtags.
The default is returned if no exact or fallback match is found.
:param matches: A list of supported languages to find a match.
:param default: The value that is returned if none match.
"""
# Look for an exact match first. If a client accepts "en-US",
# "en-US" is a valid match at this point.
result = super().best_match(matches)
if result is not None:
return result
# Fall back to accepting primary tags. If a client accepts
# "en-US", "en" is a valid match at this point. Need to use
# re.split to account for 2 or 3 letter codes.
fallback = Accept(
[(_locale_delim_re.split(item[0], 1)[0], item[1]) for item in self]
)
result = fallback.best_match(matches)
if result is not None:
return result
# Fall back to matching primary tags. If the client accepts
# "en", "en-US" is a valid match at this point.
fallback_matches = [_locale_delim_re.split(item, 1)[0] for item in matches]
result = super().best_match(fallback_matches)
# Return a value from the original match list. Find the first
# original value that starts with the matched primary tag.
if result is not None:
return next(item for item in matches if item.startswith(result))
return default
class CharsetAccept(Accept):
"""Like :class:`Accept` but with normalization for charsets."""
def _value_matches(self, value, item):
def _normalize(name):
try:
return codecs.lookup(name).name
except LookupError:
return name.lower()
return item == "*" or _normalize(value) == _normalize(item)
def cache_control_property(key, empty, type):
"""Return a new property object for a cache header. Useful if you
want to add support for a cache extension in a subclass.
.. versionchanged:: 2.0
Renamed from ``cache_property``.
"""
return property(
lambda x: x._get_cache_value(key, empty, type),
lambda x, v: x._set_cache_value(key, v, type),
lambda x: x._del_cache_value(key),
f"accessor for {key!r}",
)
def cache_property(key, empty, type):
warnings.warn(
"'cache_property' is renamed to 'cache_control_property'. The"
" old name is deprecated and will be removed in Werkzeug 2.1.",
DeprecationWarning,
stacklevel=2,
)
return cache_control_property(key, empty, type)
class _CacheControl(UpdateDictMixin, dict):
"""Subclass of a dict that stores values for a Cache-Control header. It
has accessors for all the cache-control directives specified in RFC 2616.
The class does not differentiate between request and response directives.
    Because the cache-control directives in the HTTP header use dashes, the
    Python descriptors use underscores instead.
To get a header of the :class:`CacheControl` object again you can convert
the object into a string or call the :meth:`to_header` method. If you plan
to subclass it and add your own items have a look at the sourcecode for
that class.
.. versionchanged:: 0.4
Setting `no_cache` or `private` to boolean `True` will set the implicit
none-value which is ``*``:
>>> cc = ResponseCacheControl()
>>> cc.no_cache = True
>>> cc
<ResponseCacheControl 'no-cache'>
>>> cc.no_cache
'*'
>>> cc.no_cache = None
>>> cc
<ResponseCacheControl ''>
In versions before 0.5 the behavior documented here affected the now
no longer existing `CacheControl` class.
"""
no_cache = cache_control_property("no-cache", "*", None)
no_store = cache_control_property("no-store", None, bool)
max_age = cache_control_property("max-age", -1, int)
no_transform = cache_control_property("no-transform", None, None)
def __init__(self, values=(), on_update=None):
dict.__init__(self, values or ())
self.on_update = on_update
self.provided = values is not None
def _get_cache_value(self, key, empty, type):
"""Used internally by the accessor properties."""
if type is bool:
return key in self
if key in self:
value = self[key]
if value is None:
return empty
elif type is not None:
try:
value = type(value)
except ValueError:
pass
return value
return None
def _set_cache_value(self, key, value, type):
"""Used internally by the accessor properties."""
if type is bool:
if value:
self[key] = None
else:
self.pop(key, None)
else:
if value is None:
self.pop(key, None)
elif value is True:
self[key] = None
else:
self[key] = value
def _del_cache_value(self, key):
"""Used internally by the accessor properties."""
if key in self:
del self[key]
def to_header(self):
"""Convert the stored values into a cache control header."""
return http.dump_header(self)
def __str__(self):
return self.to_header()
def __repr__(self):
kv_str = " ".join(f"{k}={v!r}" for k, v in sorted(self.items()))
return f"<{type(self).__name__} {kv_str}>"
cache_property = staticmethod(cache_control_property)
class RequestCacheControl(ImmutableDictMixin, _CacheControl):
"""A cache control for requests. This is immutable and gives access
to all the request-relevant cache control headers.
To get a header of the :class:`RequestCacheControl` object again you can
convert the object into a string or call the :meth:`to_header` method. If
you plan to subclass it and add your own items have a look at the sourcecode
for that class.
.. versionadded:: 0.5
In previous versions a `CacheControl` class existed that was used
both for request and response.
"""
max_stale = cache_control_property("max-stale", "*", int)
min_fresh = cache_control_property("min-fresh", "*", int)
only_if_cached = cache_control_property("only-if-cached", None, bool)
class ResponseCacheControl(_CacheControl):
"""A cache control for responses. Unlike :class:`RequestCacheControl`
this is mutable and gives access to response-relevant cache control
headers.
To get a header of the :class:`ResponseCacheControl` object again you can
convert the object into a string or call the :meth:`to_header` method. If
you plan to subclass it and add your own items have a look at the sourcecode
for that class.
.. versionadded:: 0.5
In previous versions a `CacheControl` class existed that was used
both for request and response.
"""
public = cache_control_property("public", None, bool)
private = cache_control_property("private", "*", None)
must_revalidate = cache_control_property("must-revalidate", None, bool)
proxy_revalidate = cache_control_property("proxy-revalidate", None, bool)
s_maxage = cache_control_property("s-maxage", None, None)
immutable = cache_control_property("immutable", None, bool)
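# Illustrative sketch (hypothetical helper; exact serialization is delegated to
# http.dump_header): directive properties map to dashed header keys, and boolean
# directives are rendered without a value.
def _response_cache_control_sketch():
    cc = ResponseCacheControl()
    cc.max_age = 3600
    cc.must_revalidate = True
    cc.public = True
    return cc.to_header()  # e.g. "max-age=3600, must-revalidate, public"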
def csp_property(key):
"""Return a new property object for a content security policy header.
Useful if you want to add support for a csp extension in a
subclass.
"""
return property(
lambda x: x._get_value(key),
lambda x, v: x._set_value(key, v),
lambda x: x._del_value(key),
f"accessor for {key!r}",
)
class ContentSecurityPolicy(UpdateDictMixin, dict):
"""Subclass of a dict that stores values for a Content Security Policy
header. It has accessors for all the level 3 policies.
    Because the csp directives in the HTTP header use dashes, the
    Python descriptors use underscores instead.
    To get a header of the :class:`ContentSecurityPolicy` object again
you can convert the object into a string or call the
:meth:`to_header` method. If you plan to subclass it and add your
own items have a look at the sourcecode for that class.
.. versionadded:: 1.0.0
Support for Content Security Policy headers was added.
"""
base_uri = csp_property("base-uri")
child_src = csp_property("child-src")
connect_src = csp_property("connect-src")
default_src = csp_property("default-src")
font_src = csp_property("font-src")
form_action = csp_property("form-action")
frame_ancestors = csp_property("frame-ancestors")
frame_src = csp_property("frame-src")
img_src = csp_property("img-src")
manifest_src = csp_property("manifest-src")
media_src = csp_property("media-src")
navigate_to = csp_property("navigate-to")
object_src = csp_property("object-src")
prefetch_src = csp_property("prefetch-src")
plugin_types = csp_property("plugin-types")
report_to = csp_property("report-to")
report_uri = csp_property("report-uri")
sandbox = csp_property("sandbox")
script_src = csp_property("script-src")
script_src_attr = csp_property("script-src-attr")
script_src_elem = csp_property("script-src-elem")
style_src = csp_property("style-src")
style_src_attr = csp_property("style-src-attr")
style_src_elem = csp_property("style-src-elem")
worker_src = csp_property("worker-src")
def __init__(self, values=(), on_update=None):
dict.__init__(self, values or ())
self.on_update = on_update
self.provided = values is not None
def _get_value(self, key):
"""Used internally by the accessor properties."""
return self.get(key)
def _set_value(self, key, value):
"""Used internally by the accessor properties."""
if value is None:
self.pop(key, None)
else:
self[key] = value
def _del_value(self, key):
"""Used internally by the accessor properties."""
if key in self:
del self[key]
def to_header(self):
"""Convert the stored values into a cache control header."""
return http.dump_csp_header(self)
def __str__(self):
return self.to_header()
def __repr__(self):
kv_str = " ".join(f"{k}={v!r}" for k, v in sorted(self.items()))
return f"<{type(self).__name__} {kv_str}>"
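# Illustrative sketch (hypothetical helper and policy values; serialization is
# delegated to http.dump_csp_header): each csp_property maps an underscored
# attribute to the dashed directive name.
def _content_security_policy_sketch():
    csp = ContentSecurityPolicy()
    csp.default_src = "'self'"
    csp.img_src = "*"
    csp.script_src = "'self' cdn.example.com"
    return csp.to_header()  # e.g. "default-src 'self'; img-src *; ..."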
class CallbackDict(UpdateDictMixin, dict):
"""A dict that calls a function passed every time something is changed.
The function is passed the dict instance.
"""
def __init__(self, initial=None, on_update=None):
dict.__init__(self, initial or ())
self.on_update = on_update
def __repr__(self):
return f"<{type(self).__name__} {dict.__repr__(self)}>"
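# Illustrative sketch (hypothetical helper): the callback receives the dict
# itself on every modification, so three mutations fire three callbacks.
def _callback_dict_sketch():
    changes = []
    d = CallbackDict({"a": 1}, on_update=changes.append)
    d["b"] = 2
    d.update(c=3)
    del d["a"]
    return len(changes)  # 3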
class HeaderSet(MutableSet):
"""Similar to the :class:`ETags` class this implements a set-like structure.
Unlike :class:`ETags` this is case insensitive and used for vary, allow, and
content-language headers.
If not constructed using the :func:`parse_set_header` function the
instantiation works like this:
>>> hs = HeaderSet(['foo', 'bar', 'baz'])
>>> hs
HeaderSet(['foo', 'bar', 'baz'])
"""
def __init__(self, headers=None, on_update=None):
self._headers = list(headers or ())
self._set = {x.lower() for x in self._headers}
self.on_update = on_update
def add(self, header):
"""Add a new header to the set."""
self.update((header,))
def remove(self, header):
"""Remove a header from the set. This raises an :exc:`KeyError` if the
header is not in the set.
.. versionchanged:: 0.5
In older versions a :exc:`IndexError` was raised instead of a
:exc:`KeyError` if the object was missing.
:param header: the header to be removed.
"""
key = header.lower()
if key not in self._set:
raise KeyError(header)
self._set.remove(key)
        for idx, item in enumerate(self._headers):
            if item.lower() == key:
del self._headers[idx]
break
if self.on_update is not None:
self.on_update(self)
def update(self, iterable):
"""Add all the headers from the iterable to the set.
:param iterable: updates the set with the items from the iterable.
"""
inserted_any = False
for header in iterable:
key = header.lower()
if key not in self._set:
self._headers.append(header)
self._set.add(key)
inserted_any = True
if inserted_any and self.on_update is not None:
self.on_update(self)
def discard(self, header):
"""Like :meth:`remove` but ignores errors.
:param header: the header to be discarded.
"""
try:
self.remove(header)
except KeyError:
pass
def find(self, header):
"""Return the index of the header in the set or return -1 if not found.
:param header: the header to be looked up.
"""
header = header.lower()
for idx, item in enumerate(self._headers):
if item.lower() == header:
return idx
return -1
def index(self, header):
"""Return the index of the header in the set or raise an
:exc:`IndexError`.
:param header: the header to be looked up.
"""
rv = self.find(header)
if rv < 0:
raise IndexError(header)
return rv
def clear(self):
"""Clear the set."""
self._set.clear()
del self._headers[:]
if self.on_update is not None:
self.on_update(self)
def as_set(self, preserve_casing=False):
"""Return the set as real python set type. When calling this, all
the items are converted to lowercase and the ordering is lost.
:param preserve_casing: if set to `True` the items in the set returned
will have the original case like in the
:class:`HeaderSet`, otherwise they will
be lowercase.
"""
if preserve_casing:
return set(self._headers)
return set(self._set)
def to_header(self):
"""Convert the header set into an HTTP header string."""
return ", ".join(map(http.quote_header_value, self._headers))
def __getitem__(self, idx):
return self._headers[idx]
def __delitem__(self, idx):
rv = self._headers.pop(idx)
self._set.remove(rv.lower())
if self.on_update is not None:
self.on_update(self)
def __setitem__(self, idx, value):
old = self._headers[idx]
self._set.remove(old.lower())
self._headers[idx] = value
self._set.add(value.lower())
if self.on_update is not None:
self.on_update(self)
def __contains__(self, header):
return header.lower() in self._set
def __len__(self):
return len(self._set)
def __iter__(self):
return iter(self._headers)
def __bool__(self):
return bool(self._set)
def __str__(self):
return self.to_header()
def __repr__(self):
return f"{type(self).__name__}({self._headers!r})"
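# Illustrative sketch (hypothetical helper): membership and removal are
# case-insensitive while the original casing is preserved for serialization.
def _header_set_sketch():
    hs = HeaderSet(["Accept", "Cookie"])
    assert "accept" in hs
    hs.add("X-Custom")
    hs.discard("cookie")
    return hs.to_header()  # e.g. "Accept, X-Custom"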
class ETags(Collection):
"""A set that can be used to check if one etag is present in a collection
of etags.
"""
def __init__(self, strong_etags=None, weak_etags=None, star_tag=False):
if not star_tag and strong_etags:
self._strong = frozenset(strong_etags)
else:
self._strong = frozenset()
self._weak = frozenset(weak_etags or ())
self.star_tag = star_tag
def as_set(self, include_weak=False):
"""Convert the `ETags` object into a python set. Per default all the
weak etags are not part of this set."""
rv = set(self._strong)
if include_weak:
rv.update(self._weak)
return rv
def is_weak(self, etag):
"""Check if an etag is weak."""
return etag in self._weak
def is_strong(self, etag):
"""Check if an etag is strong."""
return etag in self._strong
def contains_weak(self, etag):
"""Check if an etag is part of the set including weak and strong tags."""
return self.is_weak(etag) or self.contains(etag)
def contains(self, etag):
"""Check if an etag is part of the set ignoring weak tags.
It is also possible to use the ``in`` operator.
"""
if self.star_tag:
return True
return self.is_strong(etag)
def contains_raw(self, etag):
"""When passed a quoted tag it will check if this tag is part of the
set. If the tag is weak it is checked against weak and strong tags,
otherwise strong only."""
etag, weak = http.unquote_etag(etag)
if weak:
return self.contains_weak(etag)
return self.contains(etag)
def to_header(self):
"""Convert the etags set into a HTTP header string."""
if self.star_tag:
return "*"
return ", ".join(
[f'"{x}"' for x in self._strong] + [f'W/"{x}"' for x in self._weak]
)
def __call__(self, etag=None, data=None, include_weak=False):
if [etag, data].count(None) != 1:
raise TypeError("either tag or data required, but at least one")
if etag is None:
etag = http.generate_etag(data)
if include_weak:
if etag in self._weak:
return True
return etag in self._strong
def __bool__(self):
return bool(self.star_tag or self._strong or self._weak)
def __str__(self):
return self.to_header()
def __len__(self):
return len(self._strong)
def __iter__(self):
return iter(self._strong)
def __contains__(self, etag):
return self.contains(etag)
def __repr__(self):
return f"<{type(self).__name__} {str(self)!r}>"
class IfRange:
"""Very simple object that represents the `If-Range` header in parsed
form. It will either have neither a etag or date or one of either but
never both.
.. versionadded:: 0.7
"""
def __init__(self, etag=None, date=None):
#: The etag parsed and unquoted. Ranges always operate on strong
#: etags so the weakness information is not necessary.
self.etag = etag
#: The date in parsed format or `None`.
self.date = date
def to_header(self):
"""Converts the object back into an HTTP header."""
if self.date is not None:
return http.http_date(self.date)
if self.etag is not None:
return http.quote_etag(self.etag)
return ""
def __str__(self):
return self.to_header()
def __repr__(self):
return f"<{type(self).__name__} {str(self)!r}>"
class Range:
"""Represents a ``Range`` header. All methods only support only
bytes as the unit. Stores a list of ranges if given, but the methods
only work if only one range is provided.
:raise ValueError: If the ranges provided are invalid.
.. versionchanged:: 0.15
The ranges passed in are validated.
.. versionadded:: 0.7
"""
def __init__(self, units, ranges):
#: The units of this range. Usually "bytes".
self.units = units
#: A list of ``(begin, end)`` tuples for the range header provided.
#: The ranges are non-inclusive.
self.ranges = ranges
for start, end in ranges:
if start is None or (end is not None and (start < 0 or start >= end)):
raise ValueError(f"{(start, end)} is not a valid range.")
def range_for_length(self, length):
"""If the range is for bytes, the length is not None and there is
exactly one range and it is satisfiable it returns a ``(start, stop)``
tuple, otherwise `None`.
"""
if self.units != "bytes" or length is None or len(self.ranges) != 1:
return None
start, end = self.ranges[0]
if end is None:
end = length
if start < 0:
start += length
if http.is_byte_range_valid(start, end, length):
return start, min(end, length)
return None
def make_content_range(self, length):
"""Creates a :class:`~werkzeug.datastructures.ContentRange` object
from the current range and given content length.
"""
rng = self.range_for_length(length)
if rng is not None:
return ContentRange(self.units, rng[0], rng[1], length)
return None
def to_header(self):
"""Converts the object back into an HTTP header."""
ranges = []
for begin, end in self.ranges:
if end is None:
ranges.append(f"{begin}-" if begin >= 0 else str(begin))
else:
ranges.append(f"{begin}-{end - 1}")
return f"{self.units}={','.join(ranges)}"
def to_content_range_header(self, length):
"""Converts the object into `Content-Range` HTTP header,
based on given length
"""
range = self.range_for_length(length)
if range is not None:
return f"{self.units} {range[0]}-{range[1] - 1}/{length}"
return None
def __str__(self):
return self.to_header()
def __repr__(self):
return f"<{type(self).__name__} {str(self)!r}>"
def _callback_property(name):
def fget(self):
return getattr(self, name)
def fset(self, value):
setattr(self, name, value)
if self.on_update is not None:
self.on_update(self)
return property(fget, fset)
class ContentRange:
"""Represents the content range header.
.. versionadded:: 0.7
"""
def __init__(self, units, start, stop, length=None, on_update=None):
assert http.is_byte_range_valid(start, stop, length), "Bad range provided"
self.on_update = on_update
self.set(start, stop, length, units)
#: The units to use, usually "bytes"
units = _callback_property("_units")
#: The start point of the range or `None`.
start = _callback_property("_start")
#: The stop point of the range (non-inclusive) or `None`. Can only be
#: `None` if also start is `None`.
stop = _callback_property("_stop")
#: The length of the range or `None`.
length = _callback_property("_length")
def set(self, start, stop, length=None, units="bytes"):
"""Simple method to update the ranges."""
assert http.is_byte_range_valid(start, stop, length), "Bad range provided"
self._units = units
self._start = start
self._stop = stop
self._length = length
if self.on_update is not None:
self.on_update(self)
def unset(self):
"""Sets the units to `None` which indicates that the header should
no longer be used.
"""
self.set(None, None, units=None)
def to_header(self):
if self.units is None:
return ""
if self.length is None:
length = "*"
else:
length = self.length
if self.start is None:
return f"{self.units} */{length}"
return f"{self.units} {self.start}-{self.stop - 1}/{length}"
def __bool__(self):
return self.units is not None
def __str__(self):
return self.to_header()
def __repr__(self):
return f"<{type(self).__name__} {str(self)!r}>"
class Authorization(ImmutableDictMixin, dict):
"""Represents an ``Authorization`` header sent by the client.
This is returned by
:func:`~werkzeug.http.parse_authorization_header`. It can be useful
to create the object manually to pass to the test
:class:`~werkzeug.test.Client`.
.. versionchanged:: 0.5
This object became immutable.
"""
def __init__(self, auth_type, data=None):
dict.__init__(self, data or {})
self.type = auth_type
@property
def username(self):
"""The username transmitted. This is set for both basic and digest
auth all the time.
"""
return self.get("username")
@property
def password(self):
"""When the authentication type is basic this is the password
transmitted by the client, else `None`.
"""
return self.get("password")
@property
def realm(self):
"""This is the server realm sent back for HTTP digest auth."""
return self.get("realm")
@property
def nonce(self):
"""The nonce the server sent for digest auth, sent back by the client.
A nonce should be unique for every 401 response for HTTP digest auth.
"""
return self.get("nonce")
@property
def uri(self):
"""The URI from Request-URI of the Request-Line; duplicated because
proxies are allowed to change the Request-Line in transit. HTTP
digest auth only.
"""
return self.get("uri")
@property
def nc(self):
"""The nonce count value transmitted by clients if a qop-header is
also transmitted. HTTP digest auth only.
"""
return self.get("nc")
@property
def cnonce(self):
"""If the server sent a qop-header in the ``WWW-Authenticate``
header, the client has to provide this value for HTTP digest auth.
See the RFC for more details.
"""
return self.get("cnonce")
@property
def response(self):
"""A string of 32 hex digits computed as defined in RFC 2617, which
proves that the user knows a password. Digest auth only.
"""
return self.get("response")
@property
def opaque(self):
"""The opaque header from the server returned unchanged by the client.
It is recommended that this string be base64 or hexadecimal data.
Digest auth only.
"""
return self.get("opaque")
@property
def qop(self):
"""Indicates what "quality of protection" the client has applied to
the message for HTTP digest auth. Note that this is a single token,
not a quoted list of alternatives as in WWW-Authenticate.
"""
return self.get("qop")
def to_header(self):
"""Convert to a string value for an ``Authorization`` header.
.. versionadded:: 2.0
Added to support passing authorization to the test client.
"""
if self.type == "basic":
value = base64.b64encode(
f"{self.username}:{self.password}".encode()
).decode("utf8")
return f"Basic {value}"
if self.type == "digest":
return f"Digest {http.dump_header(self)}"
raise ValueError(f"Unsupported type {self.type!r}.")
def auth_property(name, doc=None):
"""A static helper function for Authentication subclasses to add
extra authentication system properties onto a class::
class FooAuthenticate(WWWAuthenticate):
special_realm = auth_property('special_realm')
For more information have a look at the sourcecode to see how the
regular properties (:attr:`realm` etc.) are implemented.
"""
def _set_value(self, value):
if value is None:
self.pop(name, None)
else:
self[name] = str(value)
return property(lambda x: x.get(name), _set_value, doc=doc)
def _set_property(name, doc=None):
def fget(self):
def on_update(header_set):
if not header_set and name in self:
del self[name]
elif header_set:
self[name] = header_set.to_header()
return http.parse_set_header(self.get(name), on_update)
return property(fget, doc=doc)
class WWWAuthenticate(UpdateDictMixin, dict):
"""Provides simple access to `WWW-Authenticate` headers."""
#: list of keys that require quoting in the generated header
_require_quoting = frozenset(["domain", "nonce", "opaque", "realm", "qop"])
def __init__(self, auth_type=None, values=None, on_update=None):
dict.__init__(self, values or ())
if auth_type:
self["__auth_type__"] = auth_type
self.on_update = on_update
def set_basic(self, realm="authentication required"):
"""Clear the auth info and enable basic auth."""
dict.clear(self)
dict.update(self, {"__auth_type__": "basic", "realm": realm})
if self.on_update:
self.on_update(self)
def set_digest(
self, realm, nonce, qop=("auth",), opaque=None, algorithm=None, stale=False
):
"""Clear the auth info and enable digest auth."""
d = {
"__auth_type__": "digest",
"realm": realm,
"nonce": nonce,
"qop": http.dump_header(qop),
}
if stale:
d["stale"] = "TRUE"
if opaque is not None:
d["opaque"] = opaque
if algorithm is not None:
d["algorithm"] = algorithm
dict.clear(self)
dict.update(self, d)
if self.on_update:
self.on_update(self)
def to_header(self):
"""Convert the stored values into a WWW-Authenticate header."""
d = dict(self)
auth_type = d.pop("__auth_type__", None) or "basic"
kv_items = (
(k, http.quote_header_value(v, allow_token=k not in self._require_quoting))
for k, v in d.items()
)
kv_string = ", ".join([f"{k}={v}" for k, v in kv_items])
return f"{auth_type.title()} {kv_string}"
def __str__(self):
return self.to_header()
def __repr__(self):
return f"<{type(self).__name__} {self.to_header()!r}>"
type = auth_property(
"__auth_type__",
doc="""The type of the auth mechanism. HTTP currently specifies
``Basic`` and ``Digest``.""",
)
realm = auth_property(
"realm",
doc="""A string to be displayed to users so they know which
username and password to use. This string should contain at
least the name of the host performing the authentication and
might additionally indicate the collection of users who might
have access.""",
)
domain = _set_property(
"domain",
doc="""A list of URIs that define the protection space. If a URI
is an absolute path, it is relative to the canonical root URL of
the server being accessed.""",
)
nonce = auth_property(
"nonce",
doc="""
A server-specified data string which should be uniquely generated
each time a 401 response is made. It is recommended that this
string be base64 or hexadecimal data.""",
)
opaque = auth_property(
"opaque",
doc="""A string of data, specified by the server, which should
be returned by the client unchanged in the Authorization header
of subsequent requests with URIs in the same protection space.
It is recommended that this string be base64 or hexadecimal
data.""",
)
algorithm = auth_property(
"algorithm",
doc="""A string indicating a pair of algorithms used to produce
the digest and a checksum. If this is not present it is assumed
to be "MD5". If the algorithm is not understood, the challenge
should be ignored (and a different one used, if there is more
than one).""",
)
qop = _set_property(
"qop",
doc="""A set of quality-of-privacy directives such as auth and
auth-int.""",
)
@property
def stale(self):
"""A flag, indicating that the previous request from the client
was rejected because the nonce value was stale.
"""
val = self.get("stale")
if val is not None:
return val.lower() == "true"
@stale.setter
def stale(self, value):
if value is None:
self.pop("stale", None)
else:
self["stale"] = "TRUE" if value else "FALSE"
auth_property = staticmethod(auth_property)
class FileStorage:
"""The :class:`FileStorage` class is a thin wrapper over incoming files.
It is used by the request object to represent uploaded files. All the
attributes of the wrapper stream are proxied by the file storage so
it's possible to do ``storage.read()`` instead of the long form
``storage.stream.read()``.
"""
def __init__(
self,
stream=None,
filename=None,
name=None,
content_type=None,
content_length=None,
headers=None,
):
self.name = name
self.stream = stream or BytesIO()
# if no filename is provided we can attempt to get the filename
# from the stream object passed. There we have to be careful to
# skip things like <fdopen>, <stderr> etc. Python marks these
        # special filenames with angle brackets.
if filename is None:
filename = getattr(stream, "name", None)
s = _make_encode_wrapper(filename)
if filename and filename[0] == s("<") and filename[-1] == s(">"):
filename = None
# Make sure the filename is not bytes. This might happen if
# the file was opened from the bytes API.
if isinstance(filename, bytes):
filename = filename.decode(get_filesystem_encoding(), "replace")
self.filename = filename
if headers is None:
headers = Headers()
self.headers = headers
if content_type is not None:
headers["Content-Type"] = content_type
if content_length is not None:
headers["Content-Length"] = str(content_length)
def _parse_content_type(self):
if not hasattr(self, "_parsed_content_type"):
self._parsed_content_type = http.parse_options_header(self.content_type)
@property
def content_type(self):
"""The content-type sent in the header. Usually not available"""
return self.headers.get("content-type")
@property
def content_length(self):
"""The content-length sent in the header. Usually not available"""
return int(self.headers.get("content-length") or 0)
@property
def mimetype(self):
"""Like :attr:`content_type`, but without parameters (eg, without
charset, type etc.) and always lowercase. For example if the content
type is ``text/HTML; charset=utf-8`` the mimetype would be
``'text/html'``.
.. versionadded:: 0.7
"""
self._parse_content_type()
return self._parsed_content_type[0].lower()
@property
def mimetype_params(self):
"""The mimetype parameters as dict. For example if the content
type is ``text/html; charset=utf-8`` the params would be
``{'charset': 'utf-8'}``.
.. versionadded:: 0.7
"""
self._parse_content_type()
return self._parsed_content_type[1]
def save(self, dst, buffer_size=16384):
"""Save the file to a destination path or file object. If the
destination is a file object you have to close it yourself after the
call. The buffer size is the number of bytes held in memory during
the copy process. It defaults to 16KB.
For secure file saving also have a look at :func:`secure_filename`.
:param dst: a filename, :class:`os.PathLike`, or open file
object to write to.
:param buffer_size: Passed as the ``length`` parameter of
:func:`shutil.copyfileobj`.
.. versionchanged:: 1.0
Supports :mod:`pathlib`.
"""
from shutil import copyfileobj
close_dst = False
if hasattr(dst, "__fspath__"):
dst = fspath(dst)
if isinstance(dst, str):
dst = open(dst, "wb")
close_dst = True
try:
copyfileobj(self.stream, dst, buffer_size)
finally:
if close_dst:
dst.close()
def close(self):
"""Close the underlying file if possible."""
try:
self.stream.close()
except Exception:
pass
def __bool__(self):
return bool(self.filename)
def __getattr__(self, name):
try:
return getattr(self.stream, name)
except AttributeError:
# SpooledTemporaryFile doesn't implement IOBase, get the
# attribute from its backing file instead.
# https://github.com/python/cpython/pull/3249
if hasattr(self.stream, "_file"):
return getattr(self.stream._file, name)
raise
def __iter__(self):
return iter(self.stream)
def __repr__(self):
return f"<{type(self).__name__}: {self.filename!r} ({self.content_type!r})>"
# circular dependencies
from . import http
/Flask-Captcha-New-0.2.0.tar.gz/Flask-Captcha-New-0.2.0/flask_captcha/models.py
from flask_sqlalchemy import SQLAlchemy
from flask_captcha.helpers import get_challenge
from flask import current_app
import datetime
import random
import time
import unicodedata
import six
db = SQLAlchemy()
# Heavily based on session key generation in Django
# Use the system (hardware-based) random number generator if it exists.
if hasattr(random, 'SystemRandom'):
randrange = random.SystemRandom().randrange
else:
randrange = random.randrange
MAX_RANDOM_KEY = 18446744073709551616 # 2 << 63
import hashlib # sha for Python 2.5+
def get_cache():
from werkzeug.contrib.cache import SimpleCache
return SimpleCache()
def get_safe_now():
return datetime.datetime.utcnow()
# captcha wraparound counter, singleton
class CaptchaSequenceCache():
instance = None
def __init__(self, max_value):
self.max_value = max_value
self.cache = get_cache()
@classmethod
def get(cls):
if cls.instance is None:
cls.instance = CaptchaSequenceCache(current_app.config['CAPTCHA_PREGEN_MAX'])
return cls.instance
def current(self):
seq = self.cache.get('seq')
if seq is not None:
return seq
else:
return 0
def next(self):
seq = self.cache.get('seq')
if seq is not None:
seq = (seq + 1) % self.max_value
self.cache.set('seq', seq)
else:
self.cache.set('seq', 0)
seq = 0
return seq
# NOTE: replaced by cache implementation above
# we use a regular table to not have to deal with the hassle of sqlite not supporting sequences
class CaptchaSequence(db.Model):
__tablename__ = 'captcha_sequence'
value = db.Column(db.Integer, primary_key=True)
max_value = db.Column(db.Integer)
def __init__(self, start, max_value):
self.value = start
self.max_value = max_value
@classmethod
def init(cls):
start = current_app.config['CAPTCHA_PREGEN_START']
max_value = current_app.config['CAPTCHA_PREGEN_MAX']
sequence = CaptchaSequence(start, max_value)
db.session.add(sequence)
db.session.commit()
@classmethod
def get(cls):
row = db.session.query(CaptchaSequence).first()
if row is not None:
ret = row.value
else:
cls.init()
ret = 0
return ret
@classmethod
def next(cls):
row = db.session.query(CaptchaSequence).first()
if row is not None:
row.value = (row.value + 1) % row.max_value
ret = row.value
db.session.commit()
else:
cls.init()
ret = 0
return ret
class CaptchaStore(db.Model):
__tablename__ = 'captcha_store'
__table_args__ = {'sqlite_autoincrement': True}
index = db.Column(db.Integer, index=True)
challenge = db.Column(db.String(32))
response = db.Column(db.String(32))
hashkey = db.Column(db.String(40), primary_key=True)
expiration = db.Column(db.DateTime, default=datetime.datetime.utcnow)
def save(self, *args, **kwargs):
self.response = six.text_type(self.response).lower()
if not self.expiration:
self.set_expiration()
if not self.hashkey:
key_ = unicodedata.normalize(
'NFKD',
str(randrange(0, MAX_RANDOM_KEY)) +
str(time.time()) + six.text_type(self.challenge)
).encode('ascii', 'ignore') +\
unicodedata.normalize('NFKD', six.text_type(self.response)).\
encode('ascii', 'ignore')
            self.hashkey = hashlib.sha1(key_).hexdigest()
            del key_
db.session.add(self)
db.session.commit()
def set_expiration(self):
timeout = current_app.config['CAPTCHA_TIMEOUT']
self.expiration = (get_safe_now() +
datetime.timedelta(minutes=int(timeout)))
@classmethod
def validate(cls, hashkey, response):
        '''
        Return True if ``response`` matches the stored response for ``hashkey``
        and the key has not expired, otherwise False.
        '''
find = db.session.query(CaptchaStore).filter(
CaptchaStore.hashkey==hashkey,
CaptchaStore.expiration > get_safe_now())
if find.count() == 0:
return False
ret = (find.first().response == response)
if not current_app.config['CAPTCHA_PREGEN']:
db.session.delete(find.first())
db.session.commit()
return ret
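    # Illustrative usage (a sketch; assumes an active Flask application
    # context and an initialised ``db`` session):
    #   key = CaptchaStore.generate_key()           # stores a new challenge/response
    #   row = db.session.query(CaptchaStore).get(key)
    #   CaptchaStore.validate(key, row.response)    # -> True until the key expires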
def __unicode__(self):
return self.challenge
@classmethod
def remove_expired(cls):
items = db.session.query(CaptchaStore).\
filter(CaptchaStore.expiration <= get_safe_now())
for i in items:
db.session.delete(i)
db.session.commit()
@classmethod
def generate_key(cls):
c = cls.generate()
return c.hashkey
@classmethod
    def generate(cls, index=0):
challenge, response = get_challenge()()
c = CaptchaStore()
c.challenge = challenge
c.response = response
c.index = index
c.save()
return c
@classmethod
def get_all(cls):
items = db.session.query(CaptchaStore)
ret = []
for i in items:
ret.append({
"key": i.hashkey,
"index": i.index,
"challenge": i.challenge,
"response": i.response,
"expiration": i.expiration
})
return ret
@classmethod
def delete_all(cls):
        ret = db.session.query(CaptchaStore).delete()
db.session.commit()
        return ret
/NEMO_CE-1.6.12-py3-none-any.whl/NEMO/static/datetimepicker/bootstrap-datetimepicker.js
/*!
The MIT License (MIT)
Copyright (c) 2015 Jonathan Peterson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
/*global define:false */
/*global exports:false */
/*global require:false */
/*global jQuery:false */
/*global moment:false */
(function (factory) {
'use strict';
if (typeof define === 'function' && define.amd) {
// AMD is used - Register as an anonymous module.
define(['jquery', 'moment'], factory);
} else if (typeof exports === 'object') {
module.exports = factory(require('jquery'), require('moment'));
} else {
// Neither AMD nor CommonJS used. Use global variables.
if (typeof jQuery === 'undefined') {
throw 'bootstrap-datetimepicker requires jQuery to be loaded first';
}
if (typeof moment === 'undefined') {
throw 'bootstrap-datetimepicker requires Moment.js to be loaded first';
}
factory(jQuery, moment);
}
}(function ($, moment) {
'use strict';
if (!moment) {
throw new Error('bootstrap-datetimepicker requires Moment.js to be loaded first');
}
var dateTimePicker = function (element, options) {
var picker = {},
date,
viewDate,
unset = true,
input,
component = false,
widget = false,
use24Hours,
minViewModeNumber = 0,
actualFormat,
parseFormats,
currentViewMode,
datePickerModes = [
{
clsName: 'days',
navFnc: 'M',
navStep: 1
},
{
clsName: 'months',
navFnc: 'y',
navStep: 1
},
{
clsName: 'years',
navFnc: 'y',
navStep: 10
},
{
clsName: 'decades',
navFnc: 'y',
navStep: 100
}
],
viewModes = ['days', 'months', 'years', 'decades'],
verticalModes = ['top', 'bottom', 'auto'],
horizontalModes = ['left', 'right', 'auto'],
toolbarPlacements = ['default', 'top', 'bottom'],
keyMap = {
'up': 38,
38: 'up',
'down': 40,
40: 'down',
'left': 37,
37: 'left',
'right': 39,
39: 'right',
'tab': 9,
9: 'tab',
'escape': 27,
27: 'escape',
'enter': 13,
13: 'enter',
'pageUp': 33,
33: 'pageUp',
'pageDown': 34,
34: 'pageDown',
'shift': 16,
16: 'shift',
'control': 17,
17: 'control',
'space': 32,
32: 'space',
't': 84,
84: 't',
'delete': 46,
46: 'delete'
},
keyState = {},
/********************************************************************************
*
* Private functions
*
********************************************************************************/
hasTimeZone = function () {
return moment.tz !== undefined && options.timeZone !== undefined && options.timeZone !== null && options.timeZone !== '';
},
getMoment = function (d) {
var returnMoment;
if (d === undefined || d === null) {
returnMoment = moment(); //TODO should this use format? and locale?
} else if (hasTimeZone()) { // There is a string to parse and a default time zone
// parse with the tz function which takes a default time zone if it is not in the format string
returnMoment = moment.tz(d, parseFormats, options.useStrict, options.timeZone);
} else {
returnMoment = moment(d, parseFormats, options.useStrict);
}
if (hasTimeZone()) {
returnMoment.tz(options.timeZone);
}
return returnMoment;
},
isEnabled = function (granularity) {
if (typeof granularity !== 'string' || granularity.length > 1) {
throw new TypeError('isEnabled expects a single character string parameter');
}
switch (granularity) {
case 'y':
return actualFormat.indexOf('Y') !== -1;
case 'M':
return actualFormat.indexOf('M') !== -1;
case 'd':
return actualFormat.toLowerCase().indexOf('d') !== -1;
case 'h':
case 'H':
return actualFormat.toLowerCase().indexOf('h') !== -1;
case 'm':
return actualFormat.indexOf('m') !== -1;
case 's':
return actualFormat.indexOf('s') !== -1;
default:
return false;
}
},
hasTime = function () {
return (isEnabled('h') || isEnabled('m') || isEnabled('s'));
},
hasDate = function () {
return (isEnabled('y') || isEnabled('M') || isEnabled('d'));
},
getDatePickerTemplate = function () {
var headTemplate = $('<thead>')
.append($('<tr>')
.append($('<th>').addClass('prev').attr('data-action', 'previous')
.append($('<span>').addClass(options.icons.previous))
)
.append($('<th>').addClass('picker-switch').attr('data-action', 'pickerSwitch').attr('colspan', (options.calendarWeeks ? '6' : '5')))
.append($('<th>').addClass('next').attr('data-action', 'next')
.append($('<span>').addClass(options.icons.next))
)
),
contTemplate = $('<tbody>')
.append($('<tr>')
.append($('<td>').attr('colspan', (options.calendarWeeks ? '8' : '7')))
);
return [
$('<div>').addClass('datepicker-days')
.append($('<table>').addClass('table-condensed')
.append(headTemplate)
.append($('<tbody>'))
),
$('<div>').addClass('datepicker-months')
.append($('<table>').addClass('table-condensed')
.append(headTemplate.clone())
.append(contTemplate.clone())
),
$('<div>').addClass('datepicker-years')
.append($('<table>').addClass('table-condensed')
.append(headTemplate.clone())
.append(contTemplate.clone())
),
$('<div>').addClass('datepicker-decades')
.append($('<table>').addClass('table-condensed')
.append(headTemplate.clone())
.append(contTemplate.clone())
)
];
},
getTimePickerMainTemplate = function () {
var topRow = $('<tr>'),
middleRow = $('<tr>'),
bottomRow = $('<tr>');
if (isEnabled('h')) {
topRow.append($('<td>')
.append($('<a>').attr({ href: '#', tabindex: '-1', 'title': options.tooltips.incrementHour }).addClass('btn').attr('data-action', 'incrementHours').append($('<span>').addClass(options.icons.up))));
middleRow.append($('<td>')
.append($('<span>').addClass('timepicker-hour').attr({ 'data-time-component': 'hours', 'title': options.tooltips.pickHour }).attr('data-action', 'showHours')));
bottomRow.append($('<td>')
.append($('<a>').attr({ href: '#', tabindex: '-1', 'title': options.tooltips.decrementHour }).addClass('btn').attr('data-action', 'decrementHours').append($('<span>').addClass(options.icons.down))));
}
if (isEnabled('m')) {
if (isEnabled('h')) {
topRow.append($('<td>').addClass('separator'));
middleRow.append($('<td>').addClass('separator').html(':'));
bottomRow.append($('<td>').addClass('separator'));
}
topRow.append($('<td>')
.append($('<a>').attr({ href: '#', tabindex: '-1', 'title': options.tooltips.incrementMinute }).addClass('btn').attr('data-action', 'incrementMinutes')
.append($('<span>').addClass(options.icons.up))));
middleRow.append($('<td>')
.append($('<span>').addClass('timepicker-minute').attr({ 'data-time-component': 'minutes', 'title': options.tooltips.pickMinute }).attr('data-action', 'showMinutes')));
bottomRow.append($('<td>')
.append($('<a>').attr({ href: '#', tabindex: '-1', 'title': options.tooltips.decrementMinute }).addClass('btn').attr('data-action', 'decrementMinutes')
.append($('<span>').addClass(options.icons.down))));
}
if (isEnabled('s')) {
if (isEnabled('m')) {
topRow.append($('<td>').addClass('separator'));
middleRow.append($('<td>').addClass('separator').html(':'));
bottomRow.append($('<td>').addClass('separator'));
}
topRow.append($('<td>')
.append($('<a>').attr({ href: '#', tabindex: '-1', 'title': options.tooltips.incrementSecond }).addClass('btn').attr('data-action', 'incrementSeconds')
.append($('<span>').addClass(options.icons.up))));
middleRow.append($('<td>')
.append($('<span>').addClass('timepicker-second').attr({ 'data-time-component': 'seconds', 'title': options.tooltips.pickSecond }).attr('data-action', 'showSeconds')));
bottomRow.append($('<td>')
.append($('<a>').attr({ href: '#', tabindex: '-1', 'title': options.tooltips.decrementSecond }).addClass('btn').attr('data-action', 'decrementSeconds')
.append($('<span>').addClass(options.icons.down))));
}
if (!use24Hours) {
topRow.append($('<td>').addClass('separator'));
middleRow.append($('<td>')
.append($('<button>').addClass('btn btn-primary').attr({ 'data-action': 'togglePeriod', tabindex: '-1', 'title': options.tooltips.togglePeriod })));
bottomRow.append($('<td>').addClass('separator'));
}
return $('<div>').addClass('timepicker-picker')
.append($('<table>').addClass('table-condensed')
.append([topRow, middleRow, bottomRow]));
},
getTimePickerTemplate = function () {
var hoursView = $('<div>').addClass('timepicker-hours')
.append($('<table>').addClass('table-condensed')),
minutesView = $('<div>').addClass('timepicker-minutes')
.append($('<table>').addClass('table-condensed')),
secondsView = $('<div>').addClass('timepicker-seconds')
.append($('<table>').addClass('table-condensed')),
ret = [getTimePickerMainTemplate()];
if (isEnabled('h')) {
ret.push(hoursView);
}
if (isEnabled('m')) {
ret.push(minutesView);
}
if (isEnabled('s')) {
ret.push(secondsView);
}
return ret;
},
getToolbar = function () {
var row = [];
if (options.showTodayButton) {
row.push($('<td>').append($('<a>').attr({ 'data-action': 'today', 'title': options.tooltips.today }).append($('<span>').addClass(options.icons.today))));
}
if (!options.sideBySide && hasDate() && hasTime()) {
row.push($('<td>').append($('<a>').attr({ 'data-action': 'togglePicker', 'title': options.tooltips.selectTime }).append($('<span>').addClass(options.icons.time))));
}
if (options.showClear) {
row.push($('<td>').append($('<a>').attr({ 'data-action': 'clear', 'title': options.tooltips.clear }).append($('<span>').addClass(options.icons.clear))));
}
if (options.showClose) {
row.push($('<td>').append($('<a>').attr({ 'data-action': 'close', 'title': options.tooltips.close }).append($('<span>').addClass(options.icons.close))));
}
return $('<table>').addClass('table-condensed').append($('<tbody>').append($('<tr>').append(row)));
},
getTemplate = function () {
var template = $('<div>').addClass('bootstrap-datetimepicker-widget dropdown-menu'),
dateView = $('<div>').addClass('datepicker').append(getDatePickerTemplate()),
timeView = $('<div>').addClass('timepicker').append(getTimePickerTemplate()),
content = $('<ul>').addClass('list-unstyled'),
toolbar = $('<li>').addClass('picker-switch' + (options.collapse ? ' accordion-toggle' : '')).append(getToolbar());
if (options.inline) {
template.removeClass('dropdown-menu');
}
if (use24Hours) {
template.addClass('usetwentyfour');
}
if (isEnabled('s') && !use24Hours) {
template.addClass('wider');
}
if (options.sideBySide && hasDate() && hasTime()) {
template.addClass('timepicker-sbs');
if (options.toolbarPlacement === 'top') {
template.append(toolbar);
}
template.append(
$('<div>').addClass('row')
.append(dateView.addClass('col-md-6'))
.append(timeView.addClass('col-md-6'))
);
if (options.toolbarPlacement === 'bottom') {
template.append(toolbar);
}
return template;
}
if (options.toolbarPlacement === 'top') {
content.append(toolbar);
}
if (hasDate()) {
content.append($('<li>').addClass((options.collapse && hasTime() ? 'collapse in' : '')).append(dateView));
}
if (options.toolbarPlacement === 'default') {
content.append(toolbar);
}
if (hasTime()) {
content.append($('<li>').addClass((options.collapse && hasDate() ? 'collapse' : '')).append(timeView));
}
if (options.toolbarPlacement === 'bottom') {
content.append(toolbar);
}
return template.append(content);
},
dataToOptions = function () {
var eData,
dataOptions = {};
if (element.is('input') || options.inline) {
eData = element.data();
} else {
eData = element.find('input').data();
}
if (eData.dateOptions && eData.dateOptions instanceof Object) {
dataOptions = $.extend(true, dataOptions, eData.dateOptions);
}
$.each(options, function (key) {
var attributeName = 'date' + key.charAt(0).toUpperCase() + key.slice(1);
if (eData[attributeName] !== undefined) {
dataOptions[key] = eData[attributeName];
}
});
return dataOptions;
},
place = function () {
var position = (component || element).position(),
offset = (component || element).offset(),
vertical = options.widgetPositioning.vertical,
horizontal = options.widgetPositioning.horizontal,
parent;
if (options.widgetParent) {
parent = options.widgetParent.append(widget);
} else if (element.is('input')) {
parent = element.after(widget).parent();
} else if (options.inline) {
parent = element.append(widget);
return;
} else {
parent = element;
element.children().first().after(widget);
}
// Top and bottom logic
if (vertical === 'auto') {
if (offset.top + widget.height() * 1.5 >= $(window).height() + $(window).scrollTop() &&
widget.height() + element.outerHeight() < offset.top) {
vertical = 'top';
} else {
vertical = 'bottom';
}
}
// Left and right logic
if (horizontal === 'auto') {
if (parent.width() < offset.left + widget.outerWidth() / 2 &&
offset.left + widget.outerWidth() > $(window).width()) {
horizontal = 'right';
} else {
horizontal = 'left';
}
}
if (vertical === 'top') {
widget.addClass('top').removeClass('bottom');
} else {
widget.addClass('bottom').removeClass('top');
}
if (horizontal === 'right') {
widget.addClass('pull-right');
} else {
widget.removeClass('pull-right');
}
// find the first parent element that has a relative css positioning
if (parent.css('position') !== 'relative') {
parent = parent.parents().filter(function () {
return $(this).css('position') === 'relative';
}).first();
}
if (parent.length === 0) {
throw new Error('datetimepicker component should be placed within a relative positioned container');
}
widget.css({
top: vertical === 'top' ? 'auto' : position.top + element.outerHeight(),
bottom: vertical === 'top' ? parent.outerHeight() - (parent === element ? 0 : position.top) : 'auto',
left: horizontal === 'left' ? (parent === element ? 0 : position.left) : 'auto',
right: horizontal === 'left' ? 'auto' : parent.outerWidth() - element.outerWidth() - (parent === element ? 0 : position.left)
});
},
notifyEvent = function (e) {
if (e.type === 'dp.change' && ((e.date && e.date.isSame(e.oldDate)) || (!e.date && !e.oldDate))) {
return;
}
element.trigger(e);
},
viewUpdate = function (e) {
if (e === 'y') {
e = 'YYYY';
}
notifyEvent({
type: 'dp.update',
change: e,
viewDate: viewDate.clone()
});
},
showMode = function (dir) {
if (!widget) {
return;
}
if (dir) {
currentViewMode = Math.max(minViewModeNumber, Math.min(3, currentViewMode + dir));
}
widget.find('.datepicker > div').hide().filter('.datepicker-' + datePickerModes[currentViewMode].clsName).show();
},
fillDow = function () {
var row = $('<tr>'),
currentDate = viewDate.clone().startOf('w').startOf('d');
if (options.calendarWeeks === true) {
row.append($('<th>').addClass('cw').text('#'));
}
while (currentDate.isBefore(viewDate.clone().endOf('w'))) {
row.append($('<th>').addClass('dow').text(currentDate.format('dd')));
currentDate.add(1, 'd');
}
widget.find('.datepicker-days thead').append(row);
},
isInDisabledDates = function (testDate) {
return options.disabledDates[testDate.format('YYYY-MM-DD')] === true;
},
isInEnabledDates = function (testDate) {
return options.enabledDates[testDate.format('YYYY-MM-DD')] === true;
},
isInDisabledHours = function (testDate) {
return options.disabledHours[testDate.format('H')] === true;
},
isInEnabledHours = function (testDate) {
return options.enabledHours[testDate.format('H')] === true;
},
isValid = function (targetMoment, granularity) {
if (!targetMoment.isValid()) {
return false;
}
if (options.disabledDates && granularity === 'd' && isInDisabledDates(targetMoment)) {
return false;
}
if (options.enabledDates && granularity === 'd' && !isInEnabledDates(targetMoment)) {
return false;
}
if (options.minDate && targetMoment.isBefore(options.minDate, granularity)) {
return false;
}
if (options.maxDate && targetMoment.isAfter(options.maxDate, granularity)) {
return false;
}
if (options.daysOfWeekDisabled && granularity === 'd' && options.daysOfWeekDisabled.indexOf(targetMoment.day()) !== -1) {
return false;
}
if (options.disabledHours && (granularity === 'h' || granularity === 'm' || granularity === 's') && isInDisabledHours(targetMoment)) {
return false;
}
if (options.enabledHours && (granularity === 'h' || granularity === 'm' || granularity === 's') && !isInEnabledHours(targetMoment)) {
return false;
}
if (options.disabledTimeIntervals && (granularity === 'h' || granularity === 'm' || granularity === 's')) {
var found = false;
$.each(options.disabledTimeIntervals, function () {
if (targetMoment.isBetween(this[0], this[1])) {
found = true;
return false;
}
});
if (found) {
return false;
}
}
return true;
},
fillMonths = function () {
var spans = [],
monthsShort = viewDate.clone().startOf('y').startOf('d');
while (monthsShort.isSame(viewDate, 'y')) {
spans.push($('<span>').attr('data-action', 'selectMonth').addClass('month').text(monthsShort.format('MMM')));
monthsShort.add(1, 'M');
}
widget.find('.datepicker-months td').empty().append(spans);
},
updateMonths = function () {
var monthsView = widget.find('.datepicker-months'),
monthsViewHeader = monthsView.find('th'),
months = monthsView.find('tbody').find('span');
monthsViewHeader.eq(0).find('span').attr('title', options.tooltips.prevYear);
monthsViewHeader.eq(1).attr('title', options.tooltips.selectYear);
monthsViewHeader.eq(2).find('span').attr('title', options.tooltips.nextYear);
monthsView.find('.disabled').removeClass('disabled');
if (!isValid(viewDate.clone().subtract(1, 'y'), 'y')) {
monthsViewHeader.eq(0).addClass('disabled');
}
monthsViewHeader.eq(1).text(viewDate.year());
if (!isValid(viewDate.clone().add(1, 'y'), 'y')) {
monthsViewHeader.eq(2).addClass('disabled');
}
months.removeClass('active');
if (date.isSame(viewDate, 'y') && !unset) {
months.eq(date.month()).addClass('active');
}
months.each(function (index) {
if (!isValid(viewDate.clone().month(index), 'M')) {
$(this).addClass('disabled');
}
});
},
updateYears = function () {
var yearsView = widget.find('.datepicker-years'),
yearsViewHeader = yearsView.find('th'),
startYear = viewDate.clone().subtract(5, 'y'),
endYear = viewDate.clone().add(6, 'y'),
html = '';
yearsViewHeader.eq(0).find('span').attr('title', options.tooltips.prevDecade);
yearsViewHeader.eq(1).attr('title', options.tooltips.selectDecade);
yearsViewHeader.eq(2).find('span').attr('title', options.tooltips.nextDecade);
yearsView.find('.disabled').removeClass('disabled');
if (options.minDate && options.minDate.isAfter(startYear, 'y')) {
yearsViewHeader.eq(0).addClass('disabled');
}
yearsViewHeader.eq(1).text(startYear.year() + '-' + endYear.year());
if (options.maxDate && options.maxDate.isBefore(endYear, 'y')) {
yearsViewHeader.eq(2).addClass('disabled');
}
while (!startYear.isAfter(endYear, 'y')) {
html += '<span data-action="selectYear" class="year' + (startYear.isSame(date, 'y') && !unset ? ' active' : '') + (!isValid(startYear, 'y') ? ' disabled' : '') + '">' + startYear.year() + '</span>';
startYear.add(1, 'y');
}
yearsView.find('td').html(html);
},
updateDecades = function () {
var decadesView = widget.find('.datepicker-decades'),
decadesViewHeader = decadesView.find('th'),
startDecade = moment({ y: viewDate.year() - (viewDate.year() % 100) - 1 }),
endDecade = startDecade.clone().add(100, 'y'),
startedAt = startDecade.clone(),
minDateDecade = false,
maxDateDecade = false,
endDecadeYear,
html = '';
decadesViewHeader.eq(0).find('span').attr('title', options.tooltips.prevCentury);
decadesViewHeader.eq(2).find('span').attr('title', options.tooltips.nextCentury);
decadesView.find('.disabled').removeClass('disabled');
if (startDecade.isSame(moment({ y: 1900 })) || (options.minDate && options.minDate.isAfter(startDecade, 'y'))) {
decadesViewHeader.eq(0).addClass('disabled');
}
decadesViewHeader.eq(1).text(startDecade.year() + '-' + endDecade.year());
if (startDecade.isSame(moment({ y: 2000 })) || (options.maxDate && options.maxDate.isBefore(endDecade, 'y'))) {
decadesViewHeader.eq(2).addClass('disabled');
}
while (!startDecade.isAfter(endDecade, 'y')) {
endDecadeYear = startDecade.year() + 12;
minDateDecade = options.minDate && options.minDate.isAfter(startDecade, 'y') && options.minDate.year() <= endDecadeYear;
maxDateDecade = options.maxDate && options.maxDate.isAfter(startDecade, 'y') && options.maxDate.year() <= endDecadeYear;
html += '<span data-action="selectDecade" class="decade' + (date.isAfter(startDecade) && date.year() <= endDecadeYear ? ' active' : '') +
(!isValid(startDecade, 'y') && !minDateDecade && !maxDateDecade ? ' disabled' : '') + '" data-selection="' + (startDecade.year() + 6) + '">' + (startDecade.year() + 1) + ' - ' + (startDecade.year() + 12) + '</span>';
startDecade.add(12, 'y');
}
html += '<span></span><span></span><span></span>'; //push the dangling block over, at least this way it's even
decadesView.find('td').html(html);
decadesViewHeader.eq(1).text((startedAt.year() + 1) + '-' + (startDecade.year()));
},
fillDate = function () {
var daysView = widget.find('.datepicker-days'),
daysViewHeader = daysView.find('th'),
currentDate,
html = [],
row,
clsName,
i;
if (!hasDate()) {
return;
}
daysViewHeader.eq(0).find('span').attr('title', options.tooltips.prevMonth);
daysViewHeader.eq(1).attr('title', options.tooltips.selectMonth);
daysViewHeader.eq(2).find('span').attr('title', options.tooltips.nextMonth);
daysView.find('.disabled').removeClass('disabled');
daysViewHeader.eq(1).text(viewDate.format(options.dayViewHeaderFormat));
if (!isValid(viewDate.clone().subtract(1, 'M'), 'M')) {
daysViewHeader.eq(0).addClass('disabled');
}
if (!isValid(viewDate.clone().add(1, 'M'), 'M')) {
daysViewHeader.eq(2).addClass('disabled');
}
currentDate = viewDate.clone().startOf('M').startOf('w').startOf('d');
for (i = 0; i < 42; i++) { //always display 42 days (should show 6 weeks)
if (currentDate.weekday() === 0) {
row = $('<tr>');
if (options.calendarWeeks) {
row.append('<td class="cw">' + currentDate.week() + '</td>');
}
html.push(row);
}
clsName = '';
if (currentDate.isBefore(viewDate, 'M')) {
clsName += ' old';
}
if (currentDate.isAfter(viewDate, 'M')) {
clsName += ' new';
}
if (currentDate.isSame(date, 'd') && !unset) {
clsName += ' active';
}
if (!isValid(currentDate, 'd')) {
clsName += ' disabled';
}
if (currentDate.isSame(getMoment(), 'd')) {
clsName += ' today';
}
if (currentDate.day() === 0 || currentDate.day() === 6) {
clsName += ' weekend';
}
row.append('<td data-action="selectDay" data-day="' + currentDate.format('L') + '" class="day' + clsName + '">' + currentDate.date() + '</td>');
currentDate.add(1, 'd');
}
daysView.find('tbody').empty().append(html);
updateMonths();
updateYears();
updateDecades();
},
fillHours = function () {
var table = widget.find('.timepicker-hours table'),
currentHour = viewDate.clone().startOf('d'),
html = [],
row = $('<tr>');
if (viewDate.hour() > 11 && !use24Hours) {
currentHour.hour(12);
}
while (currentHour.isSame(viewDate, 'd') && (use24Hours || (viewDate.hour() < 12 && currentHour.hour() < 12) || viewDate.hour() > 11)) {
if (currentHour.hour() % 4 === 0) {
row = $('<tr>');
html.push(row);
}
row.append('<td data-action="selectHour" class="hour' + (!isValid(currentHour, 'h') ? ' disabled' : '') + '">' + currentHour.format(use24Hours ? 'HH' : 'hh') + '</td>');
currentHour.add(1, 'h');
}
table.empty().append(html);
},
fillMinutes = function () {
var table = widget.find('.timepicker-minutes table'),
currentMinute = viewDate.clone().startOf('h'),
html = [],
row = $('<tr>'),
step = options.stepping === 1 ? 5 : options.stepping;
while (viewDate.isSame(currentMinute, 'h')) {
if (currentMinute.minute() % (step * 4) === 0) {
row = $('<tr>');
html.push(row);
}
row.append('<td data-action="selectMinute" class="minute' + (!isValid(currentMinute, 'm') ? ' disabled' : '') + '">' + currentMinute.format('mm') + '</td>');
currentMinute.add(step, 'm');
}
table.empty().append(html);
},
fillSeconds = function () {
var table = widget.find('.timepicker-seconds table'),
currentSecond = viewDate.clone().startOf('m'),
html = [],
row = $('<tr>');
while (viewDate.isSame(currentSecond, 'm')) {
if (currentSecond.second() % 20 === 0) {
row = $('<tr>');
html.push(row);
}
row.append('<td data-action="selectSecond" class="second' + (!isValid(currentSecond, 's') ? ' disabled' : '') + '">' + currentSecond.format('ss') + '</td>');
currentSecond.add(5, 's');
}
table.empty().append(html);
},
fillTime = function () {
var toggle, newDate, timeComponents = widget.find('.timepicker span[data-time-component]');
if (!use24Hours) {
toggle = widget.find('.timepicker [data-action=togglePeriod]');
newDate = date.clone().add((date.hours() >= 12) ? -12 : 12, 'h');
toggle.text(date.format('A'));
if (isValid(newDate, 'h')) {
toggle.removeClass('disabled');
} else {
toggle.addClass('disabled');
}
}
timeComponents.filter('[data-time-component=hours]').text(date.format(use24Hours ? 'HH' : 'hh'));
timeComponents.filter('[data-time-component=minutes]').text(date.format('mm'));
timeComponents.filter('[data-time-component=seconds]').text(date.format('ss'));
fillHours();
fillMinutes();
fillSeconds();
},
update = function () {
if (!widget) {
return;
}
fillDate();
fillTime();
},
setValue = function (targetMoment) {
var oldDate = unset ? null : date;
// case of calling setValue(null or false)
if (!targetMoment) {
unset = true;
input.val('');
element.data('date', '');
notifyEvent({
type: 'dp.change',
date: false,
oldDate: oldDate
});
update();
return;
}
targetMoment = targetMoment.clone().locale(options.locale);
if (hasTimeZone()) {
targetMoment.tz(options.timeZone);
}
if (options.stepping !== 1) {
targetMoment.minutes((Math.round(targetMoment.minutes() / options.stepping) * options.stepping)).seconds(0);
}
if (isValid(targetMoment)) {
date = targetMoment;
//viewDate = date.clone(); // TODO this doesn't work right on first use
input.val(date.format(actualFormat));
element.data('date', date.format(actualFormat));
unset = false;
update();
notifyEvent({
type: 'dp.change',
date: date.clone(),
oldDate: oldDate
});
} else {
if (!options.keepInvalid) {
input.val(unset ? '' : date.format(actualFormat));
} else {
notifyEvent({
type: 'dp.change',
date: targetMoment,
oldDate: oldDate
});
}
notifyEvent({
type: 'dp.error',
date: targetMoment,
oldDate: oldDate
});
}
},
/**
* Hides the widget. Possibly will emit dp.hide
*/
hide = function () {
var transitioning = false;
if (!widget) {
return picker;
}
// Ignore event if in the middle of a picker transition
widget.find('.collapse').each(function () {
var collapseData = $(this).data('collapse');
if (collapseData && collapseData.transitioning) {
transitioning = true;
return false;
}
return true;
});
if (transitioning) {
return picker;
}
if (component && component.hasClass('btn')) {
component.toggleClass('active');
}
widget.hide();
$(window).off('resize', place);
widget.off('click', '[data-action]');
widget.off('mousedown', false);
widget.remove();
widget = false;
notifyEvent({
type: 'dp.hide',
date: date.clone()
});
input.blur();
currentViewMode = 0;
viewDate = date.clone();
return picker;
},
clear = function () {
setValue(null);
},
parseInputDate = function (inputDate) {
if (options.parseInputDate === undefined) {
if (!moment.isMoment(inputDate)) {
inputDate = getMoment(inputDate);
}
} else {
inputDate = options.parseInputDate(inputDate);
}
//inputDate.locale(options.locale);
return inputDate;
},
/********************************************************************************
*
* Widget UI interaction functions
*
********************************************************************************/
actions = {
next: function () {
var navFnc = datePickerModes[currentViewMode].navFnc;
viewDate.add(datePickerModes[currentViewMode].navStep, navFnc);
fillDate();
viewUpdate(navFnc);
},
previous: function () {
var navFnc = datePickerModes[currentViewMode].navFnc;
viewDate.subtract(datePickerModes[currentViewMode].navStep, navFnc);
fillDate();
viewUpdate(navFnc);
},
pickerSwitch: function () {
showMode(1);
},
selectMonth: function (e) {
var month = $(e.target).closest('tbody').find('span').index($(e.target));
viewDate.month(month);
if (currentViewMode === minViewModeNumber) {
setValue(date.clone().year(viewDate.year()).month(viewDate.month()));
if (!options.inline) {
hide();
}
} else {
showMode(-1);
fillDate();
}
viewUpdate('M');
},
selectYear: function (e) {
var year = parseInt($(e.target).text(), 10) || 0;
viewDate.year(year);
if (currentViewMode === minViewModeNumber) {
setValue(date.clone().year(viewDate.year()));
if (!options.inline) {
hide();
}
} else {
showMode(-1);
fillDate();
}
viewUpdate('YYYY');
},
selectDecade: function (e) {
var year = parseInt($(e.target).data('selection'), 10) || 0;
viewDate.year(year);
if (currentViewMode === minViewModeNumber) {
setValue(date.clone().year(viewDate.year()));
if (!options.inline) {
hide();
}
} else {
showMode(-1);
fillDate();
}
viewUpdate('YYYY');
},
selectDay: function (e) {
var day = viewDate.clone();
if ($(e.target).is('.old')) {
day.subtract(1, 'M');
}
if ($(e.target).is('.new')) {
day.add(1, 'M');
}
setValue(day.date(parseInt($(e.target).text(), 10)));
if (!hasTime() && !options.keepOpen && !options.inline) {
hide();
}
},
incrementHours: function () {
var newDate = date.clone().add(1, 'h');
if (isValid(newDate, 'h')) {
setValue(newDate);
}
},
incrementMinutes: function () {
var newDate = date.clone().add(options.stepping, 'm');
if (isValid(newDate, 'm')) {
setValue(newDate);
}
},
incrementSeconds: function () {
var newDate = date.clone().add(1, 's');
if (isValid(newDate, 's')) {
setValue(newDate);
}
},
decrementHours: function () {
var newDate = date.clone().subtract(1, 'h');
if (isValid(newDate, 'h')) {
setValue(newDate);
}
},
decrementMinutes: function () {
var newDate = date.clone().subtract(options.stepping, 'm');
if (isValid(newDate, 'm')) {
setValue(newDate);
}
},
decrementSeconds: function () {
var newDate = date.clone().subtract(1, 's');
if (isValid(newDate, 's')) {
setValue(newDate);
}
},
togglePeriod: function () {
setValue(date.clone().add((date.hours() >= 12) ? -12 : 12, 'h'));
},
togglePicker: function (e) {
var $this = $(e.target),
$parent = $this.closest('ul'),
expanded = $parent.find('.in'),
closed = $parent.find('.collapse:not(.in)'),
collapseData;
if (expanded && expanded.length) {
collapseData = expanded.data('collapse');
if (collapseData && collapseData.transitioning) {
return;
}
if (expanded.collapse) { // if collapse plugin is available through bootstrap.js then use it
expanded.collapse('hide');
closed.collapse('show');
} else { // otherwise just toggle in class on the two views
expanded.removeClass('in');
closed.addClass('in');
}
if ($this.is('span')) {
$this.toggleClass(options.icons.time + ' ' + options.icons.date);
} else {
$this.find('span').toggleClass(options.icons.time + ' ' + options.icons.date);
}
// NOTE: uncomment if toggled state will be restored in show()
//if (component) {
// component.find('span').toggleClass(options.icons.time + ' ' + options.icons.date);
//}
}
},
showPicker: function () {
widget.find('.timepicker > div:not(.timepicker-picker)').hide();
widget.find('.timepicker .timepicker-picker').show();
},
showHours: function () {
widget.find('.timepicker .timepicker-picker').hide();
widget.find('.timepicker .timepicker-hours').show();
},
showMinutes: function () {
widget.find('.timepicker .timepicker-picker').hide();
widget.find('.timepicker .timepicker-minutes').show();
},
showSeconds: function () {
widget.find('.timepicker .timepicker-picker').hide();
widget.find('.timepicker .timepicker-seconds').show();
},
selectHour: function (e) {
var hour = parseInt($(e.target).text(), 10);
if (!use24Hours) {
if (date.hours() >= 12) {
if (hour !== 12) {
hour += 12;
}
} else {
if (hour === 12) {
hour = 0;
}
}
}
setValue(date.clone().hours(hour));
actions.showPicker.call(picker);
},
selectMinute: function (e) {
setValue(date.clone().minutes(parseInt($(e.target).text(), 10)));
actions.showPicker.call(picker);
},
selectSecond: function (e) {
setValue(date.clone().seconds(parseInt($(e.target).text(), 10)));
actions.showPicker.call(picker);
},
clear: clear,
today: function () {
var todaysDate = getMoment();
if (isValid(todaysDate, 'd')) {
setValue(todaysDate);
}
},
close: hide
},
doAction = function (e) {
if ($(e.currentTarget).is('.disabled')) {
return false;
}
actions[$(e.currentTarget).data('action')].apply(picker, arguments);
return false;
},
/**
* Shows the widget. Possibly will emit dp.show and dp.change
*/
show = function () {
var currentMoment,
useCurrentGranularity = {
'year': function (m) {
return m.month(0).date(1).hours(0).seconds(0).minutes(0);
},
'month': function (m) {
return m.date(1).hours(0).seconds(0).minutes(0);
},
'day': function (m) {
return m.hours(0).seconds(0).minutes(0);
},
'hour': function (m) {
return m.seconds(0).minutes(0);
},
'minute': function (m) {
return m.seconds(0);
}
};
if (input.prop('disabled') || (!options.ignoreReadonly && input.prop('readonly')) || widget) {
return picker;
}
if (input.val() !== undefined && input.val().trim().length !== 0) {
setValue(parseInputDate(input.val().trim()));
} else if (unset && options.useCurrent && (options.inline || (input.is('input') && input.val().trim().length === 0))) {
currentMoment = getMoment();
if (typeof options.useCurrent === 'string') {
currentMoment = useCurrentGranularity[options.useCurrent](currentMoment);
}
setValue(currentMoment);
}
widget = getTemplate();
fillDow();
fillMonths();
widget.find('.timepicker-hours').hide();
widget.find('.timepicker-minutes').hide();
widget.find('.timepicker-seconds').hide();
update();
showMode();
$(window).on('resize', place);
widget.on('click', '[data-action]', doAction); // this handles clicks on the widget
widget.on('mousedown', false);
if (component && component.hasClass('btn')) {
component.toggleClass('active');
}
place();
widget.show();
if (options.focusOnShow && !input.is(':focus')) {
input.focus();
}
notifyEvent({
type: 'dp.show'
});
return picker;
},
/**
* Shows or hides the widget
*/
toggle = function () {
return (widget ? hide() : show());
},
keydown = function (e) {
var handler = null,
index,
index2,
pressedKeys = [],
pressedModifiers = {},
currentKey = e.which,
keyBindKeys,
allModifiersPressed,
pressed = 'p';
keyState[currentKey] = pressed;
for (index in keyState) {
if (keyState.hasOwnProperty(index) && keyState[index] === pressed) {
pressedKeys.push(index);
if (parseInt(index, 10) !== currentKey) {
pressedModifiers[index] = true;
}
}
}
for (index in options.keyBinds) {
if (options.keyBinds.hasOwnProperty(index) && typeof (options.keyBinds[index]) === 'function') {
keyBindKeys = index.split(' ');
if (keyBindKeys.length === pressedKeys.length && keyMap[currentKey] === keyBindKeys[keyBindKeys.length - 1]) {
allModifiersPressed = true;
for (index2 = keyBindKeys.length - 2; index2 >= 0; index2--) {
if (!(keyMap[keyBindKeys[index2]] in pressedModifiers)) {
allModifiersPressed = false;
break;
}
}
if (allModifiersPressed) {
handler = options.keyBinds[index];
break;
}
}
}
}
if (handler) {
handler.call(picker, widget);
e.stopPropagation();
e.preventDefault();
}
},
keyup = function (e) {
keyState[e.which] = 'r';
e.stopPropagation();
e.preventDefault();
},
change = function (e) {
var val = $(e.target).val().trim(),
parsedDate = val ? parseInputDate(val) : null;
setValue(parsedDate);
e.stopImmediatePropagation();
return false;
},
attachDatePickerElementEvents = function () {
input.on({
'change': change,
'blur': options.debug ? '' : hide,
'keydown': keydown,
'keyup': keyup,
'focus': options.allowInputToggle ? show : ''
});
if (element.is('input')) {
input.on({
'focus': show
});
} else if (component) {
component.on('click', toggle);
component.on('mousedown', false);
}
},
detachDatePickerElementEvents = function () {
input.off({
'change': change,
'blur': blur,
'keydown': keydown,
'keyup': keyup,
'focus': options.allowInputToggle ? hide : ''
});
if (element.is('input')) {
input.off({
'focus': show
});
} else if (component) {
component.off('click', toggle);
component.off('mousedown', false);
}
},
indexGivenDates = function (givenDatesArray) {
// Store given enabledDates and disabledDates as keys.
// This way we can check their existence in O(1) time instead of looping through whole array.
// (for example: options.enabledDates['2014-02-27'] === true)
var givenDatesIndexed = {};
$.each(givenDatesArray, function () {
var dDate = parseInputDate(this);
if (dDate.isValid()) {
givenDatesIndexed[dDate.format('YYYY-MM-DD')] = true;
}
});
return (Object.keys(givenDatesIndexed).length) ? givenDatesIndexed : false;
},
indexGivenHours = function (givenHoursArray) {
// Store given enabledHours and disabledHours as keys.
// This way we can check their existence in O(1) time instead of looping through whole array.
// (for example: options.enabledHours[19] === true)
var givenHoursIndexed = {};
$.each(givenHoursArray, function () {
givenHoursIndexed[this] = true;
});
return (Object.keys(givenHoursIndexed).length) ? givenHoursIndexed : false;
},
initFormatting = function () {
var format = options.format || 'L LT';
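// Expand locale-aware tokens into concrete ones, e.g. with the default English
// locale 'L LT' becomes 'MM/DD/YYYY h:mm A'.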
actualFormat = format.replace(/(\[[^\[]*\])|(\\)?(LTS|LT|LL?L?L?|l{1,4})/g, function (formatInput) {
var newinput = date.localeData().longDateFormat(formatInput) || formatInput;
return newinput.replace(/(\[[^\[]*\])|(\\)?(LTS|LT|LL?L?L?|l{1,4})/g, function (formatInput2) { //temp fix for #740
return date.localeData().longDateFormat(formatInput2) || formatInput2;
});
});
parseFormats = options.extraFormats ? options.extraFormats.slice() : [];
if (parseFormats.indexOf(format) < 0 && parseFormats.indexOf(actualFormat) < 0) {
parseFormats.push(actualFormat);
}
use24Hours = (actualFormat.toLowerCase().indexOf('a') < 1 && actualFormat.replace(/\[.*?\]/g, '').indexOf('h') < 1);
if (isEnabled('y')) {
minViewModeNumber = 2;
}
if (isEnabled('M')) {
minViewModeNumber = 1;
}
if (isEnabled('d')) {
minViewModeNumber = 0;
}
currentViewMode = Math.max(minViewModeNumber, currentViewMode);
if (!unset) {
setValue(date);
}
};
/********************************************************************************
*
* Public API functions
* =====================
*
* Important: Do not expose direct references to private objects or the options
* object to the outer world. Always return a clone when returning values or make
* a clone when setting a private variable.
*
********************************************************************************/
picker.destroy = function () {
///<summary>Destroys the widget and removes all attached event listeners</summary>
hide();
detachDatePickerElementEvents();
element.removeData('DateTimePicker');
element.removeData('date');
};
picker.toggle = toggle;
picker.show = show;
picker.hide = hide;
picker.disable = function () {
///<summary>Disables the input element, the component is attached to, by adding a disabled="true" attribute to it.
///If the widget was visible before that call it is hidden. Possibly emits dp.hide</summary>
hide();
if (component && component.hasClass('btn')) {
component.addClass('disabled');
}
input.prop('disabled', true);
return picker;
};
picker.enable = function () {
///<summary>Enables the input element, the component is attached to, by removing disabled attribute from it.</summary>
if (component && component.hasClass('btn')) {
component.removeClass('disabled');
}
input.prop('disabled', false);
return picker;
};
picker.ignoreReadonly = function (ignoreReadonly) {
if (arguments.length === 0) {
return options.ignoreReadonly;
}
if (typeof ignoreReadonly !== 'boolean') {
throw new TypeError('ignoreReadonly () expects a boolean parameter');
}
options.ignoreReadonly = ignoreReadonly;
return picker;
};
picker.options = function (newOptions) {
if (arguments.length === 0) {
return $.extend(true, {}, options);
}
if (!(newOptions instanceof Object)) {
throw new TypeError('options() options parameter should be an object');
}
$.extend(true, options, newOptions);
$.each(options, function (key, value) {
if (picker[key] !== undefined) {
picker[key](value);
} else {
throw new TypeError('option ' + key + ' is not recognized!');
}
});
return picker;
};
picker.date = function (newDate) {
///<signature helpKeyword="$.fn.datetimepicker.date">
///<summary>Returns the component's model current date, a moment object or null if not set.</summary>
///<returns type="Moment">date.clone()</returns>
///</signature>
///<signature>
///<summary>Sets the components model current moment to it. Passing a null value unsets the components model current moment. Parsing of the newDate parameter is made using moment library with the options.format and options.useStrict components configuration.</summary>
///<param name="newDate" locid="$.fn.datetimepicker.date_p:newDate">Takes string, Date, moment, null parameter.</param>
///</signature>
if (arguments.length === 0) {
if (unset) {
return null;
}
return date.clone();
}
if (newDate !== null && typeof newDate !== 'string' && !moment.isMoment(newDate) && !(newDate instanceof Date)) {
throw new TypeError('date() parameter must be one of [null, string, moment or Date]');
}
setValue(newDate === null ? null : parseInputDate(newDate));
return picker;
};
picker.format = function (newFormat) {
///<summary>Returns the currently set options.format, or sets the moment.js format string used to display and parse dates. Passing boolean:false resets the format to the locale default ('L LT').</summary>
///<param name="newFormat">A moment.js format string, or boolean:false</param>
///<returns type="string|boolean">options.format</returns>
if (arguments.length === 0) {
return options.format;
}
if ((typeof newFormat !== 'string') && ((typeof newFormat !== 'boolean') || (newFormat !== false))) {
throw new TypeError('format() expects a string or boolean:false parameter ' + newFormat);
}
options.format = newFormat;
if (actualFormat) {
initFormatting(); // reinit formatting
}
return picker;
};
picker.timeZone = function (newZone) {
if (arguments.length === 0) {
return options.timeZone;
}
if (typeof newZone !== 'string') {
throw new TypeError('timeZone() expects a string parameter');
}
options.timeZone = newZone;
return picker;
};
picker.dayViewHeaderFormat = function (newFormat) {
if (arguments.length === 0) {
return options.dayViewHeaderFormat;
}
if (typeof newFormat !== 'string') {
throw new TypeError('dayViewHeaderFormat() expects a string parameter');
}
options.dayViewHeaderFormat = newFormat;
return picker;
};
picker.extraFormats = function (formats) {
if (arguments.length === 0) {
return options.extraFormats;
}
if (formats !== false && !(formats instanceof Array)) {
throw new TypeError('extraFormats() expects an array or false parameter');
}
options.extraFormats = formats;
if (parseFormats) {
initFormatting(); // reinit formatting
}
return picker;
};
picker.disabledDates = function (dates) {
///<signature helpKeyword="$.fn.datetimepicker.disabledDates">
///<summary>Returns an array with the currently set disabled dates on the component.</summary>
///<returns type="array">options.disabledDates</returns>
///</signature>
///<signature>
///<summary>Setting this takes precedence over options.minDate, options.maxDate configuration. Also calling this function removes the configuration of
///options.enabledDates if such exist.</summary>
///<param name="dates" locid="$.fn.datetimepicker.disabledDates_p:dates">Takes an [ string or Date or moment ] of values and allows the user to select only from those days.</param>
///</signature>
if (arguments.length === 0) {
return (options.disabledDates ? $.extend({}, options.disabledDates) : options.disabledDates);
}
if (!dates) {
options.disabledDates = false;
update();
return picker;
}
if (!(dates instanceof Array)) {
throw new TypeError('disabledDates() expects an array parameter');
}
options.disabledDates = indexGivenDates(dates);
options.enabledDates = false;
update();
return picker;
};
picker.enabledDates = function (dates) {
///<signature helpKeyword="$.fn.datetimepicker.enabledDates">
///<summary>Returns an array with the currently set enabled dates on the component.</summary>
///<returns type="array">options.enabledDates</returns>
///</signature>
///<signature>
///<summary>Setting this takes precedence over options.minDate, options.maxDate configuration. Also calling this function removes the configuration of options.disabledDates if such exist.</summary>
///<param name="dates" locid="$.fn.datetimepicker.enabledDates_p:dates">Takes an [ string or Date or moment ] of values and allows the user to select only from those days.</param>
///</signature>
if (arguments.length === 0) {
return (options.enabledDates ? $.extend({}, options.enabledDates) : options.enabledDates);
}
if (!dates) {
options.enabledDates = false;
update();
return picker;
}
if (!(dates instanceof Array)) {
throw new TypeError('enabledDates() expects an array parameter');
}
options.enabledDates = indexGivenDates(dates);
options.disabledDates = false;
update();
return picker;
};
picker.daysOfWeekDisabled = function (daysOfWeekDisabled) {
if (arguments.length === 0) {
return options.daysOfWeekDisabled ? options.daysOfWeekDisabled.slice(0) : options.daysOfWeekDisabled;
}
if ((typeof daysOfWeekDisabled === 'boolean') && !daysOfWeekDisabled) {
options.daysOfWeekDisabled = false;
update();
return picker;
}
if (!(daysOfWeekDisabled instanceof Array)) {
throw new TypeError('daysOfWeekDisabled() expects an array parameter');
}
options.daysOfWeekDisabled = daysOfWeekDisabled.reduce(function (previousValue, currentValue) {
currentValue = parseInt(currentValue, 10);
if (currentValue > 6 || currentValue < 0 || isNaN(currentValue)) {
return previousValue;
}
if (previousValue.indexOf(currentValue) === -1) {
previousValue.push(currentValue);
}
return previousValue;
}, []).sort();
if (options.useCurrent && !options.keepInvalid) {
var tries = 0;
while (!isValid(date, 'd')) {
date.add(1, 'd');
if (tries === 31) {
throw 'Tried 31 times to find a valid date';
}
tries++;
}
setValue(date);
}
update();
return picker;
};
picker.maxDate = function (maxDate) {
if (arguments.length === 0) {
return options.maxDate ? options.maxDate.clone() : options.maxDate;
}
if ((typeof maxDate === 'boolean') && maxDate === false) {
options.maxDate = false;
update();
return picker;
}
if (typeof maxDate === 'string') {
if (maxDate === 'now' || maxDate === 'moment') {
maxDate = getMoment();
}
}
var parsedDate = parseInputDate(maxDate);
if (!parsedDate.isValid()) {
throw new TypeError('maxDate() Could not parse date parameter: ' + maxDate);
}
if (options.minDate && parsedDate.isBefore(options.minDate)) {
throw new TypeError('maxDate() date parameter is before options.minDate: ' + parsedDate.format(actualFormat));
}
options.maxDate = parsedDate;
if (options.useCurrent && !options.keepInvalid && date.isAfter(maxDate)) {
setValue(options.maxDate);
}
if (viewDate.isAfter(parsedDate)) {
viewDate = parsedDate.clone().subtract(options.stepping, 'm');
}
update();
return picker;
};
picker.minDate = function (minDate) {
if (arguments.length === 0) {
return options.minDate ? options.minDate.clone() : options.minDate;
}
if ((typeof minDate === 'boolean') && minDate === false) {
options.minDate = false;
update();
return picker;
}
if (typeof minDate === 'string') {
if (minDate === 'now' || minDate === 'moment') {
minDate = getMoment();
}
}
var parsedDate = parseInputDate(minDate);
if (!parsedDate.isValid()) {
throw new TypeError('minDate() Could not parse date parameter: ' + minDate);
}
if (options.maxDate && parsedDate.isAfter(options.maxDate)) {
throw new TypeError('minDate() date parameter is after options.maxDate: ' + parsedDate.format(actualFormat));
}
options.minDate = parsedDate;
if (options.useCurrent && !options.keepInvalid && date.isBefore(minDate)) {
setValue(options.minDate);
}
if (viewDate.isBefore(parsedDate)) {
viewDate = parsedDate.clone().add(options.stepping, 'm');
}
update();
return picker;
};
picker.defaultDate = function (defaultDate) {
///<signature helpKeyword="$.fn.datetimepicker.defaultDate">
///<summary>Returns a moment with the options.defaultDate option configuration or false if not set</summary>
///<returns type="Moment">date.clone()</returns>
///</signature>
///<signature>
///<summary>Will set the picker's initial date. If a boolean:false value is passed the options.defaultDate parameter is cleared.</summary>
///<param name="defaultDate" locid="$.fn.datetimepicker.defaultDate_p:defaultDate">Takes a string, Date, moment, boolean:false</param>
///</signature>
if (arguments.length === 0) {
return options.defaultDate ? options.defaultDate.clone() : options.defaultDate;
}
if (!defaultDate) {
options.defaultDate = false;
return picker;
}
if (typeof defaultDate === 'string') {
if (defaultDate === 'now' || defaultDate === 'moment') {
defaultDate = getMoment();
} else {
defaultDate = getMoment(defaultDate);
}
}
var parsedDate = parseInputDate(defaultDate);
if (!parsedDate.isValid()) {
throw new TypeError('defaultDate() Could not parse date parameter: ' + defaultDate);
}
if (!isValid(parsedDate)) {
throw new TypeError('defaultDate() date passed is invalid according to component setup validations');
}
options.defaultDate = parsedDate;
if ((options.defaultDate && options.inline) || input.val().trim() === '') {
setValue(options.defaultDate);
}
return picker;
};
picker.locale = function (locale) {
if (arguments.length === 0) {
return options.locale;
}
if (!moment.localeData(locale)) {
throw new TypeError('locale() locale ' + locale + ' is not loaded from moment locales!');
}
options.locale = locale;
date.locale(options.locale);
viewDate.locale(options.locale);
if (actualFormat) {
initFormatting(); // reinit formatting
}
if (widget) {
hide();
show();
}
return picker;
};
picker.stepping = function (stepping) {
if (arguments.length === 0) {
return options.stepping;
}
stepping = parseInt(stepping, 10);
if (isNaN(stepping) || stepping < 1) {
stepping = 1;
}
options.stepping = stepping;
return picker;
};
picker.useCurrent = function (useCurrent) {
var useCurrentOptions = ['year', 'month', 'day', 'hour', 'minute'];
if (arguments.length === 0) {
return options.useCurrent;
}
if ((typeof useCurrent !== 'boolean') && (typeof useCurrent !== 'string')) {
throw new TypeError('useCurrent() expects a boolean or string parameter');
}
if (typeof useCurrent === 'string' && useCurrentOptions.indexOf(useCurrent.toLowerCase()) === -1) {
throw new TypeError('useCurrent() expects a string parameter of ' + useCurrentOptions.join(', '));
}
options.useCurrent = useCurrent;
return picker;
};
picker.collapse = function (collapse) {
if (arguments.length === 0) {
return options.collapse;
}
if (typeof collapse !== 'boolean') {
throw new TypeError('collapse() expects a boolean parameter');
}
if (options.collapse === collapse) {
return picker;
}
options.collapse = collapse;
if (widget) {
hide();
show();
}
return picker;
};
picker.icons = function (icons) {
if (arguments.length === 0) {
return $.extend({}, options.icons);
}
if (!(icons instanceof Object)) {
throw new TypeError('icons() expects parameter to be an Object');
}
$.extend(options.icons, icons);
if (widget) {
hide();
show();
}
return picker;
};
picker.tooltips = function (tooltips) {
if (arguments.length === 0) {
return $.extend({}, options.tooltips);
}
if (!(tooltips instanceof Object)) {
throw new TypeError('tooltips() expects parameter to be an Object');
}
$.extend(options.tooltips, tooltips);
if (widget) {
hide();
show();
}
return picker;
};
picker.useStrict = function (useStrict) {
if (arguments.length === 0) {
return options.useStrict;
}
if (typeof useStrict !== 'boolean') {
throw new TypeError('useStrict() expects a boolean parameter');
}
options.useStrict = useStrict;
return picker;
};
picker.sideBySide = function (sideBySide) {
if (arguments.length === 0) {
return options.sideBySide;
}
if (typeof sideBySide !== 'boolean') {
throw new TypeError('sideBySide() expects a boolean parameter');
}
options.sideBySide = sideBySide;
if (widget) {
hide();
show();
}
return picker;
};
picker.viewMode = function (viewMode) {
if (arguments.length === 0) {
return options.viewMode;
}
if (typeof viewMode !== 'string') {
throw new TypeError('viewMode() expects a string parameter');
}
if (viewModes.indexOf(viewMode) === -1) {
throw new TypeError('viewMode() parameter must be one of (' + viewModes.join(', ') + ') value');
}
options.viewMode = viewMode;
currentViewMode = Math.max(viewModes.indexOf(viewMode), minViewModeNumber);
showMode();
return picker;
};
picker.toolbarPlacement = function (toolbarPlacement) {
if (arguments.length === 0) {
return options.toolbarPlacement;
}
if (typeof toolbarPlacement !== 'string') {
throw new TypeError('toolbarPlacement() expects a string parameter');
}
if (toolbarPlacements.indexOf(toolbarPlacement) === -1) {
throw new TypeError('toolbarPlacement() parameter must be one of (' + toolbarPlacements.join(', ') + ') value');
}
options.toolbarPlacement = toolbarPlacement;
if (widget) {
hide();
show();
}
return picker;
};
picker.widgetPositioning = function (widgetPositioning) {
if (arguments.length === 0) {
return $.extend({}, options.widgetPositioning);
}
if (({}).toString.call(widgetPositioning) !== '[object Object]') {
throw new TypeError('widgetPositioning() expects an object variable');
}
if (widgetPositioning.horizontal) {
if (typeof widgetPositioning.horizontal !== 'string') {
throw new TypeError('widgetPositioning() horizontal variable must be a string');
}
widgetPositioning.horizontal = widgetPositioning.horizontal.toLowerCase();
if (horizontalModes.indexOf(widgetPositioning.horizontal) === -1) {
throw new TypeError('widgetPositioning() expects horizontal parameter to be one of (' + horizontalModes.join(', ') + ')');
}
options.widgetPositioning.horizontal = widgetPositioning.horizontal;
}
if (widgetPositioning.vertical) {
if (typeof widgetPositioning.vertical !== 'string') {
throw new TypeError('widgetPositioning() vertical variable must be a string');
}
widgetPositioning.vertical = widgetPositioning.vertical.toLowerCase();
if (verticalModes.indexOf(widgetPositioning.vertical) === -1) {
throw new TypeError('widgetPositioning() expects vertical parameter to be one of (' + verticalModes.join(', ') + ')');
}
options.widgetPositioning.vertical = widgetPositioning.vertical;
}
update();
return picker;
};
picker.calendarWeeks = function (calendarWeeks) {
if (arguments.length === 0) {
return options.calendarWeeks;
}
if (typeof calendarWeeks !== 'boolean') {
throw new TypeError('calendarWeeks() expects parameter to be a boolean value');
}
options.calendarWeeks = calendarWeeks;
update();
return picker;
};
picker.showTodayButton = function (showTodayButton) {
if (arguments.length === 0) {
return options.showTodayButton;
}
if (typeof showTodayButton !== 'boolean') {
throw new TypeError('showTodayButton() expects a boolean parameter');
}
options.showTodayButton = showTodayButton;
if (widget) {
hide();
show();
}
return picker;
};
picker.showClear = function (showClear) {
if (arguments.length === 0) {
return options.showClear;
}
if (typeof showClear !== 'boolean') {
throw new TypeError('showClear() expects a boolean parameter');
}
options.showClear = showClear;
if (widget) {
hide();
show();
}
return picker;
};
picker.widgetParent = function (widgetParent) {
if (arguments.length === 0) {
return options.widgetParent;
}
if (typeof widgetParent === 'string') {
widgetParent = $(widgetParent);
}
if (widgetParent !== null && (typeof widgetParent !== 'string' && !(widgetParent instanceof $))) {
throw new TypeError('widgetParent() expects a string or a jQuery object parameter');
}
options.widgetParent = widgetParent;
if (widget) {
hide();
show();
}
return picker;
};
picker.keepOpen = function (keepOpen) {
if (arguments.length === 0) {
return options.keepOpen;
}
if (typeof keepOpen !== 'boolean') {
throw new TypeError('keepOpen() expects a boolean parameter');
}
options.keepOpen = keepOpen;
return picker;
};
picker.focusOnShow = function (focusOnShow) {
if (arguments.length === 0) {
return options.focusOnShow;
}
if (typeof focusOnShow !== 'boolean') {
throw new TypeError('focusOnShow() expects a boolean parameter');
}
options.focusOnShow = focusOnShow;
return picker;
};
picker.inline = function (inline) {
if (arguments.length === 0) {
return options.inline;
}
if (typeof inline !== 'boolean') {
throw new TypeError('inline() expects a boolean parameter');
}
options.inline = inline;
return picker;
};
picker.clear = function () {
clear();
return picker;
};
picker.keyBinds = function (keyBinds) {
if (arguments.length === 0) {
return options.keyBinds;
}
options.keyBinds = keyBinds;
return picker;
};
picker.getMoment = function (d) {
return getMoment(d);
};
picker.debug = function (debug) {
if (typeof debug !== 'boolean') {
throw new TypeError('debug() expects a boolean parameter');
}
options.debug = debug;
return picker;
};
picker.allowInputToggle = function (allowInputToggle) {
if (arguments.length === 0) {
return options.allowInputToggle;
}
if (typeof allowInputToggle !== 'boolean') {
throw new TypeError('allowInputToggle() expects a boolean parameter');
}
options.allowInputToggle = allowInputToggle;
return picker;
};
picker.showClose = function (showClose) {
if (arguments.length === 0) {
return options.showClose;
}
if (typeof showClose !== 'boolean') {
throw new TypeError('showClose() expects a boolean parameter');
}
options.showClose = showClose;
return picker;
};
picker.keepInvalid = function (keepInvalid) {
if (arguments.length === 0) {
return options.keepInvalid;
}
if (typeof keepInvalid !== 'boolean') {
throw new TypeError('keepInvalid() expects a boolean parameter');
}
options.keepInvalid = keepInvalid;
return picker;
};
picker.datepickerInput = function (datepickerInput) {
if (arguments.length === 0) {
return options.datepickerInput;
}
if (typeof datepickerInput !== 'string') {
throw new TypeError('datepickerInput() expects a string parameter');
}
options.datepickerInput = datepickerInput;
return picker;
};
picker.parseInputDate = function (parseInputDate) {
if (arguments.length === 0) {
return options.parseInputDate;
}
if (typeof parseInputDate !== 'function') {
throw new TypeError('parseInputDate() expects a function parameter');
}
options.parseInputDate = parseInputDate;
return picker;
};
picker.disabledTimeIntervals = function (disabledTimeIntervals) {
///<signature helpKeyword="$.fn.datetimepicker.disabledTimeIntervals">
///<summary>Returns an array with the currently set disabled time intervals on the component.</summary>
///<returns type="array">options.disabledTimeIntervals</returns>
///</signature>
///<signature>
///<summary>Setting this takes precedence over options.minDate, options.maxDate configuration.</summary>
///<param name="dates" locid="$.fn.datetimepicker.disabledTimeIntervals_p:dates">Takes an array of time intervals and disallows the user from selecting times that fall within them.</param>
///</signature>
if (arguments.length === 0) {
return (options.disabledTimeIntervals ? $.extend({}, options.disabledTimeIntervals) : options.disabledTimeIntervals);
}
if (!disabledTimeIntervals) {
options.disabledTimeIntervals = false;
update();
return picker;
}
if (!(disabledTimeIntervals instanceof Array)) {
throw new TypeError('disabledTimeIntervals() expects an array parameter');
}
options.disabledTimeIntervals = disabledTimeIntervals;
update();
return picker;
};
picker.disabledHours = function (hours) {
///<signature helpKeyword="$.fn.datetimepicker.disabledHours">
///<summary>Returns an array with the currently set disabled hours on the component.</summary>
///<returns type="array">options.disabledHours</returns>
///</signature>
///<signature>
///<summary>Setting this takes precedence over options.minDate, options.maxDate configuration. Also calling this function removes the configuration of
///options.enabledHours if such exist.</summary>
///<param name="hours" locid="$.fn.datetimepicker.disabledHours_p:hours">Takes an [ int ] of values and disallows the user to select only from those hours.</param>
///</signature>
if (arguments.length === 0) {
return (options.disabledHours ? $.extend({}, options.disabledHours) : options.disabledHours);
}
if (!hours) {
options.disabledHours = false;
update();
return picker;
}
if (!(hours instanceof Array)) {
throw new TypeError('disabledHours() expects an array parameter');
}
options.disabledHours = indexGivenHours(hours);
options.enabledHours = false;
if (options.useCurrent && !options.keepInvalid) {
var tries = 0;
while (!isValid(date, 'h')) {
date.add(1, 'h');
if (tries === 24) {
throw 'Tried 24 times to find a valid date';
}
tries++;
}
setValue(date);
}
update();
return picker;
};
picker.enabledHours = function (hours) {
///<signature helpKeyword="$.fn.datetimepicker.enabledHours">
///<summary>Returns an array with the currently set enabled hours on the component.</summary>
///<returns type="array">options.enabledHours</returns>
///</signature>
///<signature>
///<summary>Setting this takes precedence over options.minDate, options.maxDate configuration. Also calling this function removes the configuration of options.disabledHours if such exist.</summary>
///<param name="hours" locid="$.fn.datetimepicker.enabledHours_p:hours">Takes an [ int ] of values and allows the user to select only from those hours.</param>
///</signature>
if (arguments.length === 0) {
return (options.enabledHours ? $.extend({}, options.enabledHours) : options.enabledHours);
}
if (!hours) {
options.enabledHours = false;
update();
return picker;
}
if (!(hours instanceof Array)) {
throw new TypeError('enabledHours() expects an array parameter');
}
options.enabledHours = indexGivenHours(hours);
options.disabledHours = false;
if (options.useCurrent && !options.keepInvalid) {
var tries = 0;
while (!isValid(date, 'h')) {
date.add(1, 'h');
if (tries === 24) {
throw 'Tried 24 times to find a valid date';
}
tries++;
}
setValue(date);
}
update();
return picker;
};
/**
* Returns the component's current viewDate (a moment object) when called without a parameter. Passing a falsy value resets the viewDate to the currently selected date; otherwise the parameter is parsed using the moment library with the options.format and options.useStrict configuration and becomes the new viewDate.
* @param {string|Date|moment} newDate
* @returns {Moment|picker} viewDate.clone() when used as a getter, otherwise the picker
*/
picker.viewDate = function (newDate) {
if (arguments.length === 0) {
return viewDate.clone();
}
if (!newDate) {
viewDate = date.clone();
return picker;
}
if (typeof newDate !== 'string' && !moment.isMoment(newDate) && !(newDate instanceof Date)) {
throw new TypeError('viewDate() parameter must be one of [string, moment or Date]');
}
viewDate = parseInputDate(newDate);
viewUpdate();
return picker;
};
// initializing element and component attributes
if (element.is('input')) {
input = element;
} else {
input = element.find(options.datepickerInput);
if (input.length === 0) {
input = element.find('input');
} else if (!input.is('input')) {
throw new Error('CSS class "' + options.datepickerInput + '" cannot be applied to non input element');
}
}
if (element.hasClass('input-group')) {
// in case there is more than one 'input-group-addon' Issue #48
if (element.find('.datepickerbutton').length === 0) {
component = element.find('.input-group-addon');
} else {
component = element.find('.datepickerbutton');
}
}
if (!options.inline && !input.is('input')) {
throw new Error('Could not initialize DateTimePicker without an input element');
}
// Set defaults for date here now instead of in var declaration
date = getMoment();
viewDate = date.clone();
$.extend(true, options, dataToOptions());
picker.options(options);
initFormatting();
attachDatePickerElementEvents();
if (input.prop('disabled')) {
picker.disable();
}
if (input.is('input') && input.val().trim().length !== 0) {
setValue(parseInputDate(input.val().trim()));
}
else if (options.defaultDate && input.attr('placeholder') === undefined) {
setValue(options.defaultDate);
}
if (options.inline) {
show();
}
return picker;
};
/********************************************************************************
*
* jQuery plugin constructor and defaults object
*
********************************************************************************/
/**
* See (http://jquery.com/).
* @name jQuery
* @class
* See the jQuery Library (http://jquery.com/) for full details. This just
* documents the function and classes that are added to jQuery by this plug-in.
*/
/**
* See (http://jquery.com/)
* @name fn
* @class
* See the jQuery Library (http://jquery.com/) for full details. This just
* documents the function and classes that are added to jQuery by this plug-in.
* @memberOf jQuery
*/
/**
* Show comments
* @class datetimepicker
* @memberOf jQuery.fn
*/
$.fn.datetimepicker = function (options) {
options = options || {};
var args = Array.prototype.slice.call(arguments, 1),
isInstance = true,
thisMethods = ['destroy', 'hide', 'show', 'toggle'],
returnValue;
if (typeof options === 'object') {
return this.each(function () {
var $this = $(this);
if (!$this.data('DateTimePicker')) {
// create a private copy of the defaults object
options = $.extend(true, {}, $.fn.datetimepicker.defaults, options);
$this.data('DateTimePicker', dateTimePicker($this, options));
}
});
} else if (typeof options === 'string') {
this.each(function () {
var $this = $(this),
instance = $this.data('DateTimePicker');
if (!instance) {
throw new Error('bootstrap-datetimepicker("' + options + '") method was called on an element that is not using DateTimePicker');
}
returnValue = instance[options].apply(instance, args);
isInstance = returnValue === instance;
});
if (isInstance || $.inArray(options, thisMethods) > -1) {
return this;
}
return returnValue;
}
throw new TypeError('Invalid arguments for DateTimePicker: ' + options);
};
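/**
 * Illustrative usage sketch (not part of the plugin source). The selector and the
 * option values are assumptions for the example only:
 *
 *   $('#datetimepicker1').datetimepicker({ format: 'YYYY-MM-DD HH:mm' });
 *   var dp = $('#datetimepicker1').data('DateTimePicker');
 *   dp.minDate(moment().startOf('day')); // setters return the picker, so calls chain
 *   dp.date();                           // getter: a clone of the current moment, or null
 */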
$.fn.datetimepicker.defaults = {
timeZone: '',
format: false,
dayViewHeaderFormat: 'MMMM YYYY',
extraFormats: false,
stepping: 1,
minDate: false,
maxDate: false,
useCurrent: true,
collapse: true,
locale: moment.locale(),
defaultDate: false,
disabledDates: false,
enabledDates: false,
icons: {
time: 'glyphicon glyphicon-time',
date: 'glyphicon glyphicon-calendar',
up: 'glyphicon glyphicon-chevron-up',
down: 'glyphicon glyphicon-chevron-down',
previous: 'glyphicon glyphicon-chevron-left',
next: 'glyphicon glyphicon-chevron-right',
today: 'glyphicon glyphicon-screenshot',
clear: 'glyphicon glyphicon-trash',
close: 'glyphicon glyphicon-remove'
},
tooltips: {
today: 'Go to today',
clear: 'Clear selection',
close: 'Close the picker',
selectMonth: 'Select Month',
prevMonth: 'Previous Month',
nextMonth: 'Next Month',
selectYear: 'Select Year',
prevYear: 'Previous Year',
nextYear: 'Next Year',
selectDecade: 'Select Decade',
prevDecade: 'Previous Decade',
nextDecade: 'Next Decade',
prevCentury: 'Previous Century',
nextCentury: 'Next Century',
pickHour: 'Pick Hour',
incrementHour: 'Increment Hour',
decrementHour: 'Decrement Hour',
pickMinute: 'Pick Minute',
incrementMinute: 'Increment Minute',
decrementMinute: 'Decrement Minute',
pickSecond: 'Pick Second',
incrementSecond: 'Increment Second',
decrementSecond: 'Decrement Second',
togglePeriod: 'Toggle Period',
selectTime: 'Select Time'
},
useStrict: false,
sideBySide: false,
daysOfWeekDisabled: false,
calendarWeeks: false,
viewMode: 'days',
toolbarPlacement: 'default',
showTodayButton: false,
showClear: false,
showClose: false,
widgetPositioning: {
horizontal: 'auto',
vertical: 'auto'
},
widgetParent: null,
ignoreReadonly: false,
keepOpen: false,
focusOnShow: true,
inline: false,
keepInvalid: false,
datepickerInput: '.datepickerinput',
keyBinds: {
up: function (widget) {
if (!widget) {
return;
}
var d = this.date() || this.getMoment();
if (widget.find('.datepicker').is(':visible')) {
this.date(d.clone().subtract(7, 'd'));
} else {
this.date(d.clone().add(this.stepping(), 'm'));
}
},
down: function (widget) {
if (!widget) {
this.show();
return;
}
var d = this.date() || this.getMoment();
if (widget.find('.datepicker').is(':visible')) {
this.date(d.clone().add(7, 'd'));
} else {
this.date(d.clone().subtract(this.stepping(), 'm'));
}
},
'control up': function (widget) {
if (!widget) {
return;
}
var d = this.date() || this.getMoment();
if (widget.find('.datepicker').is(':visible')) {
this.date(d.clone().subtract(1, 'y'));
} else {
this.date(d.clone().add(1, 'h'));
}
},
'control down': function (widget) {
if (!widget) {
return;
}
var d = this.date() || this.getMoment();
if (widget.find('.datepicker').is(':visible')) {
this.date(d.clone().add(1, 'y'));
} else {
this.date(d.clone().subtract(1, 'h'));
}
},
left: function (widget) {
if (!widget) {
return;
}
var d = this.date() || this.getMoment();
if (widget.find('.datepicker').is(':visible')) {
this.date(d.clone().subtract(1, 'd'));
}
},
right: function (widget) {
if (!widget) {
return;
}
var d = this.date() || this.getMoment();
if (widget.find('.datepicker').is(':visible')) {
this.date(d.clone().add(1, 'd'));
}
},
pageUp: function (widget) {
if (!widget) {
return;
}
var d = this.date() || this.getMoment();
if (widget.find('.datepicker').is(':visible')) {
this.date(d.clone().subtract(1, 'M'));
}
},
pageDown: function (widget) {
if (!widget) {
return;
}
var d = this.date() || this.getMoment();
if (widget.find('.datepicker').is(':visible')) {
this.date(d.clone().add(1, 'M'));
}
},
enter: function () {
this.hide();
},
escape: function () {
this.hide();
},
//tab: function (widget) { //this break the flow of the form. disabling for now
// var toggle = widget.find('.picker-switch a[data-action="togglePicker"]');
// if(toggle.length > 0) toggle.click();
//},
'control space': function (widget) {
if (!widget) {
return;
}
if (widget.find('.timepicker').is(':visible')) {
widget.find('.btn[data-action="togglePeriod"]').click();
}
},
t: function () {
this.date(this.getMoment());
},
'delete': function () {
this.clear();
}
},
debug: false,
allowInputToggle: false,
disabledTimeIntervals: false,
disabledHours: false,
enabledHours: false,
viewDate: false
};
if (typeof module !== 'undefined') {
module.exports = $.fn.datetimepicker;
}
}));
/DiscoPlot-1.0.2.tar.gz/DiscoPlot-1.0.2/docs/_build/html/_static/doctools.js
/**
 * select a different prefix for underscore
*/
$u = _.noConflict();
/**
* make the code below compatible with browsers without
* an installed firebug like debugger
if (!window.console || !console.firebug) {
var names = ["log", "debug", "info", "warn", "error", "assert", "dir",
"dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace",
"profile", "profileEnd"];
window.console = {};
for (var i = 0; i < names.length; ++i)
window.console[names[i]] = function() {};
}
*/
/**
* small helper function to urldecode strings
*/
jQuery.urldecode = function(x) {
return decodeURIComponent(x).replace(/\+/g, ' ');
};
/**
* small helper function to urlencode strings
*/
jQuery.urlencode = encodeURIComponent;
/**
* This function returns the parsed url parameters of the
* current request. Multiple values per key are supported,
* it will always return arrays of strings for the value parts.
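 * For example, a location search of '?highlight=foo+bar&page=2' yields
 * {highlight: ['foo bar'], page: ['2']}.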
*/
jQuery.getQueryParameters = function(s) {
if (typeof s == 'undefined')
s = document.location.search;
var parts = s.substr(s.indexOf('?') + 1).split('&');
var result = {};
for (var i = 0; i < parts.length; i++) {
var tmp = parts[i].split('=', 2);
var key = jQuery.urldecode(tmp[0]);
var value = jQuery.urldecode(tmp[1]);
if (key in result)
result[key].push(value);
else
result[key] = [value];
}
return result;
};
/**
* highlight a given string on a jquery object by wrapping it in
* span elements with the given class name.
*/
jQuery.fn.highlightText = function(text, className) {
function highlight(node) {
if (node.nodeType == 3) {
var val = node.nodeValue;
var pos = val.toLowerCase().indexOf(text);
if (pos >= 0 && !jQuery(node.parentNode).hasClass(className)) {
var span = document.createElement("span");
span.className = className;
span.appendChild(document.createTextNode(val.substr(pos, text.length)));
node.parentNode.insertBefore(span, node.parentNode.insertBefore(
document.createTextNode(val.substr(pos + text.length)),
node.nextSibling));
node.nodeValue = val.substr(0, pos);
}
}
else if (!jQuery(node).is("button, select, textarea")) {
jQuery.each(node.childNodes, function() {
highlight(this);
});
}
}
return this.each(function() {
highlight(this);
});
};
/**
* Small JavaScript module for the documentation.
*/
var Documentation = {
init : function() {
this.fixFirefoxAnchorBug();
this.highlightSearchWords();
this.initIndexTable();
},
/**
* i18n support
*/
TRANSLATIONS : {},
PLURAL_EXPR : function(n) { return n == 1 ? 0 : 1; },
LOCALE : 'unknown',
// gettext and ngettext don't access this so that the functions
// can safely be bound to a different name (_ = Documentation.gettext)
gettext : function(string) {
var translated = Documentation.TRANSLATIONS[string];
if (typeof translated == 'undefined')
return string;
return (typeof translated == 'string') ? translated : translated[0];
},
ngettext : function(singular, plural, n) {
var translated = Documentation.TRANSLATIONS[singular];
if (typeof translated == 'undefined')
return (n == 1) ? singular : plural;
return translated[Documentation.PLURAL_EXPR(n)];
},
addTranslations : function(catalog) {
for (var key in catalog.messages)
this.TRANSLATIONS[key] = catalog.messages[key];
this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')');
this.LOCALE = catalog.locale;
},
/**
* add context elements like header anchor links
*/
addContextElements : function() {
$('div[id] > :header:first').each(function() {
$('<a class="headerlink">\u00B6</a>').
attr('href', '#' + this.id).
attr('title', _('Permalink to this headline')).
appendTo(this);
});
$('dt[id]').each(function() {
$('<a class="headerlink">\u00B6</a>').
attr('href', '#' + this.id).
attr('title', _('Permalink to this definition')).
appendTo(this);
});
},
/**
* workaround a firefox stupidity
*/
fixFirefoxAnchorBug : function() {
if (document.location.hash && $.browser.mozilla)
window.setTimeout(function() {
document.location.href += '';
}, 10);
},
/**
* highlight the search words provided in the url in the text
*/
highlightSearchWords : function() {
var params = $.getQueryParameters();
var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : [];
if (terms.length) {
var body = $('div.body');
if (!body.length) {
body = $('body');
}
window.setTimeout(function() {
$.each(terms, function() {
body.highlightText(this.toLowerCase(), 'highlighted');
});
}, 10);
$('<p class="highlight-link"><a href="javascript:Documentation.' +
'hideSearchWords()">' + _('Hide Search Matches') + '</a></p>')
.appendTo($('#searchbox'));
}
},
/**
* init the domain index toggle buttons
*/
initIndexTable : function() {
var togglers = $('img.toggler').click(function() {
var src = $(this).attr('src');
var idnum = $(this).attr('id').substr(7);
$('tr.cg-' + idnum).toggle();
if (src.substr(-9) == 'minus.png')
$(this).attr('src', src.substr(0, src.length-9) + 'plus.png');
else
$(this).attr('src', src.substr(0, src.length-8) + 'minus.png');
}).css('display', '');
if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) {
togglers.click();
}
},
/**
* helper function to hide the search marks again
*/
hideSearchWords : function() {
$('#searchbox .highlight-link').fadeOut(300);
$('span.highlighted').removeClass('highlighted');
},
/**
* make the url absolute
*/
makeURL : function(relativeURL) {
return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL;
},
/**
* get the current relative url
*/
getCurrentURL : function() {
var path = document.location.pathname;
var parts = path.split(/\//);
$.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() {
if (this == '..')
parts.pop();
});
var url = parts.join('/');
return path.substring(url.lastIndexOf('/') + 1, path.length - 1);
}
};
// quick alias for translations
_ = Documentation.gettext;
$(document).ready(function() {
Documentation.init();
}); | PypiClean |
/dipex-4.54.5.tar.gz/dipex-4.54.5/exporters/utils/priority_by_class.py
from more_itertools import first
def choose_public_address_helper(
candidates, prioritized_classes, scope_getter, address_type_getter, uuid_getter
):
"""Pick the most desirable valid candidate address.
An address candidate is considered valid if its visibility is PUBLIC or UNSET.
An address candidate's desirability is inversely proportional to its address
type's position inside prioritized_classes. I.e. address candidates whose address
types occur earlier in prioritized_classes are more desirable than address
candidates whose address types occur later (or, worse, not at all).
Additionally, lower UUIDs are considered more desirable than larger ones; this
ensures a consistent order regardless of the order of the candidates.
Args:
candidates: List of Address entries.
prioritized_classes: List of Address Type UUIDs.
scope_getter: Function from address entry to visibility scope.
address_type_getter: Function from address entry to address type uuid.
uuid_getter: Function from address entry to address entry uuid.
Returns:
Address entry: The address entry that is the most desirable.
"""
def filter_by_visibility(candidate):
"""Predicate for filtering on visibility.
Args:
candidate: Address entry.
Returns:
bool: True for candidates with PUBLIC or UNSET visibility.
False otherwise.
"""
visibility_scope = scope_getter(candidate)
return visibility_scope is None or visibility_scope == "PUBLIC"
def determine_candidate_desirability(candidate):
"""Predicate for determining desirability of an address candidate.
The lower the value returned, the more desirable the candidate is.
Args:
candidate: Address entry.
Returns:
int: Index of the candidates address_type inside prioritized_classes.
Length of prioritized_classes if no match is found.
"""
address_type_uuid = address_type_getter(candidate)
try:
priority = prioritized_classes.index(address_type_uuid)
except ValueError:
priority = len(prioritized_classes)
return priority, uuid_getter(candidate)
# Filter candidates to only keep valid ones
candidates = filter(filter_by_visibility, candidates)
# If no prioritized_classes are provided, all the entries are equally desirable.
# Thus we can just return the entry with the lowest uuid.
if not prioritized_classes:
return min(candidates, key=uuid_getter, default=None)
# If prioritized_classes are provided, we want to return the most desirable one.
# The lowest index is the most desirable.
return min(candidates, key=determine_candidate_desirability, default=None)
def mora_choose_public_address(candidates, prioritized_classes):
"""See choose_public_address_helper.
Candidates are a list of MO address entries.
"""
def scope_getter(candidate):
if "visibility" not in candidate:
return None
if candidate["visibility"] is None:
return None
return candidate["visibility"]["scope"]
def address_type_getter(candidate):
return candidate["address_type"]["uuid"]
def uuid_getter(candidate):
return candidate["uuid"]
return choose_public_address_helper(
candidates,
prioritized_classes,
scope_getter,
address_type_getter,
uuid_getter,
)
def lc_choose_public_address(candidates, prioritized_classes, lc):
"""See choose_public_address_helper.
Candidates are a list of LoraCache address entries.
"""
def scope_getter(candidate):
if "visibility" not in candidate:
return None
if candidate["visibility"] is None:
return None
return lc.classes[candidate["visibility"]]["scope"]
def address_type_getter(candidate):
return candidate["adresse_type"]
def uuid_getter(candidate):
return candidate["uuid"]
return choose_public_address_helper(
candidates,
prioritized_classes,
scope_getter,
address_type_getter,
uuid_getter,
)
def lcdb_choose_public_address(candidates, prioritized_classes):
"""See choose_public_address_helper.
Candidates are a list of LoraCache sqlalchemy address entries.
"""
def scope_getter(candidate):
scope = candidate.synlighed_scope
return scope or None
def address_type_getter(candidate):
return candidate.adressetype_uuid
def uuid_getter(candidate):
return candidate.uuid
return choose_public_address_helper(
candidates,
prioritized_classes,
scope_getter,
address_type_getter,
uuid_getter,
)
def choose_public_address(candidates, prioritized_classes):
"""See mora_choose_public_address."""
return mora_choose_public_address(candidates, prioritized_classes)
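# Illustrative sketch only (not part of the exporter): how choose_public_address
# picks among MO-style address dicts. The UUIDs and address-type class UUIDs below
# are made-up placeholder values.
if __name__ == "__main__":
    candidates = [
        {"uuid": "b", "visibility": None,
         "address_type": {"uuid": "phone-class"}},
        {"uuid": "a", "visibility": {"scope": "SECRET"},
         "address_type": {"uuid": "email-class"}},
    ]
    # The SECRET e-mail entry is filtered out, so the phone entry is chosen even
    # though "email-class" is listed first in prioritized_classes.
    chosen = choose_public_address(candidates, ["email-class", "phone-class"])
    print(chosen["uuid"])  # -> "b"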
/Humongolus-1.0.7.tar.gz/Humongolus-1.0.7/humongolus/widget.py
import copy
from humongolus import Widget, Field, Document, EmbeddedDocument, Lazy, List, DocumentException, EMPTY
def escape(s):
orig = copy.copy(s)
try:
s = unicode(s)
return s.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;')
except: return orig
class HTMLElement(Widget):
_type = "text"
_tag = "input"
_fields = []
def render_fields(self, namespace=None, **kwargs):
parts = []
for fi in self._fields:
try:
i = self.__dict__[fi]
ns = "-".join([namespace, i.attributes._name]) if namespace else i.attributes._name
if kwargs.get("render_labels", None):
label = "%s_%s" % (self.attributes.prepend, ns) if self.attributes.prepend else ns
if i.attributes.label: parts.append(self.render_label(label, i.attributes.label))
a = i.render(namespace=ns, **kwargs)
if isinstance(a, list): parts.extend(a)
else: parts.append(a)
except Exception as e:
print e
pass
return parts
def render_label(self, name, label):
return "<label for='%s'>%s</label>" % (name, label)
def label_tag(self):
return self.render_label(self.attributes.name, self.attributes.label)
def compile_tag(self, obj, close=True):
atts = ["<%s" % obj.pop("tag", "input")]
obj.update(obj.pop("extra", {}))
for k,v in obj.iteritems():
if v in EMPTY: continue
v = v if isinstance(v, list) else [v]
atts.append(u"%s='%s'" % (k, u" ".join([escape(val) for val in v])))
atts.append("/>" if close else ">")
return u" ".join(atts)
def __iter__(self):
for fi in self._fields:
v = self.__dict__[fi]
yield v
class Input(HTMLElement):
def render(self, *args, **kwargs):
self._type = kwargs.get("type", self._type)
self.attributes._name = kwargs.get("namespace", self.attributes._name)
self.attributes._id = kwargs.get("id", "id_%s"%self.attributes._name)
self.attributes.value = kwargs.get("value", self.object.__repr__())
self.attributes.description = kwargs.get("description", self.attributes.description)
self.attributes.cls = kwargs.get("cls", self.attributes.cls)
self.attributes.label = kwargs.get("label", self.attributes.label)
self.attributes.extra = kwargs.get("extra", self.attributes.extra)
obj = {
"tag":self._tag,
"type":self._type,
"id":self.attributes.id,
"name":self.attributes.name,
"value":self.attributes.value,
"class":self.attributes.cls,
"extra":self.attributes.extra
}
return self.compile_tag(obj)
class Password(Input):
_type = "password"
class CheckBox(Input):
_type = "checkbox"
def render(self, *args, **kwargs):
extra = {"checked":'CHECKED'} if self.object._value else {}
kwargs["extra"] = extra
kwargs["value"] = self.attributes._name
return super(CheckBox, self).render(*args, **kwargs)
class Select(Input):
def render(self, *args, **kwargs):
val = super(Select, self).render(*args, **kwargs)
obj = {
"tag":"select",
"id":self.attributes.id,
"name":self.attributes.name,
"class":self.attributes.cls,
"extra":self.attributes.extra
}
st = self.compile_tag(obj, close=False)
ch = []
for i in self.object.get_choices(render=self.attributes.item_render):
val = i['value'] if isinstance(i, dict) else i
display = i['display'] if isinstance(i, dict) else i
sel = "selected='SELECTED'" if val == self.object._value else ""
ch.append("<option value='%s' %s>%s</option>" % (val, sel, display))
return "%s%s</select>" % (st, "".join(ch))
class MultipleSelect(Input):
def render(self, *args, **kwargs):
val = super(MultipleSelect, self).render(*args, **kwargs)
obj = {
"tag":"select",
"id":self.attributes.id,
"name":self.attributes.name,
"class":self.attributes.cls,
"extra":self.attributes.extra
}
st = self.compile_tag(obj, close=False)
ch = []
for i in self.object.get_choices(render=self.attributes.item_render):
val = i['value'] if isinstance(i, dict) else i
display = i['display'] if isinstance(i, dict) else i
sel = "selected='SELECTED'" if val in self.object else ""
ch.append("<option value='%s' %s>%s</option>" % (val, sel, display))
return "%s%s</select>" % (st, "".join(ch))
class TextArea(Input):
def render(self, *args, **kwargs):
val = super(TextArea, self).render(*args, **kwargs)
obj = {
"tag":"textarea",
"id":self.attributes.id,
"name":self.attributes.name,
"class":self.attributes.cls,
"cols":self.attributes.cols,
"rows":self.attributes.rows,
"extra":self.attributes.extra
}
st = self.compile_tag(obj, close=False)
return "%s%s</textarea>" % (st, self.attributes.value if self.attributes.value else "")
class FieldSet(HTMLElement):
def render(self, *args, **kwargs):
val = super(FieldSet, self).render(*args, **kwargs)
parts = []
obj = {
"tag":"fieldset",
"id":self.attributes.id,
"name":self.attributes.name,
"cls":self.attributes.cls,
"extra":self.attributes.extra
}
st = self.compile_tag(obj, close=False)
ns = kwargs.pop('namespace', None)
parts.append(st)
parts.extend(self.render_fields(namespace=ns, **kwargs))
parts.append("</fieldset>")
return parts
class Form(HTMLElement):
#Attributes
errors = {}
def render(self, *args, **kwargs):
val = super(Form, self).render(*args, **kwargs)
parts = []
obj = {
"tag":"form",
"id":self.attributes.id,
"name":self.attributes.name,
"class":self.attributes.cls,
"action":self.attributes.action,
"method":self.attributes.method,
"type":self.attributes.type,
"extra":self.attributes.extra
}
st = self.compile_tag(obj, close=False)
parts.append(st)
parts.extend(self.render_fields(**kwargs))
parts.append(self.submit())
parts.append("</form>")
return "".join(parts)
def parse_data(self, data):
obj = {}
for k,v in data.iteritems():
key = k[len(self._prepend)+1:] if self._prepend else k
parts = key.split('-')
branch = obj
for part in parts[0:-1]:
branch = branch.setdefault(part, {})
branch[parts[-1]] = v
return obj
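    # Example of the mapping performed above, assuming no prepend is configured
    # (keys are illustrative): {'owner-name': 'Anne', 'owner-age': '7'}
    # becomes {'owner': {'name': 'Anne', 'age': '7'}}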
def validate(self):
if self._data:
obj = self.parse_data(self._data)
print obj
self.object._map(obj)
errors = self.object._errors()
if len(errors.keys()):
for k,v in errors.iteritems():
try:
self.__dict__[k].errors.append(v)
except: pass
self.errors = errors
raise DocumentException(errors=errors)
def submit(self):
return "<input type='submit' value='submit' />" | PypiClean |
/Cibyl-1.0.0.0rc1.tar.gz/Cibyl-1.0.0.0rc1/docs/source/parser.rst
Parser
======
Cibyl provides two sources of user input, the configuration file and the command
line arguments. The configuration file details the ci environment that the user
wants to query, while the command line arguments tell Cibyl what the user wants
to query.
Cibyl's cli is divided into several subcommands. The parser is the component
responsible for bringing all the subcommands together and ensuring the
corresponding arguments are added. In the case of the ``features`` subcommand
that is simple, since it only has one argument. The case of the ``query``
subcommand is different, since the cli arguments are extended dynamically
depending on the contents of the configuration.
.. note::
The rest of this page is relevant **only** for the ``query`` subcommand.
When running ``cibyl query -h`` only the arguments that are relevant to the user,
according to their configuration, will be shown. If there is no configuration
file, Cibyl will just print a few general arguments when calling ``cibyl query -h``.
If the configuration is populated then arguments will be added depending on its contents.
The parser is extended using a hierarchy of CI models. This hierarchy is
Cibyl's internal representation of the CI environments. The models are created after reading the
configuration and the hierarchy is implicitly defined in the API attribute of
said models. For example, one environment might include a Jenkins instance as
CI system, and have it also as source for information, in addition to an
ElasticSearch instance as a second source. With this environment, if the user
runs ``cibyl query -h``, it will show arguments that are relevant to a Jenkins
system, like ``--jobs``, ``--builds`` or ``--build-status``. In such a case it will
not show arguments like ``--pipelines`` which would be useful if the CI system
was a Zuul instance.
The API of a CI model is a dictionary with the following structure (extracted
from the System API)::
API = {
'name': {
'attr_type': str,
'arguments': []
},
'sources': {
'attr_type': Source,
'attribute_value_class': AttributeListValue,
'arguments': [Argument(name='--sources', arg_type=str,
nargs="*",
description="Source name")]
},
'jobs': {'attr_type': Job,
'attribute_value_class': AttributeDictValue,
'arguments': [Argument(name='--jobs', arg_type=str,
nargs='*',
description="System jobs",
func='get_jobs')]}
}
each key corresponds to the name of an attribute, and the value is another
dictionary with attribute-related information. At this point we need to
distinguish between arguments and attributes. In Cibyl an ``Argument`` is the object
that is obtained from parsing the user input. The values passed to each option
like ``--debug`` or ``--jobs`` are stored in an ``Argument``. Attributes correspond to the actual
key-value pairs in the API. An attribute has an ``attribute_value_class`` which
by default is ``AttributeValue``, but can also be ``AttributeDictValue`` and ``AttributeListValue``.
The difference between the three is how they store the arguments. The first
is intended to hold a single option (things like name, type, etc.), while the
other two hold a collection of values, either in a dictionary or a list (hence
the name). The information provided by the user is accessible through the
``value`` field of any ``Attribute`` class.
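As a rough illustration (the attribute names below are examples, not a literal
excerpt of the Cibyl source), the parsed user input always sits behind the
``value`` field regardless of the container class::

    system.name.value     # AttributeValue: a single value, e.g. "production"
    system.jobs.value     # AttributeDictValue: e.g. {"job1": <Job>, "job2": <Job>}
    system.sources.value  # AttributeListValue: e.g. [<Source>, <Source>]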
Each API element also has an `attr_type`, which describes what kind of object
it will hold. In the example above `name` will hold a string, while `jobs`
will hold a dictionary of Job objects. This allows us to establish the
hierarchy mentioned previously, by checking if the `attr_type` field is not
a builtin type. Finally, there is an `arguments` field, which associates the
actual options that will be shown in the cli with an attribute. An attribute may
have no arguments, one argument or multiple arguments associated with it.
``Argument`` objects have a set of options to configure the behavior of the
cli. The `name` determines the option that will be shown, `arg_type` specifies
the type used to store the user input (str, int, etc.), `nargs` and
`description` have the same meaning as they do in the argparse module.
The `level` argument measures how deep in the hierarchy
a given model is. Finally, we see the `func` argument, which points to the
method a source must implement in order to provide information about a certain
model. In the example shown here, only jobs has an argument with `func`
defined, as it is the only CI model present. If the user runs a query like::
cibyl query --jobs
then Cibyl will look at the sources defined and check whether any has a method
``get_jobs``, and if it finds one it will use it to get all the jobs available
in that source.
Arguments are added to the application parser in the ``extend_parser`` method
of the ``Orchestrator`` class. This method loops through the API of a model
(in the first call it will be an ``Environment`` model) and adds its arguments. If any
of the API elements is a CI model, the element's API is recursively used to
augment the parser. As the ``extend_parser`` method iterates through the model
hierarchy, it creates a graph of the relationships between query methods (the
sources' methods that are added to the arguments' `func` attribute). The edges
of the graph are created when a new recursive call is made. As an example, when
exploring the API for the Job model, we know that the arguments will call
``get_jobs``, so when a new call is made for the Build API, a new edge will be
created from ``get_jobs`` to all the new query methods that are found, in this
case it will be ``get_builds``.
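A stripped-down sketch of that recursion is shown below; the signature and the
helpers (``is_builtin``, ``query_func``, ``query_graph``) are illustrative
simplifications rather than the literal ``Orchestrator`` code::

    def extend_parser(self, attributes, parent_func=None, level=0):
        for name, info in attributes.items():
            for arg in info.get('arguments', []):
                self.parser.add_argument(arg, level=level)
                if arg.func:
                    # remember which query method serves this argument
                    self.query_graph.add_edge(parent_func, arg.func)
            if not is_builtin(info['attr_type']):
                # the attribute holds another CI model: recurse one level deeper
                self.extend_parser(info['attr_type'].API,
                                   parent_func=query_func(info),
                                   level=level + 1)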
For each recursive call, the **level** is increased.
The level parameter is key to identifying the source of information for the query
that the user sends. In the Jenkins environment example mentioned before,
we may have a hierarchy like::
Environment => System => Job => Build
where at each step we increase the level by 1. We can then parse the cli
arguments and sort by decreasing level. To select which query method should be
called, Cibyl relies on the graph constructed during the call to
``extend_parser``. It iterates over the sorted list of arguments and for each
of them constructs a path to the root of the graph. The intermediate nodes in
this path are removed from the list of arguments to query, since by the
hierarchical nature of the relationship between the models, calling an
argument's `func` makes the call to the argument's parent `func` redundant.
In the example above, *Build* is the model with the largest level. If we assume that
the user has made a call like ``cibyl query --jobs --builds``, we want to query the sources for builds,
but we know that each build will be associated with a job, and each job will be associated with
a system, etc. We also know that after calling ``get_builds``, we will not need
to call ``get_jobs``. Thus we get a sorted list of arguments, which is [`builds`, `jobs`].
We create a path from `builds` to the root of the graph, which in the case of
a Jenkins systems is `jobs` (for a zuul system this would be more complex).
After iterating over the path, we remove `jobs` from the list of arguments to
query, since `builds` already will provide the `jobs` information.
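Put differently, the pruning step can be sketched like this (``path_to_root``
is written here as a hypothetical helper; the real implementation lives in the
orchestrator and may differ)::

    def path_to_root(graph, func):
        # walk parent edges back to the root (assumes one parent per node)
        parents = {child: parent for parent, children in graph.items()
                   for child in children}
        path = []
        while func in parents:
            func = parents[func]
            path.append(func)
        return path

    def select_query_methods(args, graph):
        # sort by decreasing level: deepest models first, e.g. [builds, jobs]
        remaining = sorted(args, key=lambda a: a.level, reverse=True)
        selected = []
        while remaining:
            arg = remaining.pop(0)
            selected.append(arg.func)
            # anything on the path to the root is already covered, e.g.
            # get_builds also returns the jobs the builds belong to
            redundant = set(path_to_root(graph, arg.func))
            remaining = [a for a in remaining if a.func not in redundant]
        return selected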
| PypiClean |
/OTLModel/Classes/Onderdeel/Buisbekleding.py | from OTLMOW.OTLModel.BaseClasses.OTLAttribuut import OTLAttribuut
from OTLMOW.OTLModel.Classes.ImplementatieElement.AIMObject import AIMObject
from OTLMOW.OTLModel.Datatypes.DtcDocument import DtcDocument
from OTLMOW.OTLModel.Datatypes.KlBekledingPlaats import KlBekledingPlaats
from OTLMOW.OTLModel.Datatypes.KlBuisbekledingUitvoeringswijze import KlBuisbekledingUitvoeringswijze
from OTLMOW.OTLModel.Datatypes.KwantWrdInMeter import KwantWrdInMeter
from OTLMOW.OTLModel.Datatypes.KwantWrdInMillimeter import KwantWrdInMillimeter
from OTLMOW.GeometrieArtefact.LijnGeometrie import LijnGeometrie
# Generated with OTLClassCreator. To modify: extend, do not edit
class Buisbekleding(AIMObject, LijnGeometrie):
"""De bekleding of coating ter bescherming van de buis."""
typeURI = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Buisbekleding'
"""De URI van het object volgens https://www.w3.org/2001/XMLSchema#anyURI."""
def __init__(self):
AIMObject.__init__(self)
LijnGeometrie.__init__(self)
self._laagdikte = OTLAttribuut(field=KwantWrdInMillimeter,
naam='laagdikte',
label='Laagdikte',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Buisbekleding.laagdikte',
definition='De dikte van de bekledingslaag in millimeter.',
owner=self)
self._lengte = OTLAttribuut(field=KwantWrdInMeter,
naam='lengte',
label='Lengte',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Buisbekleding.lengte',
definition='De totale lengte van de buisbekleding in lopende meter.',
owner=self)
self._plaats = OTLAttribuut(field=KlBekledingPlaats,
naam='plaats',
label='plaats',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Buisbekleding.plaats',
definition='De kant waar de bekleding van de buis zich bevindt.',
owner=self)
self._technischeFiche = OTLAttribuut(field=DtcDocument,
naam='technischeFiche',
label='technische fiche',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Buisbekleding.technischeFiche',
definition='De technische fiche van de buisbekleding.',
owner=self)
self._tot = OTLAttribuut(field=KwantWrdInMeter,
naam='tot',
label='tot',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Buisbekleding.tot',
definition='Het einde van de buisbekleding in meter ten opzichte van de beginput van de buis.',
owner=self)
self._uitvoeringswijze = OTLAttribuut(field=KlBuisbekledingUitvoeringswijze,
naam='uitvoeringswijze',
label='uitvoeringswijze',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Buisbekleding.uitvoeringswijze',
definition='Materiaal en manier van aanbrengen van de buisbekleding.',
owner=self)
self._van = OTLAttribuut(field=KwantWrdInMeter,
naam='van',
label='van',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Buisbekleding.van',
definition='Het begin van de buisbekleding in meter ten opzichte van de beginput van de leiding.',
owner=self)
@property
def laagdikte(self):
"""De dikte van de bekledingslaag in millimeter."""
return self._laagdikte.get_waarde()
@laagdikte.setter
def laagdikte(self, value):
self._laagdikte.set_waarde(value, owner=self)
@property
def lengte(self):
"""De totale lengte van de buisbekleding in lopende meter."""
return self._lengte.get_waarde()
@lengte.setter
def lengte(self, value):
self._lengte.set_waarde(value, owner=self)
@property
def plaats(self):
"""De kant waar de bekleding van de buis zich bevindt."""
return self._plaats.get_waarde()
@plaats.setter
def plaats(self, value):
self._plaats.set_waarde(value, owner=self)
@property
def technischeFiche(self):
"""De technische fiche van de buisbekleding."""
return self._technischeFiche.get_waarde()
@technischeFiche.setter
def technischeFiche(self, value):
self._technischeFiche.set_waarde(value, owner=self)
@property
def tot(self):
"""Het einde van de buisbekleding in meter ten opzichte van de beginput van de buis."""
return self._tot.get_waarde()
@tot.setter
def tot(self, value):
self._tot.set_waarde(value, owner=self)
@property
def uitvoeringswijze(self):
"""Materiaal en manier van aanbrengen van de buisbekleding."""
return self._uitvoeringswijze.get_waarde()
@uitvoeringswijze.setter
def uitvoeringswijze(self, value):
self._uitvoeringswijze.set_waarde(value, owner=self)
@property
def van(self):
"""Het begin van de buisbekleding in meter ten opzichte van de beginput van de leiding."""
return self._van.get_waarde()
@van.setter
def van(self, value):
self._van.set_waarde(value, owner=self) | PypiClean |
/BlitzChain-0.8.2.tar.gz/BlitzChain-0.8.2/blitzchain/splitter.py | from typing import List, Dict
class WordSplitter:
def split_text_into_array(
self,
text: str,
max_word_count: int = 200,
overlap: int = 10,
chunksize: int = 20,
) -> List[str]:
"""Splits the input text into an array of strings with each element not exceeding the specified max_word_count. Allows an overlapping number of words."""
words = text.split()
word_count = len(words)
if word_count <= max_word_count:
yield [text]
return
segments = []
start = 0
while start < word_count:
end = min(start + max_word_count, word_count)
segment_words = words[start:end]
segment_text = " ".join(segment_words)
segments.append(segment_text)
if len(segments) > chunksize:
yield segments
segments = []
start = end if end == word_count else end - overlap
# if segments == []:
# return
yield segments
def split_object_text(
self,
dicts: List[Dict],
key: str,
max_word_count: int = 200,
overlap: int = 10,
chunksize: int = 20,
) -> List[dict]:
"""Splits the text of a specified key in a list of dictionaries, creating a new dictionary for each split text segment."""
print("splitting text...")
split_dicts = []
for d in dicts:
if key in d:
text = d[key]
print("split array")
split_text = self.split_text_into_array(text, max_word_count, overlap)
for segment in split_text:
new_d = d.copy()
new_d[key] = segment
split_dicts.append(new_d)
else:
split_dicts.append(d)
if len(split_dicts) >= chunksize:
yield split_dicts
split_dicts = []
if split_dicts == []:
yield None
yield split_dicts | PypiClean |
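# Example usage (a minimal sketch; the text and sizes are arbitrary):
#     splitter = WordSplitter()
#     for chunk in splitter.split_text_into_array("word " * 500, max_word_count=200):
#         print(len(chunk), "segments in this chunk")
#     for batch in splitter.split_object_text([{"text": "word " * 500}], key="text"):
#         print(len(batch), "documents in this batch")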
/Absinthe-1.1.0.tar.gz/Absinthe-1.1.0/absinthe/tools/background_process_handler.py | import os
import sys
import time
import subprocess
from signal import SIGTERM
from utils import SimpleResponse
class BackgroundProcessHandler(object):
def __init__(self, command, pid_file, logger):
self.pid_file = pid_file
self.logger = logger
self.command = command
def start(self):
if self.is_running():
return SimpleResponse(False, 'Daemon is already running. Pid: %d' % self.get_pid())
pid = subprocess.Popen(self.command).pid
file(self.pid_file,'w+').write("%s\n" % pid)
return SimpleResponse(True, 'Started (pid: %s)' % pid)
def get_pid(self):
pid = None
try:
with open(self.pid_file, 'r') as f:
try:
pid = int(f.read().strip())
except TypeError as e:
pid = None
except IOError:
pid = None
return pid
def is_running(self):
pid = self.get_pid()
if pid is None:
return False
try:
os.kill(pid, 0)
except OSError:
return False
else:
return True
def status(self):
return SimpleResponse(True, 'Daemon is ' + ('running' if self.is_running() else 'not running'))
def stop(self):
# Get the pid from the pidfile
pid = self.get_pid()
if not pid:
message = "Pidfile %s does not exist" % self.pid_file
self.logger.error(message)
return SimpleResponse(False, 'Daemon is not running')
# Try killing the daemon process
try:
while 1:
os.kill(pid, SIGTERM)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pid_file):
os.remove(self.pid_file)
else:
self.logger.error(err)
sys.exit(1)
return SimpleResponse(True, 'Daemon is stopped')
def restart(self):
self.stop()
return self.start() | PypiClean |
/Flask-Runner-2.1.1.tar.gz/Flask-Runner-2.1.1/flask_runner.py | import sys
import os
import argparse
from flask.ext.script import Manager as BaseManager, Server as BaseServer, Shell, Command, Option
class Server(BaseServer):
def get_options(self):
options = super(Server, self).get_options()
options += (
Option('--noeval',
dest = 'use_evalex',
action = 'store_false',
default = True,
help = 'disable exception evaluation in the debugger'),
Option('--extra',
metavar = 'FILE',
type = str,
dest = 'extra_files',
action = 'append',
help = 'additional file for the reloader to watch for changes'),
Option('--profile',
action = 'store_true',
default = False,
help = 'run the profiler for each request'),
Option('--profile-count',
metavar = 'COUNT',
type = int,
dest = 'profile_restrictions',
action = 'append',
help = 'restrict profiler output to the top COUNT lines'),
Option('--profile-percent',
metavar = 'PERCENT',
type = float,
dest = 'profile_restrictions',
action = 'append',
help = 'restrict profiler output to the top PERCENT lines'),
Option('--profile-regex',
metavar = 'REGEX',
type = str,
dest = 'profile_restrictions',
action = 'append',
help = 'filter profiler output with REGEX'),
Option('--profile-dir',
metavar = 'DIR',
default = None,
help = 'write profiler results one file per request in folder DIR'),
Option('--lint',
action = 'store_true',
default = False,
help = 'run the lint validation middleware'),
)
return options
def handle(self, app, *args, **kwargs):
#host, port, use_debugger, use_reloader,
#threaded, processes, passthrough_errors, use_evalex,
#extra_files, profile, profile_restrictions, profile_dir, lint):
# we don't need to run the server in request context
# so just run it directly
profile = kwargs['profile']
profile_restrictions = kwargs['profile_restrictions'] or ()
profile_dir = kwargs['profile_dir']
lint = kwargs['lint']
if profile:
from werkzeug.contrib.profiler import ProfilerMiddleware
app.wsgi_app = ProfilerMiddleware(app.wsgi_app,
restrictions = profile_restrictions, profile_dir = profile_dir)
if lint:
from werkzeug.contrib.lint import LintMiddleware
app.wsgi_app = LintMiddleware(app.wsgi_app)
app.run(host = kwargs['host'],
port = kwargs['port'],
use_debugger = kwargs['use_debugger'],
use_reloader = kwargs['use_reloader'],
threaded = kwargs['threaded'],
processes = kwargs['processes'],
passthrough_errors = kwargs['passthrough_errors'],
use_evalex = kwargs['use_evalex'],
extra_files = kwargs['extra_files'])
class Test(Command):
description = 'Runs unit tests.'
def get_options(self):
return (Option('-c', '--with-coverage',
dest = 'coverage',
action = 'store_true',
help = 'Include coverage report'),)
def run(self, coverage):
options = ""
if coverage:
options += ' --with-coverage --cover-package=app'
os.system('nosetests' + options)
class Manager(BaseManager):
def __init__(self, app=None, with_default_commands=None, usage=None):
super(Manager, self).__init__(app, with_default_commands = False, usage = usage)
if with_default_commands or (app and with_default_commands is None):
self.add_default_commands()
def make_shell_context(self):
d = dict(app = self.app)
try:
from app import db
d['db'] = db
except:
pass
try:
from app import models
d['models'] = models
except:
pass
return d
def add_default_commands(self):
self.add_command("runserver", Server())
self.add_command("shell", Shell(make_context = self.make_shell_context))
self.add_command("test", Test())
class Runner(object):
def __init__(self, app):
self.app = app
def handle(self, prog, args = None):
server = Server()
arg_parser = server.create_parser(prog)
args = arg_parser.parse_args(args)
server.handle(self.app, **args.__dict__)
def run(self):
self.handle(sys.argv[0], sys.argv[1:]) | PypiClean |
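# Minimal usage sketch (assumes an existing Flask application object ``app``):
#     from flask import Flask
#     app = Flask(__name__)
#     Runner(app).run()  # parses sys.argv and starts the development server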
/Lantz-0.3.zip/Lantz-0.3/lantz/drivers/legacy/ni/daqmx/tasks.py | import numpy as np
from lantz import Feat, Action
from lantz.foreign import RetStr, RetTuple, RetValue
from .base import Task, Channel
from .constants import Constants
_GROUP_BY = {'scan': Constants.Val_GroupByScanNumber,
'channel': Constants.Val_GroupByChannel}
class AnalogInputTask(Task):
"""Analog Input Task
"""
IO_TYPE = 'AI'
@Feat()
def max_convert_rate(self):
"""Maximum convert rate supported by the task, given the current
devices and channel count.
This rate is generally faster than the default AI Convert
Clock rate selected by NI-DAQmx, because NI-DAQmx adds in an
additional 10 microseconds per channel settling time to
compensate for most potential system settling constraints.
For single channel tasks, the maximum AI Convert Clock rate is the
maximum rate of the ADC. For multiple channel tasks, the maximum
AI Convert Clock rate is the maximum convert rate of the analog
hardware, including the ADC, filters, multiplexers, and amplifiers.
Signal conditioning accessories can further constrain the maximum AI
Convert Clock based on timing and settling requirements.
"""
err, value = self.lib.GetAIConvMaxRate(RetValue('f64'))
return value
def read_scalar(self, timeout=10.0):
"""Return a single floating-point sample from a task that
contains a single analog input channel.
:param timeout: The amount of time, in seconds, to wait for the function to
read the sample(s). The default value is 10.0 seconds. To
specify an infinite wait, pass -1 (DAQmx_Val_WaitInfinitely).
This function returns an error if the timeout elapses.
A value of 0 indicates to try once to read the requested
samples. If all the requested samples are read, the function
is successful. Otherwise, the function returns a timeout error
and returns the samples that were actually read.
:rtype: float
"""
err, value = self.lib.ReadAnalogScalarF64(timeout, RetValue('f64'), None)
return value
@Action(units=(None, 'seconds', None), values=(None, None, _GROUP_BY))
def read(self, samples_per_channel=None, timeout=10.0, group_by='channel'):
"""Reads multiple floating-point samples from a task that
contains one or more analog input channels.
:param samples_per_channel:
The number of samples, per channel, to read. The default
value of -1 (DAQmx_Val_Auto) reads all available samples. If
readArray does not contain enough space, this function
returns as many samples as fit in readArray.
NI-DAQmx determines how many samples to read based on
whether the task acquires samples continuously or acquires a
finite number of samples.
If the task acquires samples continuously and you set this
parameter to -1, this function reads all the samples
currently available in the buffer.
If the task acquires a finite number of samples and you set
this parameter to -1, the function waits for the task to
acquire all requested samples, then reads those samples. If
you set the Read All Available Samples property to TRUE, the
function reads the samples currently available in the buffer
and does not wait for the task to acquire all requested
samples.
:param timeout: float
The amount of time, in seconds, to wait for the function to
read the sample(s). The default value is 10.0 seconds. To
specify an infinite wait, pass -1
(DAQmx_Val_WaitInfinitely). This function returns an error
if the timeout elapses.
A value of 0 indicates to try once to read the requested
samples. If all the requested samples are read, the function
is successful. Otherwise, the function returns a timeout
error and returns the samples that were actually read.
:param group_by:
'channel'
Group by channel (non-interleaved)::
ch0:s1, ch0:s2, ..., ch1:s1, ch1:s2,..., ch2:s1,..
'scan'
Group by scan number (interleaved)::
ch0:s1, ch1:s1, ch2:s1, ch0:s2, ch1:s2, ch2:s2,...
:rtype: numpy.ndarray
"""
if samples_per_channel is None:
samples_per_channel = self.samples_per_channel_available()
number_of_channels = self.number_of_channels()
if group_by == Constants.Val_GroupByScanNumber:
data = np.zeros((samples_per_channel, number_of_channels), dtype=np.float64)
else:
data = np.zeros((number_of_channels, samples_per_channel), dtype=np.float64)
err, data, count = self.lib.ReadAnalogF64(samples_per_channel, timeout, group_by,
data.ctypes.data, data.size, RetValue('i32'), None)
if count < samples_per_channel:
if group_by == Constants.Val_GroupByScanNumber:
return data[:count]
else:
return data[:,:count]
return data
class AnalogOutputTask(Task):
"""Analog Output Task
"""
CHANNEL_TYPE = 'AO'
@Action(units=(None, None, 'seconds', None), values=(None, None, None, _GROUP_BY))
def write(self, data, auto_start=True, timeout=10.0, group_by='scan'):
"""Write multiple floating-point samples or a scalar to a task
that contains one or more analog output channels.
Note: If you configured timing for your task, your write is
considered a buffered write. Buffered writes require a minimum
buffer size of 2 samples. If you do not configure the buffer
size using DAQmxCfgOutputBuffer, NI-DAQmx automatically
configures the buffer when you configure sample timing. If you
attempt to write one sample for a buffered write without
configuring the buffer, you will receive an error.
:param data: The array of 64-bit samples to write to the task
or a scalar.
:param auto_start: Whether or not this function automatically starts
the task if you do not start it.
:param timeout: The amount of time, in seconds, to wait for this
function to write all the samples. The default value is 10.0
seconds. To specify an infinite wait, pass -1
(DAQmx_Val_WaitInfinitely). This function returns an error
if the timeout elapses.
A value of 0 indicates to try once to write the submitted
samples. If this function successfully writes all submitted
samples, it does not return an error. Otherwise, the
function returns a timeout error and returns the number of
samples actually written.
:param group_by: how the samples are arranged, either interleaved or noninterleaved
'channel' - Group by channel (non-interleaved).
'scan' - Group by scan number (interleaved).
:return: The actual number of samples per channel successfully
written to the buffer.
"""
if np.isscalar(data):
err = self.lib.WriteAnalogScalarF64(bool32(auto_start),
float64(timeout),
float64(data), None)
return 1
data = np.asarray(data, dtype = np.float64)
number_of_channels = self.number_of_channels()
if data.ndim == 1:
if number_of_channels == 1:
samples_per_channel = data.shape[0]
shape = (samples_per_channel, 1)
else:
samples_per_channel = data.size // number_of_channels
shape = (samples_per_channel, number_of_channels)
if not group_by == Constants.Val_GroupByScanNumber:
shape = tuple(reversed(shape))
data.reshape(shape)
else:
if group_by == Constants.Val_GroupByScanNumber:
samples_per_channel = data.shape[0]
else:
samples_per_channel = data.shape[-1]
err, count = self.lib.WriteAnalogF64(samples_per_channel, auto_start,
timeout, group_by,
data.ctypes.data, RetValue('i32'),
None)
return count
class DigitalTask(Task):
@Action(units=(None, 'seconds', None), values=(None, None, _GROUP_BY))
def read(self, samples_per_channel=None, timeout=10.0, group_by='scan'):
"""
Reads multiple samples from each digital line in a task. Each
line in a channel gets one byte per sample.
:param samples_per_channel: int or None
The number of samples, per channel, to
read. The default value of -1 (DAQmx_Val_Auto) reads all
available samples. If readArray does not contain enough
space, this function returns as many samples as fit in
readArray.
NI-DAQmx determines how many samples to read based on
whether the task acquires samples continuously or acquires a
finite number of samples.
If the task acquires samples continuously and you set this
parameter to -1, this function reads all the samples
currently available in the buffer.
If the task acquires a finite number of samples and you set
this parameter to -1, the function waits for the task to
acquire all requested samples, then reads those samples. If
you set the Read All Available Data property to TRUE, the
function reads the samples currently available in the buffer
and does not wait for the task to acquire all requested
samples.
:param timeout: float
The amount of time, in seconds, to wait for the function to
read the sample(s). The default value is 10.0 seconds. To
specify an infinite wait, pass -1
(DAQmx_Val_WaitInfinitely). This function returns an error
if the timeout elapses.
A value of 0 indicates to try once to read the requested
samples. If all the requested samples are read, the function
is successful. Otherwise, the function returns a timeout
error and returns the samples that were actually read.
:param group_by: {'group', 'scan'}
Specifies whether or not the samples are interleaved:
'channel' - Group by channel (non-interleaved).
'scan' - Group by scan number (interleaved).
Returns
-------
data : array
The array to read samples into. Each `bytes_per_sample`
corresponds to one sample per channel, with each element
in that grouping corresponding to a line in that channel,
up to the number of lines contained in the channel.
bytes_per_sample : int
The number of elements in returned `data` that constitutes
a sample per channel. For each sample per channel,
`bytes_per_sample` is the number of bytes that channel
consists of.
"""
if samples_per_channel in (None, -1):
samples_per_channel = self.samples_per_channel_available()
if self.one_channel_for_all_lines:
nof_lines = []
for channel in self.names_of_channels():
nof_lines.append(self.number_of_lines (channel))
c = int (max (nof_lines))
dtype = getattr(np, 'uint%s' % (8 * c))
else:
c = 1
dtype = np.uint8
number_of_channels = self.number_of_channels()
if group_by == Constants.Val_GroupByScanNumber:
data = np.zeros((samples_per_channel, number_of_channels),dtype=dtype)
else:
data = np.zeros((number_of_channels, samples_per_channel),dtype=dtype)
err, count, bps = self.lib.ReadDigitalLines(samples_per_channel, float64 (timeout),
group_by, data.ctypes.data, uInt32 (data.size * c),
RetValue('i32'), RetValue('i32'),
None
)
if count < samples_per_channel:
if group_by == Constants.Val_GroupByScanNumber:
return data[:count], bps
else:
return data[:,:count], bps
return data, bps
class DigitalInputTask(DigitalTask):
"""Exposes NI-DAQmx digital input task to Python.
"""
CHANNEL_TYPE = 'DI'
class DigitalOutputTask(DigitalTask):
"""Exposes NI-DAQmx digital output task to Python.
"""
CHANNEL_TYPE = 'DO'
@Action(units=(None, None, 'seconds', None), values=(None, {True, False}, None, _GROUP_BY))
def write(self, data, auto_start=True, timeout=10.0, group_by='scan'):
"""
Writes multiple samples to each digital line in a task. When
you create your write array, each sample per channel must
contain the number of bytes returned by the
DAQmx_Read_DigitalLines_BytesPerChan property.
Note: If you configured timing for your task, your write is
considered a buffered write. Buffered writes require a minimum
buffer size of 2 samples. If you do not configure the buffer
size using DAQmxCfgOutputBuffer, NI-DAQmx automatically
configures the buffer when you configure sample timing. If you
attempt to write one sample for a buffered write without
configuring the buffer, you will receive an error.
Parameters
----------
data : array
The samples to write to the task.
auto_start : bool
Specifies whether or not this function automatically starts
the task if you do not start it.
timeout : float
The amount of time, in seconds, to wait for this function to
write all the samples. The default value is 10.0 seconds. To
specify an infinite wait, pass -1
(DAQmx_Val_WaitInfinitely). This function returns an error
if the timeout elapses.
A value of 0 indicates to try once to write the submitted
samples. If this function successfully writes all submitted
samples, it does not return an error. Otherwise, the
function returns a timeout error and returns the number of
samples actually written.
layout : {'group_by_channel', 'group_by_scan_number'}
Specifies how the samples are arranged, either interleaved
or noninterleaved:
'group_by_channel' - Group by channel (non-interleaved).
'group_by_scan_number' - Group by scan number (interleaved).
"""
number_of_channels = self.number_of_channels()
if np.isscalar(data):
data = np.array([data]*number_of_channels, dtype = np.uint8)
else:
data = np.asarray(data, dtype = np.uint8)
if data.ndim == 1:
if number_of_channels == 1:
samples_per_channel = data.shape[0]
shape = (samples_per_channel, 1)
else:
samples_per_channel = data.size // number_of_channels
shape = (samples_per_channel, number_of_channels)
if not group_by == Constants.Val_GroupByScanNumber:
shape = tuple(reversed(shape))
data.reshape(shape)
else:
if group_by == Constants.Val_GroupByScanNumber:
samples_per_channel = data.shape[0]
else:
samples_per_channel = data.shape[-1]
err, count = self.lib.WriteDigitalLines(samples_per_channel,
bool32(auto_start),
float64(timeout), group_by,
data.ctypes.data, RetValue('u32'), None)
return count
# NotImplemented: WriteDigitalU8, WriteDigitalU16, WriteDigitalU32, WriteDigitalScalarU32
class CounterInputTask(Task):
"""Exposes NI-DAQmx counter input task to Python.
"""
CHANNEL_TYPE = 'CI'
def read_scalar(self, timeout=10.0):
"""Read a single floating-point sample from a counter task. Use
this function when the counter sample is scaled to a
floating-point value, such as for frequency and period
measurement.
:param float:
The amount of time, in seconds, to wait for the function to
read the sample(s). The default value is 10.0 seconds. To
specify an infinite wait, pass -1
(DAQmx_Val_WaitInfinitely). This function returns an error if
the timeout elapses.
A value of 0 indicates to try once to read the requested
samples. If all the requested samples are read, the function
is successful. Otherwise, the function returns a timeout error
and returns the samples that were actually read.
:return: The sample read from the task.
"""
err, value = self.lib.ReadCounterScalarF64(timeout, RetValue('f64'), None)
return value
def read(self, samples_per_channel=None, timeout=10.0):
"""Read multiple 32-bit integer samples from a counter task.
Use this function when counter samples are returned unscaled,
such as for edge counting.
:param samples_per_channel:
The number of samples, per channel, to read. The default
value of -1 (DAQmx_Val_Auto) reads all available samples. If
readArray does not contain enough space, this function
returns as many samples as fit in readArray.
NI-DAQmx determines how many samples to read based on
whether the task acquires samples continuously or acquires a
finite number of samples.
If the task acquires samples continuously and you set this
parameter to -1, this function reads all the samples
currently available in the buffer.
If the task acquires a finite number of samples and you set
this parameter to -1, the function waits for the task to
acquire all requested samples, then reads those samples. If
you set the Read All Available Samples property to TRUE, the
function reads the samples currently available in the buffer
and does not wait for the task to acquire all requested
samples.
:param timeout:
The amount of time, in seconds, to wait for the function to
read the sample(s). The default value is 10.0 seconds. To
specify an infinite wait, pass -1
(DAQmx_Val_WaitInfinitely). This function returns an error
if the timeout elapses.
A value of 0 indicates to try once to read the requested
samples. If all the requested samples are read, the function
is successful. Otherwise, the function returns a timeout
error and returns the samples that were actually read.
:return: The array of samples read.
"""
if samples_per_channel is None:
samples_per_channel = self.samples_per_channel_available()
data = np.zeros((samples_per_channel,),dtype=np.int32)
err, count = self.lib.ReadCounterU32(samples_per_channel, float64(timeout),
data.ctypes.data, data.size, RetValue('i32'), None)
return data[:count]
class CounterOutputTask(Task):
"""Exposes NI-DAQmx counter output task to Python.
"""
channel_type = 'CO'
Task.register_class(AnalogInputTask) | PypiClean |
/APScheduler-4.0.0a1.tar.gz/APScheduler-4.0.0a1/src/apscheduler/workers/sync.py | from __future__ import annotations
import atexit
import os
import platform
import threading
from concurrent.futures import Future, ThreadPoolExecutor
from contextlib import ExitStack
from contextvars import copy_context
from datetime import datetime, timezone
from logging import Logger, getLogger
from types import TracebackType
from typing import Callable
from uuid import UUID
import attrs
from .. import JobReleased
from .._context import current_job, current_worker
from .._enums import JobOutcome, RunState
from .._events import JobAdded, WorkerStarted, WorkerStopped
from .._structures import Job, JobInfo, JobResult
from .._validators import positive_integer
from ..abc import DataStore, EventBroker
from ..eventbrokers.local import LocalEventBroker
@attrs.define(eq=False)
class Worker:
"""Runs jobs locally in a thread pool."""
data_store: DataStore
event_broker: EventBroker = attrs.field(factory=LocalEventBroker)
max_concurrent_jobs: int = attrs.field(
kw_only=True, validator=positive_integer, default=20
)
identity: str = attrs.field(kw_only=True, default=None)
logger: Logger | None = attrs.field(kw_only=True, default=getLogger(__name__))
# True if a scheduler owns this worker
_is_internal: bool = attrs.field(kw_only=True, default=False)
_state: RunState = attrs.field(init=False, default=RunState.stopped)
_thread: threading.Thread | None = attrs.field(init=False, default=None)
_wakeup_event: threading.Event = attrs.field(init=False, factory=threading.Event)
_executor: ThreadPoolExecutor = attrs.field(init=False)
_acquired_jobs: set[Job] = attrs.field(init=False, factory=set)
_running_jobs: set[UUID] = attrs.field(init=False, factory=set)
def __attrs_post_init__(self) -> None:
if not self.identity:
self.identity = f"{platform.node()}-{os.getpid()}-{id(self)}"
def __enter__(self) -> Worker:
self.start_in_background()
return self
def __exit__(
self,
exc_type: type[BaseException],
exc_val: BaseException,
exc_tb: TracebackType,
) -> None:
self.stop()
@property
def state(self) -> RunState:
"""The current running state of the worker."""
return self._state
def start_in_background(self) -> None:
"""
Launch the worker in a new thread.
This method registers an :mod:`atexit` hook to shut down the worker and wait
for the thread to finish.
"""
start_future: Future[None] = Future()
self._thread = threading.Thread(
target=copy_context().run, args=[self._run, start_future], daemon=True
)
self._thread.start()
try:
start_future.result()
except BaseException:
self._thread = None
raise
atexit.register(self.stop)
def stop(self) -> None:
"""
Signal the worker that it should stop running jobs.
This method does not wait for the worker to actually stop.
"""
atexit.unregister(self.stop)
if self._state is RunState.started:
self._state = RunState.stopping
self._wakeup_event.set()
if threading.current_thread() != self._thread:
self._thread.join()
self._thread = None
def run_until_stopped(self) -> None:
"""
Run the worker until it is explicitly stopped.
This method will only return if :meth:`stop` is called.
"""
self._run(None)
def _run(self, start_future: Future[None] | None) -> None:
with ExitStack() as exit_stack:
try:
if self._state is not RunState.stopped:
raise RuntimeError(
f'Cannot start the worker when it is in the "{self._state}" '
f"state"
)
if not self._is_internal:
# Initialize the event broker
self.event_broker.start()
exit_stack.push(
lambda *exc_info: self.event_broker.stop(
force=exc_info[0] is not None
)
)
# Initialize the data store
self.data_store.start(self.event_broker)
exit_stack.push(
lambda *exc_info: self.data_store.stop(
force=exc_info[0] is not None
)
)
# Set the current worker
token = current_worker.set(self)
exit_stack.callback(current_worker.reset, token)
# Wake up the worker if the data store emits a significant job event
exit_stack.enter_context(
self.event_broker.subscribe(
lambda event: self._wakeup_event.set(), {JobAdded}
)
)
# Initialize the thread pool
executor = ThreadPoolExecutor(max_workers=self.max_concurrent_jobs)
exit_stack.enter_context(executor)
# Signal that the worker has started
self._state = RunState.started
self.event_broker.publish_local(WorkerStarted())
except BaseException as exc:
if start_future:
start_future.set_exception(exc)
return
else:
raise
else:
if start_future:
start_future.set_result(None)
try:
while self._state is RunState.started:
available_slots = self.max_concurrent_jobs - len(self._running_jobs)
if available_slots:
jobs = self.data_store.acquire_jobs(
self.identity, available_slots
)
for job in jobs:
task = self.data_store.get_task(job.task_id)
self._running_jobs.add(job.id)
executor.submit(
copy_context().run, self._run_job, job, task.func
)
self._wakeup_event.wait()
self._wakeup_event = threading.Event()
except BaseException as exc:
self._state = RunState.stopped
if isinstance(exc, Exception):
self.logger.exception("Worker crashed")
else:
self.logger.info(f"Worker stopped due to {exc.__class__.__name__}")
self.event_broker.publish_local(WorkerStopped(exception=exc))
else:
self._state = RunState.stopped
self.logger.info("Worker stopped")
self.event_broker.publish_local(WorkerStopped())
def _run_job(self, job: Job, func: Callable) -> None:
try:
# Check if the job started before the deadline
start_time = datetime.now(timezone.utc)
if job.start_deadline is not None and start_time > job.start_deadline:
result = JobResult.from_job(
job, JobOutcome.missed_start_deadline, finished_at=start_time
)
self.event_broker.publish(
JobReleased.from_result(result, self.identity)
)
self.data_store.release_job(self.identity, job.task_id, result)
return
token = current_job.set(JobInfo.from_job(job))
try:
retval = func(*job.args, **job.kwargs)
except BaseException as exc:
if isinstance(exc, Exception):
self.logger.exception("Job %s raised an exception", job.id)
else:
self.logger.error(
"Job %s was aborted due to %s", job.id, exc.__class__.__name__
)
result = JobResult.from_job(
job,
JobOutcome.error,
exception=exc,
)
self.data_store.release_job(
self.identity,
job.task_id,
result,
)
self.event_broker.publish(
JobReleased.from_result(result, self.identity)
)
if not isinstance(exc, Exception):
raise
else:
self.logger.info("Job %s completed successfully", job.id)
result = JobResult.from_job(
job,
JobOutcome.success,
return_value=retval,
)
self.data_store.release_job(self.identity, job.task_id, result)
self.event_broker.publish(
JobReleased.from_result(result, self.identity)
)
finally:
current_job.reset(token)
finally:
self._running_jobs.remove(job.id) | PypiClean |
/ObjectListView2-1.0.0.tar.gz/ObjectListView2-1.0.0/README.txt | .. -*- coding: UTF-8 -*-
ObjectListView
==============
This is an upload of olv 1.3.2 from https://bitbucket.org/wbruhin/objectlistview/src/default/
*An ObjectListView is a wrapper around the wx.ListCtrl that makes the
list control easier to use. It also provides some useful extra functionality.*
Larry Wall, the author of Perl, once wrote that the three essential character flaws of any
good programmer were sloth, impatience and hubris. Good programmers want to do the minimum
amount of work (sloth). They want their programs to run quickly (impatience). They take
inordinate pride in what they have written (hubris).
ObjectListView encourages the vices of sloth and hubris, by allowing programmers to do far
less work but still produce great looking results.
Without wasting my time, just tell me what it does!
---------------------------------------------------
OK, here's the bullet point feature list:
* Automatically transforms a collection of model objects into a fully functional wx.ListCtrl.
* Automatically sorts rows.
* Easily edits the cell values.
* Supports all ListCtrl views (report, list, large and small icons).
* Columns can be fixed-width, have a minimum and/or maximum width, or be space-filling.
* Displays a "list is empty" message when the list is empty (obviously).
* Supports checkboxes in any column
* Supports alternate rows background colors.
* Supports custom formatting of rows.
* Supports searching (by typing) on any column, even on massive lists.
* Supports custom sorting
* The `FastObjectListView` version can build a list of 10,000 objects in less than 0.1 seconds.
* The `VirtualObjectListView` version supports millions of rows through ListCtrl's virtual mode.
* The `GroupListView` version supports arranging rows into collapsible groups.
* Effortlessly produce professional-looking reports using a ListCtrlPrinter.
Seriously, after using an ObjectListView, you will never go back to using a plain wx.ListCtrl.
OK, I'm interested. What do I do next?
--------------------------------------
As Phillip has not updated the Python version for a long time I created this fork as I want to
use it with wxPython 2.9+ and wxPython 3.x Phoenix on both Python 2.x and 3.x.
To install this version clone the repo or download it and then run `setup.py install`.
Cloning using ssh::
hg clone ssh://[email protected]/wbruhin/objectlistview
Cloning using https::
hg clone https://[email protected]/wbruhin/objectlistview
The original project was hosted on `SourceForge <https://sourceforge.net/project/showfiles.php?group_id=225207&package_id=280564>`_.
| PypiClean |
/Flask-Obscurity-0.4.tar.gz/Flask-Obscurity-0.4/flask_obscurity/__init__.py |
import random
import os
import re
from flask import Blueprint, current_app, url_for
from jinja2 import Markup, escape
from six import b, iterbytes, indexbytes
class Obscurity(object):
def __init__(self, app=None):
if app is not None:
self.init_app(app)
def init_app(self, app):
self.app = app
self.blueprint = Blueprint(
'obscurity',
__name__,
template_folder='templates',
static_folder='static',
static_url_path=self.app.static_url_path + '/oe')
app.register_blueprint(self.blueprint)
app.config.setdefault('OBSCURE_KEY_LENGTH', 5)
app.jinja_env.filters['pmailto'] = pmailto
app.jinja_env.filters['pspan'] = pspan
app.jinja_env.filters['pmailto_all'] = pmailto_all
app.jinja_env.filters['pspan_all'] = pspan_all
app.jinja_env.globals['obscurity_js'] = lambda: Markup(
"""<script src="{}"></script>""".format(url_for(
'obscurity.static', filename='js/uoe.js', _external=True,
)))
EMAIL_REGEX = re.compile(
"[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z"
"0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?"
)
def obscure(address, keylength=None):
if not keylength:
keylength = current_app.config['OBSCURE_KEY_LENGTH']
k = list(iterbytes(os.urandom(keylength)))
positions = list(range(len(address)))
address = b(address)
random.shuffle(positions)
# format: key length, key bytes, [pos, byte]
rv = [keylength]
rv.extend(k)
for pos in positions:
rv.append(pos)
ciph = (indexbytes(address, pos) + k[pos % len(k)]) % 256
rv.append(ciph)
return ','.join(str(n) for n in rv)
def pmailto(address, linkcontent=None):
if not linkcontent:
return Markup(
u'<a class="oe-link" data-oe="{}">(hidden)</a>'.format(
escape(obscure(address))
))
else:
return Markup(
u'<a class="oe-link" data-oe="{}" data-keep="1">{}</a>'.format(
escape(obscure(address)), linkcontent
))
def pspan(address):
return Markup(
u'<span class="oe-text" data-oe="%s">(hidden)</span>'.format(
escape(obscure(address))
))
def pmailto_all(text):
return EMAIL_REGEX.sub(lambda m: pmailto(m.group(0)), text)
def pspan_all(text):
return EMAIL_REGEX.sub(lambda m: pspan(m.group(0)), text) | PypiClean |
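# Decoding sketch, documenting the obscured payload format produced by
# obscure() above (the bundled uoe.js is expected to perform the equivalent
# steps in the browser):
#     def deobscure(payload):
#         nums = [int(n) for n in payload.split(',')]
#         keylength, rest = nums[0], nums[1:]
#         key, pairs = rest[:keylength], rest[keylength:]
#         chars = {}
#         for pos, ciph in zip(pairs[0::2], pairs[1::2]):
#             chars[pos] = chr((ciph - key[pos % keylength]) % 256)
#         return ''.join(chars[i] for i in range(len(chars)))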
/Flask-Monitoring-1.1.2.tar.gz/Flask-Monitoring-1.1.2/flask_monitoringdashboard/views/version.py | from flask import jsonify, request, json
from flask_monitoringdashboard.controllers.versions import (
get_multi_version_data,
get_version_user_data,
get_version_ip_data,
)
from flask_monitoringdashboard.database import session_scope
from flask_monitoringdashboard.core.auth import secure
from flask_monitoringdashboard import blueprint
from flask_monitoringdashboard.database.versions import get_versions
@blueprint.route('/api/versions')
@blueprint.route('/api/versions/<endpoint_id>')
@secure
def versions(endpoint_id=None):
"""
:param endpoint_id: integer
:return: A JSON-list with all versions of a specific endpoint (version represented by a string)
"""
with session_scope() as db_session:
version_dates = get_versions(db_session, endpoint_id)
dicts = []
for vt in version_dates:
dicts.append({'version': vt[0], 'date': vt[1]})
return jsonify(dicts)
@blueprint.route('/api/multi_version', methods=['POST'])
@secure
def multi_version():
"""
input must be a JSON-object, with a list of endpoints and versions, such as:
{
endpoints: ['endpoint0', endpoint1],
versions: ['0.1', '0.2', '0.3']
}
:return: A JSON-list for all endpoints, with a JSON-list for every version.
output: {
[
[10, 11, 12],
[13, 14, 15]
]
}
"""
data = json.loads(request.data)['data']
endpoints = data['endpoints']
versions = data['versions']
with session_scope() as db_session:
return jsonify(get_multi_version_data(db_session, endpoints, versions))
@blueprint.route('/api/version_user/<endpoint_id>', methods=['POST'])
@secure
def version_user(endpoint_id):
"""
input must be a JSON-object, with a list of versions and users, such as:
{
users: ['user0', user1],
versions: ['0.1', '0.2', '0.3']
}
:return: A JSON-list for all users, with a JSON-list for every version.
output: {
data: [
[10, 11, 12],
[13, 14, 15]
],
versions: [
{ date: '...', version: '0.1'},
{ date: '...', version: '0.2'},
{ date: '...', version: '0.3'}
]
}
"""
data = json.loads(request.data)['data']
versions = data['versions']
users = data['users']
with session_scope() as db_session:
return jsonify(get_version_user_data(db_session, endpoint_id, versions, users))
@blueprint.route('/api/version_ip/<endpoint_id>', methods=['POST'])
@secure
def version_ip(endpoint_id):
"""
input must be a JSON-object, with a list of versions and IP-addresses, such as:
{
ip: ['127.0.0.1', '127.0.0.2'],
versions: ['0.1', '0.2', '0.3']
}
:return: A JSON-list for all IP-addresses, with a JSON-list for every version.
output: {
data: [
[10, 11, 12],
[13, 14, 15]
],
versions: [
{ date: '...', version: '0.1'},
{ date: '...', version: '0.2'},
{ date: '...', version: '0.3'}
]
}
"""
data = json.loads(request.data)['data']
versions = data['versions']
ips = data['ip']
with session_scope() as db_session:
return jsonify(get_version_ip_data(db_session, endpoint_id, versions, ips)) | PypiClean |
/Flask_Admin-1.6.1-py3-none-any.whl/flask_admin/contrib/mongoengine/fields.py | from mongoengine.base import get_document
from werkzeug.datastructures import FileStorage
from wtforms import fields
try:
from wtforms.fields.core import _unset_value as unset_value
except ImportError:
from wtforms.utils import unset_value
from . import widgets
from flask_admin.model.fields import InlineFormField
def is_empty(file_object):
file_object.seek(0)
first_char = file_object.read(1)
file_object.seek(0)
return not bool(first_char)
class ModelFormField(InlineFormField):
"""
Customized ModelFormField for MongoEngine EmbeddedDocuments.
"""
def __init__(self, model, view, form_class, form_opts=None, **kwargs):
super(ModelFormField, self).__init__(form_class, **kwargs)
self.model = model
if isinstance(self.model, str):
self.model = get_document(self.model)
self.view = view
self.form_opts = form_opts
def populate_obj(self, obj, name):
candidate = getattr(obj, name, None)
is_created = candidate is None
if is_created:
candidate = self.model()
setattr(obj, name, candidate)
self.form.populate_obj(candidate)
self.view._on_model_change(self.form, candidate, is_created)
class MongoFileField(fields.FileField):
"""
GridFS file field.
"""
widget = widgets.MongoFileInput()
def __init__(self, label=None, validators=None, **kwargs):
super(MongoFileField, self).__init__(label, validators, **kwargs)
self._should_delete = False
def process(self, formdata, data=unset_value):
if formdata:
marker = '_%s-delete' % self.name
if marker in formdata:
self._should_delete = True
return super(MongoFileField, self).process(formdata, data)
def populate_obj(self, obj, name):
field = getattr(obj, name, None)
if field is not None:
# If field should be deleted, clean it up
if self._should_delete:
field.delete()
return
if isinstance(self.data, FileStorage) and not is_empty(self.data.stream):
if not field.grid_id:
func = field.put
else:
func = field.replace
func(self.data.stream,
filename=self.data.filename,
content_type=self.data.content_type)
class MongoImageField(MongoFileField):
"""
GridFS image field.
"""
widget = widgets.MongoImageInput() | PypiClean |
/DEODR-0.2.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl/deodr/mesh_fitter.py |
import copy
from typing import Any, Dict, List, Optional, Tuple
import numpy as np
import scipy.sparse.linalg
import scipy.spatial.transform.rotation
from . import Camera, ColoredTriMesh, LaplacianRigidEnergy, Scene3D
from .tools import (
check_jacobian_finite_differences,
normalize,
normalize_backward,
qrot,
qrot_backward,
)
class MeshDepthFitter:
"""Class to fit a deformable mesh to a depth image."""
def __init__(
self,
vertices: np.ndarray,
faces: np.ndarray,
euler_init: np.ndarray,
translation_init: np.ndarray,
cregu: float = 2000,
inertia: float = 0.96,
damping: float = 0.05,
):
self.cregu = cregu
self.inertia = inertia
self.damping = damping
self.step_factor_vertices = 0.0005
self.step_max_vertices = 1
self.step_factor_quaternion = 0.00006
self.step_max_quaternion = 0.1
self.step_factor_translation = 0.00005
self.step_max_translation = 0.1
self.mesh = ColoredTriMesh(
faces, vertices=vertices, colors=np.zeros((vertices.shape[0], 0))
) # we do a copy to avoid negative stride not support by pytorch
object_center = vertices.mean(axis=0)
object_radius = np.max(np.std(vertices, axis=0))
self.camera_center = object_center + np.array([-0.5, 0, 5]) * object_radius
self.scene = Scene3D()
self.scene.set_mesh(self.mesh)
self.rigid_energy = LaplacianRigidEnergy(self.mesh, vertices, cregu)
self.vertices_init = copy.copy(vertices)
self.Hfactorized = None
self.Hpreconditioner = None
self.set_mesh_transform_init(euler=euler_init, translation=translation_init)
self.reset()
def set_mesh_transform_init(
self, euler: np.ndarray, translation: np.ndarray
) -> None:
self.transform_quaternion_init = scipy.spatial.transform.Rotation.from_euler(
"zyx", euler
).as_quat()
self.transform_translation_init = translation
def reset(self) -> None:
self.vertices = copy.copy(self.vertices_init)
self.speed_vertices = np.zeros(self.vertices_init.shape)
self.transform_quaternion = copy.copy(self.transform_quaternion_init)
self.transform_translation = copy.copy(self.transform_translation_init)
self.speed_translation = np.zeros(3)
self.speed_quaternion = np.zeros(4)
def set_max_depth(self, max_depth: float) -> None:
self.max_depth = max_depth
self.scene.set_background_color(np.array([max_depth], dtype=np.float64))
def set_depth_scale(self, depth_scale: float) -> None:
self.depthScale = depth_scale
def set_image(
self,
mesh_image: np.ndarray,
focal: Optional[float] = None,
distortion: Optional[np.ndarray] = None,
) -> None:
self.width = mesh_image.shape[1]
self.height = mesh_image.shape[0]
assert mesh_image.ndim == 2
self.mesh_image = mesh_image
if focal is None:
focal = 2 * self.width
rot = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])
trans = -rot.T.dot(self.camera_center)
intrinsic = np.array(
[[focal, 0, self.width / 2], [0, focal, self.height / 2], [0, 0, 1]]
)
extrinsic = np.column_stack((rot, trans))
self.camera = Camera(
extrinsic=extrinsic,
intrinsic=intrinsic,
distortion=distortion,
height=self.height,
width=self.width,
)
self.iter = 0
def render(self) -> np.ndarray:
q_normalized = normalize(
self.transform_quaternion
) # that will lead to a gradient that is in the tangent space
vertices_transformed = (
qrot(q_normalized, self.vertices) + self.transform_translation
)
self.mesh.set_vertices(vertices_transformed)
self.depth_not_clipped = self.scene.render_depth(
self.camera,
depth_scale=self.depthScale,
)
return np.clip(self.depth_not_clipped, 0, self.max_depth)
def render_backward(self, depth_b: np.ndarray) -> None:
self.scene.clear_gradients()
depth_b[self.depth_not_clipped < 0] = 0
depth_b[self.depth_not_clipped > self.max_depth] = 0
self.scene.render_depth_backward(depth_b)
assert self.scene.mesh is not None
assert self.scene.mesh._vertices_b is not None
vertices_transformed_b = self.scene.mesh._vertices_b
self.transform_translation_b = np.sum(vertices_transformed_b, axis=0)
q_normalized = normalize(self.transform_quaternion)
q_normalized_b, self._vertices_b = qrot_backward(
q_normalized, self.vertices, vertices_transformed_b
)
self.transform_quaternion_b = normalize_backward(
self.transform_quaternion, q_normalized_b
) # that will lead to a gradient that is in the tangent space
def step(self) -> Tuple[float, np.ndarray, np.ndarray]:
self.vertices = self.vertices - np.mean(self.vertices, axis=0)[None, :]
depth = self.render()
diff_image = np.sum((depth - self.mesh_image[:, :, None]) ** 2, axis=2)
energy_data = np.sum(diff_image)
depth_b = 2 * (depth - self.mesh_image[:, :, None])
self.render_backward(depth_b)
self._vertices_b = self._vertices_b - np.mean(self._vertices_b, axis=0)[None, :]
grad_data = self._vertices_b
# update v
(
energy_rigid,
grad_rigidity,
_,
) = self.rigid_energy.evaluate(self.vertices)
energy = energy_data + energy_rigid
print("Energy=%f : EData=%f E_rigid=%f" % (energy, energy_data, energy_rigid))
# update v
grad = grad_data + grad_rigidity
def mult_and_clamp(x: np.ndarray, a: float, t: float) -> np.ndarray:
return np.minimum(np.maximum(x * a, -t), t)
inertia = self.inertia
# update vertices
step_vertices = mult_and_clamp(
-grad, self.step_factor_vertices, self.step_max_vertices
)
self.speed_vertices = (1 - self.damping) * (
self.speed_vertices * self.inertia + (1 - self.inertia) * step_vertices
)
self.vertices = self.vertices + self.speed_vertices
# update rotation
step_quaternion = mult_and_clamp(
-self.transform_quaternion_b,
self.step_factor_quaternion,
self.step_max_quaternion,
)
self.speed_quaternion = (1 - self.damping) * (
self.speed_quaternion * inertia + (1 - inertia) * step_quaternion
)
self.transform_quaternion = self.transform_quaternion + self.speed_quaternion
self.transform_quaternion = self.transform_quaternion / np.linalg.norm(
self.transform_quaternion
)
# update translation
step_translation = mult_and_clamp(
-self.transform_translation_b,
self.step_factor_translation,
self.step_max_translation,
)
self.speed_translation = (1 - self.damping) * (
self.speed_translation * inertia + (1 - inertia) * step_translation
)
self.transform_translation = self.transform_translation + self.speed_translation
self.iter += 1
return energy, depth[:, :, 0], diff_image
class MeshRGBFitterWithPose:
"""Class to fit a deformable mesh to a color image."""
def __init__(
self,
vertices: np.ndarray,
faces: np.ndarray,
euler_init: np.ndarray,
translation_init: np.ndarray,
default_color: np.ndarray,
default_light_directional: np.ndarray,
default_light_ambient: float,
cregu: float = 2000,
inertia: float = 0.96,
damping: float = 0.05,
update_lights: bool = True,
update_color: bool = True,
):
self.cregu = cregu
self.inertia = inertia
self.damping = damping
self.step_factor_vertices = 0.0005
self.step_max_vertices = 0.5
self.step_factor_quaternion = 0.00006
self.step_max_quaternion = 0.05
self.step_factor_translation = 0.00005
self.step_max_translation = 0.1
self.default_color = default_color
self.default_light_directional = default_light_directional
self.default_light_ambient = default_light_ambient
self.update_lights = update_lights
self.update_color = update_color
self.mesh = ColoredTriMesh(faces.copy(), vertices=vertices, nb_colors=3)
object_center = vertices.mean(axis=0) + translation_init
object_radius = np.max(np.std(vertices, axis=0))
self.camera_center = object_center + np.array([0, 0, 9]) * object_radius
self.scene = Scene3D()
self.scene.set_mesh(self.mesh)
self.rigid_energy = LaplacianRigidEnergy(self.mesh, vertices, cregu)
self.vertices_init = copy.copy(vertices)
self.Hfactorized = None
self.Hpreconditioner = None
self.set_mesh_transform_init(euler=euler_init, translation=translation_init)
self.reset()
def set_background_color(self, background_color: np.ndarray) -> None:
self.scene.set_background_color(background_color)
def set_mesh_transform_init(
self, euler: np.ndarray, translation: np.ndarray
) -> None:
self.transform_quaternion_init = scipy.spatial.transform.Rotation.from_euler(
"zyx", euler
).as_quat()
self.transform_translation_init = translation
def reset(self) -> None:
self.vertices = copy.copy(self.vertices_init)
self.speed_vertices = np.zeros(self.vertices.shape)
self.transform_quaternion = copy.copy(self.transform_quaternion_init)
self.transform_translation = copy.copy(self.transform_translation_init)
self.speed_translation = np.zeros(3)
self.speed_quaternion = np.zeros(4)
self.mesh_color = copy.copy(self.default_color)
self.light_directional = copy.copy(self.default_light_directional)
self.light_ambient = copy.copy(self.default_light_ambient)
self.speed_light_directional = np.zeros(self.light_directional.shape)
self.speed_light_ambient = 0.0
self.speed_mesh_color = np.zeros(self.mesh_color.shape)
def set_image(
self,
mesh_image: np.ndarray,
focal: Optional[float] = None,
distortion: Optional[np.ndarray] = None,
) -> None:
self.width = mesh_image.shape[1]
self.height = mesh_image.shape[0]
assert mesh_image.ndim == 3
self.mesh_image = mesh_image
if focal is None:
focal = 2 * self.width
rot = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])
trans = -rot.T.dot(self.camera_center)
intrinsic = np.array(
[[focal, 0, self.width / 2], [0, focal, self.height / 2], [0, 0, 1]]
)
extrinsic = np.column_stack((rot, trans))
self.camera = Camera(
extrinsic=extrinsic,
intrinsic=intrinsic,
distortion=distortion,
width=self.width,
height=self.height,
)
self.iter = 0
def render(self) -> np.ndarray:
q_normalized = normalize(
self.transform_quaternion
) # that will lead to a gradient that is in the tangent space
vertices_transformed = (
qrot(q_normalized, self.vertices) + self.transform_translation
)
self.mesh.set_vertices(vertices_transformed)
self.scene.set_light(
light_directional=self.light_directional, light_ambient=self.light_ambient
)
self.mesh.set_vertices_colors(
np.tile(self.mesh_color, (self.mesh.nb_vertices, 1))
)
return self.scene.render(self.camera)
def render_backward(self, image_b: np.ndarray) -> None:
assert self.scene.mesh is not None
self.scene.clear_gradients()
self.scene.render_backward(image_b)
assert self.mesh.vertices_colors_b is not None # helping mypy
self.mesh_color_b = np.sum(self.mesh.vertices_colors_b, axis=0)
self.light_directional_b = self.scene.light_directional_b
self.light_ambient_b = self.scene.light_ambient_b
vertices_transformed_b = self.scene.mesh._vertices_b
self.transform_translation_b = np.sum(vertices_transformed_b, axis=0)
q_normalized = normalize(self.transform_quaternion)
q_normalized_b, self._vertices_b = qrot_backward(
q_normalized, self.vertices, vertices_transformed_b
)
self.transform_quaternion_b = normalize_backward(
self.transform_quaternion, q_normalized_b
) # that will lead to a gradient that is in the tangent space
def step(self) -> Tuple[float, np.ndarray, np.ndarray]:
self.vertices = self.vertices - np.mean(self.vertices, axis=0)[None, :]
image = self.render()
diff_image = np.sum((image - self.mesh_image) ** 2, axis=2)
image_b = 2 * (image - self.mesh_image)
energy_data = np.sum(diff_image)
(
energy_rigid,
grad_rigidity,
_,
) = self.rigid_energy.evaluate(self.vertices)
energy = energy_data + energy_rigid
print("Energy=%f : EData=%f E_rigid=%f" % (energy, energy_data, energy_rigid))
self.render_backward(image_b)
self._vertices_b = self._vertices_b - np.mean(self._vertices_b, axis=0)[None, :]
# update v
grad = self._vertices_b + grad_rigidity
def mult_and_clamp(x: np.ndarray, a: float, t: float) -> np.ndarray:
return np.minimum(np.maximum(x * a, -t), t)
inertia = self.inertia
# update vertices
step_vertices = mult_and_clamp(
-grad, self.step_factor_vertices, self.step_max_vertices
)
self.speed_vertices = (1 - self.damping) * (
self.speed_vertices * inertia + (1 - inertia) * step_vertices
)
self.vertices = self.vertices + self.speed_vertices
# update rotation
step_quaternion = mult_and_clamp(
-self.transform_quaternion_b,
self.step_factor_quaternion,
self.step_max_quaternion,
)
self.speed_quaternion = (1 - self.damping) * (
self.speed_quaternion * inertia + (1 - inertia) * step_quaternion
)
self.transform_quaternion = self.transform_quaternion + self.speed_quaternion
self.transform_quaternion = self.transform_quaternion / np.linalg.norm(
self.transform_quaternion
)
# update translation
step_translation = mult_and_clamp(
-self.transform_translation_b,
self.step_factor_translation,
self.step_max_translation,
)
self.speed_translation = (1 - self.damping) * (
self.speed_translation * inertia + (1 - inertia) * step_translation
)
self.transform_translation = self.transform_translation + self.speed_translation
# update directional light
step = -self.light_directional_b * 0.0001
self.speed_light_directional = (1 - self.damping) * (
self.speed_light_directional * inertia + (1 - inertia) * step
)
self.light_directional = self.light_directional + self.speed_light_directional
# update ambient light
step = -self.light_ambient_b * 0.0001
self.speed_light_ambient = (1 - self.damping) * (
self.speed_light_ambient * inertia + (1 - inertia) * step
)
self.light_ambient = self.light_ambient + self.speed_light_ambient
# update mesh color
step = -self.mesh_color_b * 0.00001
self.speed_mesh_color = (1 - self.damping) * (
self.speed_mesh_color * inertia + (1 - inertia) * step
)
self.mesh_color = self.mesh_color + self.speed_mesh_color
self.iter += 1
return energy, image, diff_image
class MeshRGBFitterWithPoseMultiFrame:
"""Class to fit a deformable mesh to multiple color images."""
def __init__(
self,
vertices: np.ndarray,
faces: np.ndarray,
euler_init: np.ndarray,
translation_init: np.ndarray,
default_color: np.ndarray,
default_light_directional: np.ndarray,
default_light_ambient: float,
cregu: float = 2000,
cdata: float = 1,
inertia: float = 0.97,
damping: float = 0.15,
update_lights: bool = True,
update_color: bool = True,
):
self.cregu = cregu
self.cdata = cdata
self.inertia = inertia
self.damping = damping
self.step_factor_vertices = 0.0005
self.step_max_vertices = 0.5
self.step_factor_quaternion = 0.00005
self.step_max_quaternion = 0.05
self.step_factor_translation = 0.00004
self.step_max_translation = 0.1
self.default_color = default_color
self.default_light_directional = default_light_directional
self.default_light_ambient = default_light_ambient
self.update_lights = update_lights
self.update_color = update_color
self.mesh = ColoredTriMesh(faces, vertices, nb_colors=3)
object_center = vertices.mean(axis=0)
self.object_radius = np.max(np.std(vertices, axis=0))
self.camera_center = object_center + np.array([0, 0, 6]) * self.object_radius
self.scene = Scene3D()
self.scene.set_mesh(self.mesh)
self.rigid_energy = LaplacianRigidEnergy(self.mesh, vertices, cregu)
self.vertices_init = copy.copy(vertices)
self.Hfactorized = None
self.Hpreconditioner = None
self.set_mesh_transform_init(euler=euler_init, translation=translation_init)
self.store_backward: Dict[str, Any] = {}
self.reset()
def set_background_color(self, background_color: np.ndarray) -> None:
self.scene.set_background_color(background_color)
def set_mesh_transform_init(
self, euler: np.ndarray, translation: np.ndarray
) -> None:
self.transform_quaternion_init = scipy.spatial.transform.Rotation.from_euler(
"zyx", euler
).as_quat()
self.transform_translation_init = translation
def reset(self) -> None:
self.vertices = copy.copy(self.vertices_init)
self.speed_vertices = np.zeros(self.vertices.shape)
self.transform_quaternion = copy.copy(self.transform_quaternion_init)
self.transform_translation = copy.copy(self.transform_translation_init)
self.speed_translation = np.zeros(3)
self.speed_quaternion = np.zeros(4)
self.mesh_color = copy.copy(self.default_color)
self.light_directional = copy.copy(self.default_light_directional)
self.light_ambient = self.default_light_ambient
self.speed_light_directional = np.zeros(self.light_directional.shape)
self.speed_light_ambient = 0.0
self.speed_mesh_color = np.zeros(self.mesh_color.shape)
def set_images(
self, mesh_images: List[np.ndarray], focal: Optional[float] = None
) -> None:
self.width = mesh_images[0].shape[1]
self.height = mesh_images[0].shape[0]
assert mesh_images[0].ndim == 3
self.mesh_images = mesh_images
if focal is None:
focal = 2 * self.width
rot = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])
trans = -rot.T.dot(self.camera_center)
intrinsic = np.array(
[[focal, 0, self.width / 2], [0, focal, self.height / 2], [0, 0, 1]]
)
extrinsic = np.column_stack((rot, trans))
self.camera = Camera(
extrinsic=extrinsic,
intrinsic=intrinsic,
width=self.width,
height=self.height,
)
self.iter = 0
def set_image(self, mesh_image: np.ndarray, focal: Optional[float] = None) -> None:
self.width = mesh_image.shape[1]
self.height = mesh_image.shape[0]
assert mesh_image.ndim == 3
self.mesh_image = mesh_image
if focal is None:
focal = 2 * self.width
rot = np.array([[1, 0, 0], [0, -1, 0], [0, 0, -1]])
trans = -rot.T.dot(self.camera_center)
intrinsic = np.array(
[[focal, 0, self.width / 2], [0, focal, self.height / 2], [0, 0, 1]]
)
extrinsic = np.column_stack((rot, trans))
self.camera = Camera(
extrinsic=extrinsic,
intrinsic=intrinsic,
width=self.width,
height=self.height,
)
self.iter = 0
def render(self, idframe: Optional[int] = None) -> np.ndarray:
unormalized_quaternion = self.transform_quaternion[idframe]
q_normalized = normalize(
unormalized_quaternion
) # that will lead to a gradient that is in the tangent space
vertices_transformed = (
qrot(q_normalized, self.vertices) + self.transform_translation[idframe]
)
self.mesh.set_vertices(vertices_transformed)
self.scene.set_light(
light_directional=self.light_directional, light_ambient=self.light_ambient
)
self.mesh.set_vertices_colors(
np.tile(self.mesh_color, (self.mesh.nb_vertices, 1))
)
image = self.scene.render(self.camera)
self.store_backward["render"] = (idframe, unormalized_quaternion, q_normalized)
return image
def clear_gradients(self) -> None:
self.light_directional_b = np.zeros(self.light_directional.shape)
self.light_ambient_b = 0
self._vertices_b = np.zeros(self.vertices.shape)
self.transform_quaternion_b = np.zeros(self.transform_quaternion.shape)
self.transform_translation_b = np.zeros(self.transform_translation.shape)
self.mesh_color_b = np.zeros(self.mesh_color.shape)
self.store_backward = {}
def render_backward(self, image_b: np.ndarray) -> None:
assert self.mesh is not None
assert self.scene.mesh is not None
idframe, unormalized_quaternion, q_normalized = self.store_backward["render"]
self.scene.clear_gradients()
self.scene.render_backward(image_b)
assert self.mesh.vertices_colors_b is not None # helping mypy
self.mesh_color_b += np.sum(self.mesh.vertices_colors_b, axis=0)
self.light_directional_b += self.scene.light_directional_b
self.light_ambient_b += self.scene.light_ambient_b
vertices_transformed_b = self.scene.mesh._vertices_b
self.transform_translation_b[idframe] += np.sum(vertices_transformed_b, axis=0)
q_normalized_b, _vertices_b = qrot_backward(
q_normalized, self.vertices, vertices_transformed_b
)
self._vertices_b += _vertices_b
self.transform_quaternion_b[idframe] += normalize_backward(
unormalized_quaternion, q_normalized_b
) # that will lead to a gradient that is in the tangent space
def energy_data(
self, vertices: np.ndarray
) -> Tuple[float, List[np.ndarray], List[np.ndarray]]:
self.vertices = vertices
images: List[np.ndarray] = []
diff_images: List[np.ndarray] = []
energy_datas: List[float] = []
self.clear_gradients()
coef_data = self.cdata / self.nb_frames
for idframe in range(self.nb_frames):
image = self.render(idframe=idframe)
diff_image = np.sum(
(image[idframe] - self.mesh_images[idframe]) ** 2, axis=2
)
images.append(image)
diff_images.append(diff_image)
image_b = coef_data * 2 * (image[idframe] - self.mesh_images[idframe])
energy_data_image = coef_data * np.sum(diff_image)
energy_datas.append(energy_data_image)
self.render_backward(image_b)
energy_data = float(np.sum(energy_datas))
return energy_data, images, diff_images
def step(
self, check_gradient: bool = False
) -> Tuple[float, List[np.ndarray], List[np.ndarray]]:
self.vertices = self.vertices - np.mean(self.vertices, axis=0)[None, :]
self.nb_frames = len(self.mesh_images)
energy_data, image, diff_image = self.energy_data(self.vertices)
(
energy_rigid,
grad_rigidity,
_,
) = self.rigid_energy.evaluate(self.vertices)
if check_gradient:
def func(x: np.ndarray) -> np.ndarray:
return np.array(self.rigid_energy.evaluate(x)[0])
check_jacobian_finite_differences(
grad_rigidity.flatten(), func, self.vertices
)
def func(x: np.ndarray) -> np.ndarray:
return np.array(self.energy_data(x)[0])
grad_data = self._vertices_b.copy()
check_jacobian_finite_differences(grad_data.flatten(), func, self.vertices)
energy = energy_data + energy_rigid
print(
f"iter {self.iter} Energy={energy} : EData={energy_data} E_rigid={energy_rigid}"
)
if self.iter < 500:
self._vertices_b = (
self._vertices_b - np.mean(self._vertices_b, axis=0)[None, :]
)
# update v
grad = self._vertices_b + grad_rigidity
def mult_and_clamp(x: np.ndarray, a: float, t: float) -> np.ndarray:
return np.minimum(np.maximum(x * a, -t), t)
inertia = self.inertia
# update vertices
step_vertices = mult_and_clamp(
-grad, self.step_factor_vertices, self.step_max_vertices
)
self.speed_vertices = (1 - self.damping) * (
self.speed_vertices * inertia + (1 - inertia) * step_vertices
)
self.vertices = self.vertices + self.speed_vertices
# update rotation
step_quaternion = mult_and_clamp(
-self.transform_quaternion_b,
self.step_factor_quaternion,
self.step_max_quaternion,
)
self.speed_quaternion = (1 - self.damping) * (
self.speed_quaternion * inertia + (1 - inertia) * step_quaternion
)
self.transform_quaternion = self.transform_quaternion + self.speed_quaternion
self.transform_quaternion = self.transform_quaternion / np.linalg.norm(
self.transform_quaternion
)
# update translation
step_translation = mult_and_clamp(
-self.transform_translation_b,
self.step_factor_translation,
self.step_max_translation,
)
self.speed_translation = (1 - self.damping) * (
self.speed_translation * inertia + (1 - inertia) * step_translation
)
self.transform_translation = self.transform_translation + self.speed_translation
# update directional light
step = -self.light_directional_b * 0.0001
self.speed_light_directional = (1 - self.damping) * (
self.speed_light_directional * inertia + (1 - inertia) * step
)
self.light_directional = self.light_directional + self.speed_light_directional
# update ambient light
step_light_ambient = -self.light_ambient_b * 0.0001
self.speed_light_ambient = (1 - self.damping) * (
self.speed_light_ambient * inertia + (1 - inertia) * step_light_ambient
)
self.light_ambient = self.light_ambient + self.speed_light_ambient
# update mesh color
step = -self.mesh_color_b * 0.00001
self.speed_mesh_color = (1 - self.damping) * (
self.speed_mesh_color * inertia + (1 - inertia) * step
)
self.mesh_color = self.mesh_color + self.speed_mesh_color
self.iter += 1
        return energy, image, diff_image
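# Note: the sketch below is not part of the original module.  It is a minimal, self-contained
# illustration of the damped heavy-ball (momentum) update with per-parameter step clamping that
# step() applies above to the vertices, pose, lights and color, here run on a toy quadratic
# energy so the optimizer behaviour can be checked in isolation.  It assumes only that numpy is
# available as np, as elsewhere in this file; all constants are invented for the demo.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    target = rng.normal(size=3)
    x = np.zeros(3)
    speed = np.zeros(3)
    inertia, damping = 0.9, 0.1
    step_factor, step_max = 0.1, 0.5

    def _mult_and_clamp(v: np.ndarray, a: float, t: float) -> np.ndarray:
        # Scale the raw step and clamp it elementwise to [-t, t], as in step().
        return np.minimum(np.maximum(v * a, -t), t)

    for _ in range(200):
        grad = 2 * (x - target)  # gradient of the toy energy ||x - target||^2
        raw_step = _mult_and_clamp(-grad, step_factor, step_max)
        speed = (1 - damping) * (speed * inertia + (1 - inertia) * raw_step)
        x = x + speed
    print("toy heavy-ball demo: max abs error =", np.abs(x - target).max())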
/Django-4.2.4.tar.gz/Django-4.2.4/django/contrib/sitemaps/__init__.py
import warnings
from urllib.parse import urlencode
from urllib.request import urlopen
from django.apps import apps as django_apps
from django.conf import settings
from django.core import paginator
from django.core.exceptions import ImproperlyConfigured
from django.urls import NoReverseMatch, reverse
from django.utils import translation
from django.utils.deprecation import RemovedInDjango50Warning
PING_URL = "https://www.google.com/webmasters/tools/ping"
class SitemapNotFound(Exception):
pass
def ping_google(sitemap_url=None, ping_url=PING_URL, sitemap_uses_https=True):
"""
Alert Google that the sitemap for the current site has been updated.
If sitemap_url is provided, it should be an absolute path to the sitemap
for this site -- e.g., '/sitemap.xml'. If sitemap_url is not provided, this
function will attempt to deduce it by using urls.reverse().
"""
sitemap_full_url = _get_sitemap_full_url(sitemap_url, sitemap_uses_https)
params = urlencode({"sitemap": sitemap_full_url})
urlopen("%s?%s" % (ping_url, params))
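# Usage sketch (illustrative, not part of the original module): from a deployment hook or a
# management command one might call, e.g.,
#
#     ping_google(sitemap_url="/sitemap.xml")
#
# which resolves the current Site's domain via the sites framework and requests Google's ping
# endpoint with the absolute sitemap URL.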
def _get_sitemap_full_url(sitemap_url, sitemap_uses_https=True):
if not django_apps.is_installed("django.contrib.sites"):
raise ImproperlyConfigured(
"ping_google requires django.contrib.sites, which isn't installed."
)
if sitemap_url is None:
try:
# First, try to get the "index" sitemap URL.
sitemap_url = reverse("django.contrib.sitemaps.views.index")
except NoReverseMatch:
try:
# Next, try for the "global" sitemap URL.
sitemap_url = reverse("django.contrib.sitemaps.views.sitemap")
except NoReverseMatch:
pass
if sitemap_url is None:
raise SitemapNotFound(
"You didn't provide a sitemap_url, and the sitemap URL couldn't be "
"auto-detected."
)
Site = django_apps.get_model("sites.Site")
current_site = Site.objects.get_current()
scheme = "https" if sitemap_uses_https else "http"
return "%s://%s%s" % (scheme, current_site.domain, sitemap_url)
class Sitemap:
# This limit is defined by Google. See the index documentation at
# https://www.sitemaps.org/protocol.html#index.
limit = 50000
# If protocol is None, the URLs in the sitemap will use the protocol
# with which the sitemap was requested.
protocol = None
# Enables generating URLs for all languages.
i18n = False
# Override list of languages to use.
languages = None
# Enables generating alternate/hreflang links.
alternates = False
# Add an alternate/hreflang link with value 'x-default'.
x_default = False
def _get(self, name, item, default=None):
try:
attr = getattr(self, name)
except AttributeError:
return default
if callable(attr):
if self.i18n:
# Split the (item, lang_code) tuples again for the location,
# priority, lastmod and changefreq method calls.
item, lang_code = item
return attr(item)
return attr
def get_languages_for_item(self, item):
"""Languages for which this item is displayed."""
return self._languages()
def _languages(self):
if self.languages is not None:
return self.languages
return [lang_code for lang_code, _ in settings.LANGUAGES]
def _items(self):
if self.i18n:
# Create (item, lang_code) tuples for all items and languages.
# This is necessary to paginate with all languages already considered.
items = [
(item, lang_code)
for item in self.items()
for lang_code in self.get_languages_for_item(item)
]
return items
return self.items()
def _location(self, item, force_lang_code=None):
if self.i18n:
obj, lang_code = item
# Activate language from item-tuple or forced one before calling location.
with translation.override(force_lang_code or lang_code):
return self._get("location", item)
return self._get("location", item)
@property
def paginator(self):
return paginator.Paginator(self._items(), self.limit)
def items(self):
return []
def location(self, item):
return item.get_absolute_url()
def get_protocol(self, protocol=None):
# Determine protocol
if self.protocol is None and protocol is None:
warnings.warn(
"The default sitemap protocol will be changed from 'http' to "
"'https' in Django 5.0. Set Sitemap.protocol to silence this "
"warning.",
category=RemovedInDjango50Warning,
stacklevel=2,
)
# RemovedInDjango50Warning: when the deprecation ends, replace 'http'
# with 'https'.
return self.protocol or protocol or "http"
def get_domain(self, site=None):
# Determine domain
if site is None:
if django_apps.is_installed("django.contrib.sites"):
Site = django_apps.get_model("sites.Site")
try:
site = Site.objects.get_current()
except Site.DoesNotExist:
pass
if site is None:
raise ImproperlyConfigured(
"To use sitemaps, either enable the sites framework or pass "
"a Site/RequestSite object in your view."
)
return site.domain
def get_urls(self, page=1, site=None, protocol=None):
protocol = self.get_protocol(protocol)
domain = self.get_domain(site)
return self._urls(page, protocol, domain)
def get_latest_lastmod(self):
if not hasattr(self, "lastmod"):
return None
if callable(self.lastmod):
try:
return max([self.lastmod(item) for item in self.items()], default=None)
except TypeError:
return None
else:
return self.lastmod
def _urls(self, page, protocol, domain):
urls = []
latest_lastmod = None
all_items_lastmod = True # track if all items have a lastmod
paginator_page = self.paginator.page(page)
for item in paginator_page.object_list:
loc = f"{protocol}://{domain}{self._location(item)}"
priority = self._get("priority", item)
lastmod = self._get("lastmod", item)
if all_items_lastmod:
all_items_lastmod = lastmod is not None
if all_items_lastmod and (
latest_lastmod is None or lastmod > latest_lastmod
):
latest_lastmod = lastmod
url_info = {
"item": item,
"location": loc,
"lastmod": lastmod,
"changefreq": self._get("changefreq", item),
"priority": str(priority if priority is not None else ""),
"alternates": [],
}
if self.i18n and self.alternates:
item_languages = self.get_languages_for_item(item[0])
for lang_code in item_languages:
loc = f"{protocol}://{domain}{self._location(item, lang_code)}"
url_info["alternates"].append(
{
"location": loc,
"lang_code": lang_code,
}
)
if self.x_default and settings.LANGUAGE_CODE in item_languages:
lang_code = settings.LANGUAGE_CODE
loc = f"{protocol}://{domain}{self._location(item, lang_code)}"
loc = loc.replace(f"/{lang_code}/", "/", 1)
url_info["alternates"].append(
{
"location": loc,
"lang_code": "x-default",
}
)
urls.append(url_info)
if all_items_lastmod and latest_lastmod:
self.latest_lastmod = latest_lastmod
return urls
class GenericSitemap(Sitemap):
priority = None
changefreq = None
def __init__(self, info_dict, priority=None, changefreq=None, protocol=None):
self.queryset = info_dict["queryset"]
self.date_field = info_dict.get("date_field")
self.priority = self.priority or priority
self.changefreq = self.changefreq or changefreq
self.protocol = self.protocol or protocol
def items(self):
# Make sure to return a clone; we don't want premature evaluation.
return self.queryset.filter()
def lastmod(self, item):
if self.date_field is not None:
return getattr(item, self.date_field)
return None
def get_latest_lastmod(self):
if self.date_field is not None:
return (
self.queryset.order_by("-" + self.date_field)
.values_list(self.date_field, flat=True)
.first()
)
        return None
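# Usage sketch (not part of this module): in a project one typically subclasses Sitemap per
# content type, or wraps a queryset with GenericSitemap.  The app and model below ("blog",
# "Entry") and the field names are hypothetical placeholders.
#
#     from django.contrib.sitemaps import GenericSitemap, Sitemap
#     from blog.models import Entry
#
#     class EntrySitemap(Sitemap):
#         changefreq = "weekly"
#         priority = 0.6
#
#         def items(self):
#             return Entry.objects.filter(is_published=True)
#
#         def lastmod(self, obj):
#             return obj.updated_at
#
#     sitemaps = {
#         "entries": GenericSitemap(
#             {"queryset": Entry.objects.all(), "date_field": "updated_at"},
#             priority=0.5,
#         ),
#     }
#
# The resulting dict is then passed to django.contrib.sitemaps.views.sitemap in the URLconf.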
/Biosaur-2.0.3.tar.gz/Biosaur-2.0.3/biosaur_src/funcs.py
from . import classes
import numpy as np
from scipy.stats import binom
from scipy.stats import scoreatpercentile
from scipy.optimize import curve_fit
import operator
import math
from multiprocessing import Queue, Process, cpu_count
import logging
import itertools
from copy import deepcopy
from collections import defaultdict
logging.basicConfig(format=u'%(filename)s[LINE:%(lineno)d]#\
%(levelname)-8s [%(asctime)s] %(message)s', level=logging.DEBUG)
# def check_its_ready(id_real, peak, check_degree):
# mz_list = peak.mz_array
# scan_list = peak.scan_id
# for i in range(len(mz_list)):
# if id_real - scan_list[i][-1] > check_degree:
# tmp_ready_hill = classes.ready_hill(
# intensity=peak.intensity.pop(i),
# scan_id=peak.scan_id.pop(i),
# mass=peak.mass_array.pop(i),
# mz=peak.mz_array.pop(i))
# peak.finished_hills.append(tmp_ready_hill)
def data_to_features(input_file, max_diff, min_length_hill, proccess_number, start_index, end_index):
# data = mzml.read(input_file)
# working_area = next(data)
# working_area = input_file[0]
# idx = working_area['intensity array'] >= 1
# working_area['m/z array'] = working_area['m/z array'][idx]
# working_area['intensity array'] = working_area['intensity array'][idx]
# print(len(input_file[0]['m/z array']))
# if idx > 10:
# break
RT_dict = dict()
mz_step = max_diff * 1e-6 * 2500
k = 0
# print(len(input_file))
for i in input_file:
idx = (i['m/z array'] >= start_index) & (i['m/z array'] < end_index)
# (dists >= r) & (dists <= r+dr)
new_mz_array = i['m/z array'][idx]
new_intensity_array = i['intensity array'][idx]
if 'mean inverse reduced ion mobility array' in i:
new_ion_mobility_array = i['mean inverse reduced ion mobility array'][idx]
else:
new_ion_mobility_array = None
if k == 0:
peak1 = classes.peak(
new_mz_array,
new_intensity_array,
i['index'],
i['index'],
new_ion_mobility_array,
)
RT_dict[i['index']] = float(
i['scanList']['scan'][0]['scan start time'])
if k > 0:
next_peak_i = classes.next_peak(
new_mz_array,
new_intensity_array,
i['index'],
new_ion_mobility_array,
)
peak1.push_me_to_the_peak(next_peak_i, max_diff, min_length_hill, mz_step)
RT_dict[i['index']] = float(
i['scanList']['scan'][0]['scan start time'])
# if k > 10:
# break
# pass
k += 1
peak1.push_left(min_length=min_length_hill)
# peak1.medar = np.array(peak1.medar)
# peak1.medar = np.cumprod(peak1.medar)
# for idx in range(len(peak1.finished_hills)):
# tmp_mass = [mv * peak1.medar[sv-1] for mv, sv in zip(peak1.finished_hills[idx].mass, peak1.finished_hills[idx].scan_id)]
# peak1.finished_hills[idx].mass = tmp_mass
# peak1.finished_hills[idx].mz = np.median(tmp_mass)
logging.info(
u'Data converted to features with process /' +
str(proccess_number + 1) + '/ --->')
return peak1, RT_dict
def first_or_second(id1, id2, charge1, charge2, first, second, theoretiacal):
if abs(theoretiacal - first) <= abs(theoretiacal - second):
return id1, charge1
else:
return id2, charge2
def cos_correlation(theoretical_list, experimental_list):
suit_len = min(len(theoretical_list), len(experimental_list))
theoretical_list = theoretical_list[:suit_len]
experimental_list = experimental_list[:suit_len]
top = 0
bottom = math.sqrt(sum([numb * numb for numb in theoretical_list])) * \
math.sqrt(sum([numb * numb for numb in experimental_list]))
for i1, i2 in zip(theoretical_list, experimental_list):
top += i1 * i2
return top / bottom
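# cos_correlation() is the plain cosine similarity of the leading, equal-length parts of the
# theoretical and experimental isotope-intensity lists.  Illustrative checks (hand-picked
# numbers, not real data):
#
#     >>> round(cos_correlation([1.0, 2.0, 3.0], [1.0, 2.0, 3.0]), 6)
#     1.0
#     >>> round(cos_correlation([1.0, 0.0], [0.0, 1.0]), 6)
#     0.0
#     >>> round(cos_correlation([3.0, 2.0, 1.0], [3.0, 2.0]), 6)  # extra isotopes are ignored
#     1.0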
def cos_correlation_new(theoretical_list, experimental_list, shf):
theor_total_sum = sum(theoretical_list)
theoretical_list = theoretical_list[shf:]
suit_len = min(len(theoretical_list), len(experimental_list))
theoretical_list = theoretical_list[:suit_len]
experimental_list = experimental_list[:suit_len]
top = 0
for i1, i2 in zip(theoretical_list, experimental_list):
top += i1 * i2
if not top:
return 0, 0
else:
bottom = math.sqrt(sum([numb * numb for numb in theoretical_list])) * \
math.sqrt(sum([numb * numb for numb in experimental_list]))
averagineExplained = sum(theoretical_list) / theor_total_sum
return top / bottom, averagineExplained
def cos_correlation_fill_zeroes(hill_1, hill_2):
inter_set = hill_1.scan_set.intersection(hill_2.scan_set)
if len(inter_set) >= 2:
top = 0
for i in inter_set:
h1_val = hill_1.idict.get(i, 0)
h2_val = hill_2.idict.get(i, 0)
top += h1_val * h2_val
bottom = hill_1.sqrt_of_i_sum_squares * hill_2.sqrt_of_i_sum_squares
# bottom = math.sqrt(sum(v * v for key, v in hill_1.idict.items() if key in inter_set)) * math.sqrt(sum(v * v for key, v in hill_2.idict.items() if key in inter_set))
return top / bottom
else:
return 0
def checking_cos_correlation_for_carbon_noshift(
theoretical_list, experimental_list, thresh):
# prev_corr = 0
# size = 1
best_value = 0
best_shift = 0
best_pos = 1
best_cor = 0
# for shf in range(4):
for shf in range(1):
# shf = 0
pos = len(experimental_list)
while pos != 1:
averagineCorrelation, averagineExplained = cos_correlation_new(
theoretical_list, experimental_list[:pos], shf)
# if averagineExplained < 0.5:
# break
if averagineExplained >= 0.5 and averagineCorrelation >= thresh:
tmp_val = averagineCorrelation * averagineExplained
if tmp_val > best_value:
best_value = tmp_val
best_cor = averagineCorrelation
best_shift = shf
best_pos = pos
break
pos -= 1
# if correlation >= thresh:
# if correlation >= prev_corr:
# prev_corr = correlation
# size = len(experimental_list)
# else:
# size = len(experimental_list)
# return correlation, pos#, shift
# experimental_list = experimental_list[:-1]
if best_value:
break
return best_cor, best_pos, best_shift
def checking_cos_correlation_for_carbon(
theoretical_list, experimental_list, thresh):
# prev_corr = 0
# size = 1
best_value = 0
best_shift = 0
best_pos = 1
best_cor = 0
for shf in range(4):
# shf = 0
pos = len(experimental_list)
while pos != 1:
averagineCorrelation, averagineExplained = cos_correlation_new(
theoretical_list, experimental_list[:pos], shf)
# if averagineExplained < 0.5:
# break
if averagineExplained >= 0.5 and averagineCorrelation >= thresh:
tmp_val = averagineCorrelation# * averagineExplained
if tmp_val > best_value:
best_value = tmp_val
best_cor = averagineCorrelation
best_shift = shf
best_pos = pos
break
pos -= 1
# if correlation >= thresh:
# if correlation >= prev_corr:
# prev_corr = correlation
# size = len(experimental_list)
# else:
# size = len(experimental_list)
# return correlation, pos#, shift
# experimental_list = experimental_list[:-1]
if best_value:
break
return best_cor, best_pos, best_shift
def iter_hills(
peak,
min_charge,
max_charge,
min_intensity,
mass_acc,
start_index,
end_index,
min_length,
proccess_number=1):
ready = []
averagine_mass = 111.1254
averagine_C = 4.9384
tmplist = list(range(10))
prec_masses = []
# prec_isotopes = []
# prec_minisotopes = []
# isotopes_int = []
a = dict()
mz_step = mass_acc * 1e-6 * 2500
# s_list = [1, 2, 3]
# s_dict = dict()
# for i in s_list:
# int_arr = binom.pmf(tmplist, i, 0.0425)
# s_dict[i] = int_arr
# tmplist_s = [1, 2, 3]
# s_list = [1, 2, 3]
# s_dict = dict()
# for i in s_list:
# int_arr = binom.pmf(tmplist_s, i, 0.0425)
# s_dict[i] = int_arr
# s_list = [s_dict[i] for i in tmplist_s]
# s_list = [0.9575, 0.0425, 0.0425**2, 0.0425**3]
for i in range(100, 20000, 100):
int_arr = binom.pmf(
tmplist,
float(i) /
averagine_mass *
averagine_C,
0.0107)
prec_masses.append(i)
# int_arr_norm = int_arr / int_arr.max()
int_arr_norm = int_arr / int_arr.sum()
# prec_is = np.where(int_arr_norm >= 0.01)[0]
# isotopes_int.append(int_arr_norm[prec_is])
# prec_minisotopes.append(prec_is.min())
# prec_isotopes.append(prec_is - prec_minisotopes[-1])
a[i] = int_arr_norm
end_index = min(end_index, len(peak.finished_hills))
size = end_index
ready_set = set()
charges = list(range(min_charge, max_charge + 1, 1)[::-1])
numbers = []
for k in range(10):
numbers.append(k)
for i in range(start_index, end_index, 1):
if peak.finished_hills[i].scan_len >= min_length:
peak_1_mz = peak.finished_hills[i].mz
left_border_i = peak.finished_hills[i].scan_id[0]
right_border_i = peak.finished_hills[i].scan_id[-1]
mz_tol = mass_acc * 1e-6 * peak.finished_hills[i].mz
# s_tmp_intensity = s_list
# s_all_theoretical_int = [
# peak.finished_hills[i].max_intensity *
# s_tmp_intensity[z] /
# s_tmp_intensity[0] for z in numbers[:2]]
for charge in charges:
candidates = []
s_candidates = []
k = i
ks = i
for numb in numbers[1:]:
tmp_candidates = []
tmp_s_candidates = []
m_to_check = peak_1_mz + (1.00335 * numb / charge)
m_to_check_fast = int(m_to_check/mz_step)
for j in peak.get_potential_isotope_id(m_to_check_fast, i):
peak_2_mz = peak.finished_hills[j].mz
diff = peak_2_mz - m_to_check
if abs(diff) <= mz_tol and (peak.finished_hills[i].opt_ion_mobility is None or abs(peak.finished_hills[i].opt_ion_mobility-peak.finished_hills[j].opt_ion_mobility) <= 0.01):
cos_cor_test = cos_correlation_fill_zeroes(
peak.finished_hills[i],
peak.finished_hills[j])
if cos_cor_test >= 0.6:
tmp_candidates.append((j, charge, cos_cor_test, diff/m_to_check*1e6, 0))
if numb == 1:
diff_for_output = diff / peak_2_mz
# if numb == 2:
# for n_sulf in range(1, 4, 1):
# m_to_check2 = peak_1_mz + (1.00335 * (numb - 2) / charge) + (1.9957958999999974 / charge)
# sulf_int = s_dict[n_sulf][0]
# # sulf_int = 0.0425 * tmp_intensity[numb-2]
# m_to_check2 = (m_to_check2 * sulf_int + m_to_check * tmp_intensity[numb]) / (sulf_int+tmp_intensity[numb])
# m_to_check2_fast = int(m_to_check2/0.02)
# # print(m_to_check, m_to_check2)
# for j in peak.get_potential_isotope_id(m_to_check2_fast, i):
# if j not in ready_set and (peak.finished_hills[i].opt_ion_mobility is None or abs(peak.finished_hills[i].opt_ion_mobility-peak.finished_hills[j].opt_ion_mobility) <= 0.01):
# peak_2_mz = peak.finished_hills[j].mz
# diff = peak_2_mz - m_to_check2
# if abs(diff) <= mz_tol:
# cos_cor_test = cos_correlation_fill_zeroes(
# peak.finished_hills[i],
# peak.finished_hills[j])
# if cos_cor_test >= 0.6:
# tmp_candidates.append((j, charge, cos_cor_test, diff/m_to_check2*1e6, n_sulf))
# if numb == 2:
# m_to_check = peak_1_mz + \
# (1.9957958999999974 / 2 * numb / charge)
# m_to_check_fast = int(m_to_check/0.02)
# # for j in range(ks + 1, size, 1):
# for j in peak.get_potential_isotope_id(m_to_check_fast, i):
# if j not in ready_set and j != candidates[-1][0]:
# peak_2_mz = peak.finished_hills[j].mz
# diff = peak_2_mz - m_to_check
# # if diff > mz_tol:
# # ks = j - 1
# # break
# # if
# # cos_correlation_fill_zeroes(peak.finished_hills[i],
# # peak.finished_hills[j]) >= 0.7:
# if abs(diff) <= mz_tol:
# s_cos_cor = cos_correlation_fill_zeroes(
# peak.finished_hills[i],
# peak.finished_hills[j])
# if s_cos_cor >= 0.6:
# tmp_s_candidates.append([j, charge, s_cos_cor, diff/m_to_check*1e6])
# # if len(s_candidates) < numb / 2:
# # s_candidates.append(
# # (j, charge))
# # else:
# # intensity1 = (
# # peak.finished_hills[
# # s_candidates[-1][0]]
# # .max_intensity)
# # intensity2 = (
# # peak.finished_hills[j]
# # .max_intensity)
# # s_th_i = s_all_theoretical_int[
# # int(numb / 2)]
# # s_candidates[-1] = (
# # first_or_second(
# # s_candidates[-1][0],
# # j,
# # s_candidates[-1][1],
# # charge,
# # intensity1,
# # intensity2,
# # s_th_i))
# # pass
if len(tmp_candidates):
# if len(tmp_candidates) > 1:
# print(len(tmp_candidates))
candidates.append(tmp_candidates)
# if len(tmp_s_candidates):
# s_candidates = tmp_s_candidates
if len(candidates) < numb:
break
# if len(candidates) > 0: # FIXME
# break
if candidates:
neutral_mass = peak_1_mz * charge
tmp_intensity = a[int(100 * (neutral_mass // 100))]
all_theoretical_int = [
peak.finished_hills[i].max_intensity *
tmp_intensity[z] /
tmp_intensity[0] for z in numbers]
# s_candidates = []
# if len(s_candidates):
# tmp_s_candidates = []
# for iter_s_candidates in s_candidates:
# s_all_exp_intensity = [peak.finished_hills[i].max_intensity]
# # for k in iter_s_candidates:
# s_all_exp_intensity.append(
# peak.finished_hills[iter_s_candidates[0]].max_intensity)
# s_c_cor = cos_correlation(
# s_all_theoretical_int,
# s_all_exp_intensity)
# if s_c_cor > 0.6:
# tmp_s_candidates.append(iter_s_candidates)
# tmp_s_candidates[-1].append(s_c_cor)
# if len(tmp_s_candidates):
# s_candidates = sorted(tmp_s_candidates, key=lambda x: -x[3])
# else:
# s_candidates = []
for iter_candidates in itertools.product(*candidates):
# if len(iter_candidates) > 1:
# basic_sulfur = iter_candidates[1][4]
# else:
# basic_sulfur = 0
# # print(basic_sulfur)
# iter_candidates_new = []
# for z_idx, z in enumerate(iter_candidates):
# if z_idx > 0:
# if z[4] == basic_sulfur:
# iter_candidates_new.append(z)
# else:
# break
# else:
# iter_candidates_new.append(z)
# iter_candidates = iter_candidates_new
all_exp_intensity = [peak.finished_hills[i].max_intensity]
for j in iter_candidates:
if j[1] != 0:
all_exp_intensity.append(
peak.finished_hills[j[0]].max_intensity)
else:
all_exp_intensity.append(0)
(
cos_corr,
number_of_passed_isotopes,
shift) = checking_cos_correlation_for_carbon(
all_theoretical_int, all_exp_intensity, 0.6)
cos_corr_for_output = cos_correlation(
all_theoretical_int[0:1],
all_exp_intensity[0:1])
                        if cos_corr:  # TODO: make the 0.6 correlation threshold a configurable parameter
# print(shift)
iter_candidates = iter_candidates[:number_of_passed_isotopes]
                            # TODO: add s_candidates
j2 = iter_candidates[0][0]
scan_id_2 = peak.finished_hills[j2].scan_id
mass_2 = peak.finished_hills[j2].mass
intensity_2 = peak.finished_hills[j2].intensity
ready.append([
i,
iter_candidates,
s_candidates,
shift,
[cos_corr,
cos_corr_for_output,
cos_cor_test,
diff_for_output,
peak.finished_hills[i].intensity,
peak.finished_hills[i].scan_id,
peak.finished_hills[i].mass,
intensity_2,
scan_id_2,
mass_2],
[all_theoretical_int, all_exp_intensity]])
# ready_set.add(i)
# for ic in candidates:
# if ic[1] != 0:
# ready_set.add(ic[0])
# for ic in s_candidates:
# ready_set.add(ic[0])
# ready = sorted(ready, key=lambda x: -len(x[1]))
# ready_final = []
# ready_set = set()
# for pep_feature in ready:
# if pep_feature[0] not in ready_set:
# ready_final.append(pep_feature)
# ready_set.add(pep_feature[0])
logging.info(
u'All hills were iterated correctly with this process /' +
str(proccess_number + 1) + '/ -->')
return ready
def worker_data_to_features(
data_for_analyse,
qout,
start_index,
end_index,
mass_accuracy,
min_length_hill, hillValleyFactor, proccess_number):
start_index = start_index * (1 - 1e-6 * 2 * mass_accuracy)
end_index = end_index * (1 + 1e-6 * 2 * end_index)
result_peak, result_RT_dict = data_to_features(
data_for_analyse,
mass_accuracy,
min_length_hill,
proccess_number,
start_index,
end_index
)
result_peak.split_peaks(hillValleyFactor, min_length_hill)
set_to_del = set()
for hill_idx, hill in enumerate(result_peak.finished_hills):
if len(hill.mass) >= 40:
if max(hill.intensity) < 2 * max(hill.intensity[0], hill.intensity[-1]):
set_to_del.add(hill_idx)
for idx in sorted(list(set_to_del))[::-1]:
del result_peak.finished_hills[idx]
result_peak.calc_accurate_mz()
if result_peak:
qout.put((result_peak, result_RT_dict))
qout.put(None)
def boosting_firststep_with_processes(
number_of_processes,
data_for_analyse,
mass_accuracy,
min_length_hill,
hillValleyFactor,
data_start_index=0):
for idx, v in enumerate(data_for_analyse):
v['index'] = idx + 1 + data_start_index
if number_of_processes == 0:
try:
number_of_processes = cpu_count()
except NotImplementedError:
number_of_processes = 1
if number_of_processes == 1:
result_peak, result_RT_dict = data_to_features(
data_for_analyse, mass_accuracy, min_length_hill, 1, 0, 2500)
result_peak.split_peaks(hillValleyFactor, min_length_hill)
set_to_del = set()
for hill_idx, hill in enumerate(result_peak.finished_hills):
if len(hill.mass) >= 40:
if max(hill.intensity) < 2 * max(hill.intensity[0], hill.intensity[-1]):
set_to_del.add(hill_idx)
for idx in sorted(list(set_to_del))[::-1]:
del result_peak.finished_hills[idx]
result_peak.calc_accurate_mz()
else:
qout = Queue()
# qin = list(islice(it, 500000))
# if not len(qin):
# break
# # print 'Loaded 500000 items. Ending cycle.'
procs = []
data_for_analyse_len = len(data_for_analyse)
# step = int(data_for_analyse_len / number_of_processes) + 1
step = int(2500 / number_of_processes / 3) + 1
# start_index = 0
start_mz = 100
for i in range(number_of_processes * 3):
p = Process(
target=worker_data_to_features,
args=(
data_for_analyse,
qout,
start_mz,
step + start_mz,
mass_accuracy,
min_length_hill, hillValleyFactor, i))
# print(start_index)
p.start()
procs.append(p)
start_mz += step
result_peak = False
result_RT_dict = False#dict()
# all_peaks = []
for _ in range(number_of_processes * 3):
for item in iter(qout.get, None):
# all_peaks.append(item[0])
# result_RT_dict.update(item[1])
# print(len(item[0].finished_hills))
if not result_peak:
# print(item[0].mz_array)
result_peak, result_RT_dict = item[0], item[1]
else:
# print(item[0].mz_array)
result_peak.concat_peak_with(item[0])
result_RT_dict.update(item[1])
# result_peak = concat_peaks(all_peaks)
# print(len(result_peak.finished_hills))
for p in procs:
p.join()
return result_peak, result_RT_dict
def concat_peaks(all_peaks):
all_peaks = sorted(all_peaks, key=lambda x: x.intervals[0])
result_peak = all_peaks[0]
for peak in all_peaks[1:]:
result_peak.concat_peak_new(peak)
return result_peak
def worker_iter_hills(
peak,
qout,
start_index,
end_index,
min_charge,
max_charge,
min_intensity,
mass_accuracy,
min_length,
proccess_number
):
result_q = iter_hills(
peak,
min_charge,
max_charge,
min_intensity,
mass_accuracy,
start_index,
end_index,
min_length,
proccess_number)
if result_q:
qout.put(result_q)
qout.put(None)
def noisygaus(x, a, x0, sigma, b):
return a * np.exp(-(x - x0) ** 2 / (2 * sigma ** 2)) + b
def calibrate_mass(bwidth, mass_left, mass_right, true_md):
bbins = np.arange(-mass_left, mass_right, bwidth)
H1, b1 = np.histogram(true_md, bins=bbins)
b1 = b1 + bwidth
b1 = b1[:-1]
popt, pcov = curve_fit(noisygaus, b1, H1, p0=[1, np.median(true_md), 1, 1])
mass_shift, mass_sigma = popt[1], abs(popt[2])
return mass_shift, mass_sigma, pcov[0][0]
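# calibrate_mass() fits a Gaussian-plus-constant (noisygaus) to a histogram of mass errors and
# returns the fitted centre (systematic mass shift), its sigma, and the variance of the first
# fit parameter.  Illustrative sketch with synthetic errors (all numbers invented):
#
#     >>> synthetic_md = np.random.normal(0.5, 2.0, 5000)  # ppm errors: true shift 0.5, sigma 2.0
#     >>> shift, sigma, cov = calibrate_mass(0.1, 10, 10, synthetic_md)
#     >>> # shift should come out close to 0.5 and sigma close to 2.0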
def boosting_secondstep_with_processes(
number_of_processes,
peak,
min_charge,
max_charge,
min_intensity,
mass_accuracy,
min_length):
if number_of_processes == 0:
try:
number_of_processes = cpu_count()
except NotImplementedError:
number_of_processes = 1
if number_of_processes == 1:
ready = iter_hills(
peak, min_charge, max_charge, min_intensity, mass_accuracy, 0, len(
peak.finished_hills), min_length)
else:
qout = Queue()
# qin = list(islice(it, 500000))
# if not len(qin):
# break
# # print 'Loaded 500000 items. Ending cycle.'
procs = []
peak_len = len(peak.finished_hills)
step = int(peak_len / number_of_processes) + 1
start_index = 0
for i in range(number_of_processes):
p = Process(
target=worker_iter_hills,
args=(
peak,
qout,
start_index,
step + start_index,
min_charge,
max_charge,
min_intensity,
mass_accuracy,
min_length,
i))
# print(start_index)
p.start()
procs.append(p)
start_index += step
ready = False
for _ in range(number_of_processes):
for item in iter(qout.get, None):
if not ready:
ready = item
else:
ready = ready + item
for p in procs:
p.join()
ready = sorted(ready, key=lambda x: -len(x[1]))
ready_final = []
ready_set = set()
# import pickle
# pickle.dump(ready, open('ready.pickle', 'wb'))
isotopes_mass_error_map = {}
for ic in range(1, 10, 1):
isotopes_mass_error_map[ic] = []
for pep_feature in ready:
for icc, cand in enumerate(pep_feature[1]):
if icc != 1 or cand[4] == 0:
isotopes_mass_error_map[icc+1].append(cand[3])
for ic in range(1, 10, 1):
if ic == 1 and len(isotopes_mass_error_map[ic]) >= 10:
try:
true_md = np.array(isotopes_mass_error_map[ic])
mass_left = -min(isotopes_mass_error_map[ic])
mass_right = max(isotopes_mass_error_map[ic])
try:
mass_shift, mass_sigma, covvalue = calibrate_mass(0.01, mass_left, mass_right, true_md)
except:
try:
mass_shift, mass_sigma, covvalue = calibrate_mass(0.05, mass_left, mass_right, true_md)
except:
mass_shift, mass_sigma, covvalue = calibrate_mass(0.25, mass_left, mass_right, true_md)
if np.isinf(covvalue):
mass_shift, mass_sigma, covvalue = calibrate_mass(0.05, mass_left, mass_right, true_md)
isotopes_mass_error_map[ic] = [mass_shift, mass_sigma]
except:
isotopes_mass_error_map[ic] = isotopes_mass_error_map[ic-1]
else:
if ic-1 in isotopes_mass_error_map:
isotopes_mass_error_map[ic] = deepcopy(isotopes_mass_error_map[ic-1])
isotopes_mass_error_map[ic][0] = isotopes_mass_error_map[ic][0] - 0.45
else:
isotopes_mass_error_map[ic] = [0, 10]
print(isotopes_mass_error_map)
for pfidx, pep_feature in enumerate(ready):
allowed_idx = 1
for icc, cand in enumerate(pep_feature[1]):
if abs(cand[3] - isotopes_mass_error_map[icc+1][0])/isotopes_mass_error_map[icc+1][1] <= 5 or (icc == 1 and cand[4] > 0):
allowed_idx += 1
else:
break
all_theoretical_int, all_exp_intensity = pep_feature[5]
all_theoretical_int = all_theoretical_int[:allowed_idx]
all_exp_intensity = all_exp_intensity[:allowed_idx]
ready[pfidx][1] = ready[pfidx][1][:allowed_idx]
ready[pfidx][5] = [all_theoretical_int, all_exp_intensity]
ready[pfidx].append(min(checking_cos_correlation_for_carbon(
all_theoretical_int, all_exp_intensity, 0.6)[0], 0.99999999))
# ready = sorted(ready, key=lambda x: -x[-1])
ready = sorted(ready, key=lambda x: -len(x[-2][0])-x[-1])
# ready = sorted(ready, key=lambda x: -len(x[-2][0]))
# for pep_feature in ready:
# if pep_feature[0] not in ready_set:
# if not any(cand[0] in ready_set for cand in pep_feature[1]):
# ready_final.append(pep_feature)
# ready_set.add(pep_feature[0])
# for cand in pep_feature[1]:
# ready_set.add(cand[0])
# # for s_cand in pep_feature[2]:
# # if s_cand[0] not in ready_set:
# # ready_set.add(s_cand[0])
# # break
# else:
# tmp = []
# for cand in pep_feature[1]:
# if cand[0] not in ready_set:
# tmp.append(cand)
# else:
# break
# if len(tmp):
# pep_feature[1] = tmp
# all_theoretical_int, all_exp_intensity = pep_feature[5]
# all_theoretical_int = all_theoretical_int[:len(tmp)]
# all_exp_intensity = all_exp_intensity[:len(tmp)]
# (cos_corr,
# number_of_passed_isotopes,
# shift) = checking_cos_correlation_for_carbon(
# all_theoretical_int, all_exp_intensity, 0.6)
# if cos_corr:
# ready_final.append(pep_feature)
# ready_set.add(pep_feature[0])
# for cand in pep_feature[1]:
# ready_set.add(cand[0])
# # for s_cand in pep_feature[2]:
# # if s_cand[0] not in ready_set:
# # ready_set.add(s_cand[0])
# # break
max_l = len(ready)
cur_l = 0
ready_final = []
ready_set = set()
ready = sorted(ready, key=lambda x: -len(x[-2][0])-x[-1])
cur_isotopes = len(ready[0][-2][0])
cnt_mark = 0
while cur_l < max_l:
cnt_mark += 1
# if cnt_mark > 1000:
# break
pep_feature = ready[cur_l]
# print(cur_l, max_l, cur_isotopes, len(ready_final), -len(pep_feature[-2][0])-pep_feature[-1])
n_iso = len(pep_feature[-2][0])
if n_iso < cur_isotopes:
ready = sorted(ready, key=lambda x: -len(x[-2][0]))
cur_isotopes = n_iso
cur_l = 0
if pep_feature[0] not in ready_set:
if not any(cand[0] in ready_set for cand in pep_feature[1]):
ready_final.append(pep_feature)
ready_set.add(pep_feature[0])
for cand in pep_feature[1]:
ready_set.add(cand[0])
for s_cand in pep_feature[2]:
ready_set.add(s_cand[0])
del ready[cur_l]
max_l -= 1
cur_l -= 1
else:
tmp = []
# cur_isotopes = len(pep_feature[1])
for cand in pep_feature[1]:
if cand[0] not in ready_set:
tmp.append(cand)
else:
break
if len(tmp):
pep_feature[1] = tmp
all_theoretical_int, all_exp_intensity = pep_feature[5]
all_theoretical_int = all_theoretical_int[:len(tmp)]
all_exp_intensity = all_exp_intensity[:len(tmp)]
(cos_corr,
number_of_passed_isotopes,
shift) = checking_cos_correlation_for_carbon(
all_theoretical_int, all_exp_intensity, 0.6)
if cos_corr:
ready[cur_l] = [pep_feature[0],
pep_feature[1],
pep_feature[2],
pep_feature[3],
pep_feature[4],
[all_theoretical_int, all_exp_intensity],
cos_corr]
else:
del ready[cur_l]
max_l -= 1
cur_l -= 1
else:
del ready[cur_l]
max_l -= 1
cur_l -= 1
else:
del ready[cur_l]
max_l -= 1
cur_l -= 1
# ready = ready[:cur_l] + sorted(ready[cur_l:], key=lambda x: -len(x[-2][0])-x[-1])
# cur_l -= 1
cur_l += 1
return ready_final, isotopes_mass_error_map
# FIXME: rewrite this function to compute correlations over a list of required indices
def func_for_correlation_matrix(set_of_features):
logging.info(u'Counting features correlation...')
out_put_dict = defaultdict(list)
set_length = len(set_of_features)
each_id = 0
# for each_id, each_feature in enumerate(set_of_features[:-1]):
while each_id < set_length - 1:
each_feature = set_of_features[each_id]
if each_id % 50 == 0:
logging.info(
u'Calculated ' +
str(each_id + 1) +
'/' + str(set_length) +
' features.')
other_id = each_id + 1
while other_id < set_length:
other_feature = set_of_features[other_id]
if other_feature.scans[0] > each_feature.scans[-1]:
break
tmp_corr = cos_correlation_fill_zeroes(
each_feature,
other_feature)
if tmp_corr > 0.5:
out_put_dict[each_feature.id] += [{other_feature.id: tmp_corr}]
out_put_dict[other_feature.id] += [{each_feature.id: tmp_corr}]
other_id += 1
each_id += 1
return out_put_dict
def func_for_correlation_matrix_2(set_of_features, idx_list):
logging.info(u'Counting features correlation...')
out_put_dict = defaultdict(list)
# for each_id, each_feature in enumerate(set_of_features[:-1]):
for i in idx_list:
each_feature = set_of_features[i]
if i % 50 == 0:
logging.info(
u'Calculated ' +
str(i + 1) +
'/' + str(len(idx_list)) +
' features.')
other_id = i + 1
while other_id < idx_list[-1] + 1:
other_feature = set_of_features[other_id]
if other_feature.scans[0] > each_feature.scans[-1]:
break
tmp_corr = cos_correlation_fill_zeroes(
each_feature,
other_feature)
if tmp_corr > 0.5:
out_put_dict[each_feature.id] += [{other_feature.id: tmp_corr}]
out_put_dict[other_feature.id] += [{each_feature.id: tmp_corr}]
other_id += 1
return out_put_dict
def worker_func_for_correlation_matrix(
set_of_features,
qout,
idx_list
):
result_q = func_for_correlation_matrix_2(
set_of_features,
idx_list,
)
if result_q:
qout.put(result_q)
qout.put(None)
def boosting_correlation_matrix_with_processes(
number_of_processes, set_of_features):
if number_of_processes == 0:
try:
number_of_processes = cpu_count()
except NotImplementedError:
number_of_processes = 1
if number_of_processes == 1:
result_q = func_for_correlation_matrix(
set_of_features
)
else:
qout = Queue()
# qin = list(islice(it, 500000))
# if not len(qin):
# break
# # print 'Loaded 500000 items. Ending cycle.'
procs = []
        # FIXME: verify/fix the indices in idx_list
set_len = len(set_of_features)
step = int(set_len / 2 / number_of_processes) + 1
start_index = 0
for i in range(number_of_processes):
idx_list = [x for x in range(i * step, (i + 1) * step)] + [x for x in range(set_len - (i+1) * step, set_len - i * step)]
p = Process(
target=worker_func_for_correlation_matrix,
args=(
set_of_features,
qout,
idx_list))
# print(start_index)
p.start()
procs.append(p)
result_q = False
for _ in range(number_of_processes):
for item in iter(qout.get, None):
# print(len(item[0].finished_hills))
if not result_q:
# print(item[0].mz_array)
result_q = item
else:
# print(item[0].mz_array)
for key in item:
result_q.update(item[key])
# print(len(result_peak.finished_hills))
for p in procs:
p.join()
    return result_q
/Django-Bootstrap3-Validator-0.3.3.zip/Django-Bootstrap3-Validator-0.3.3/bootstrap_validator/static/validator/js/language/pl_PL.js
(function($) {
/**
* Polish language package
* Translated by @grzesiek
*/
$.fn.bootstrapValidator.i18n = $.extend(true, $.fn.bootstrapValidator.i18n, {
base64: {
'default': 'Wpisz poprawny ciąg znaków zakodowany w base 64'
},
between: {
'default': 'Wprowadź wartość pomiędzy %s i %s',
notInclusive: 'Wprowadź wartość pomiędzy %s i %s (zbiór otwarty)'
},
callback: {
'default': 'Wprowadź poprawną wartość'
},
choice: {
'default': 'Wprowadź poprawną wartość',
less: 'Wybierz przynajmniej %s opcji',
more: 'Wybierz maksymalnie %s opcji',
between: 'Wybierz przynajmniej %s i maksymalnie %s opcji'
},
color: {
            'default': 'Wprowadź poprawny kolor'
},
creditCard: {
'default': 'Wprowadź poprawny numer karty kredytowej'
},
cusip: {
'default': 'Wprowadź poprawny numer CUSIP'
},
cvv: {
'default': 'Wprowadź poprawny numer CVV'
},
date: {
'default': 'Wprowadź poprawną datę',
min: 'Wprowadź datę po %s',
max: 'Wprowadź datę przed %s',
range: 'Wprowadź datę pomiędzy %s i %s'
},
different: {
'default': 'Wprowadź inną wartość'
},
digits: {
'default': 'Wprowadź tylko cyfry'
},
ean: {
'default': 'Wprowadź poprawny numer EAN'
},
emailAddress: {
'default': 'Wprowadź poprawny adres e-mail'
},
file: {
'default': 'Wybierz prawidłowy plik'
},
greaterThan: {
'default': 'Wprowadź wartość większą bądź równą %s',
notInclusive: 'Wprowadź wartość większą niż %s'
},
grid: {
'default': 'Wprowadź poprawny numer GRId'
},
hex: {
'default': 'Wprowadź poprawną liczbę w formacie heksadecymalnym'
},
hexColor: {
'default': 'Wprowadź poprawny kolor w formacie hex'
},
iban: {
'default': 'Wprowadź poprawny numer IBAN',
countryNotSupported: 'Kod kraju %s nie jest obsługiwany',
country: 'Wprowadź poprawny numer IBAN w kraju %s',
countries: {
AD: 'Andora',
AE: 'Zjednoczone Emiraty Arabskie',
AL: 'Albania',
AO: 'Angola',
AT: 'Austria',
AZ: 'Azerbejdżan',
BA: 'Bośnia i Hercegowina',
BE: 'Belgia',
BF: 'Burkina Faso',
BG: 'Bułgaria',
BH: 'Bahrajn',
BI: 'Burundi',
BJ: 'Benin',
BR: 'Brazylia',
CH: 'Szwajcaria',
CI: 'Wybrzeże Kości Słoniowej',
CM: 'Kamerun',
CR: 'Kostaryka',
CV: 'Republika Zielonego Przylądka',
CY: 'Cypr',
CZ: 'Czechy',
DE: 'Niemcy',
DK: 'Dania',
DO: 'Dominikana',
DZ: 'Algeria',
EE: 'Estonia',
ES: 'Hiszpania',
FI: 'Finlandia',
FO: 'Wyspy Owcze',
FR: 'Francja',
GB: 'Wielka Brytania',
GE: 'Gruzja',
GI: 'Gibraltar',
GL: 'Grenlandia',
GR: 'Grecja',
GT: 'Gwatemala',
HR: 'Chorwacja',
HU: 'Węgry',
IE: 'Irlandia',
IL: 'Izrael',
IR: 'Iran',
IS: 'Islandia',
IT: 'Włochy',
JO: 'Jordania',
KW: 'Kuwejt',
KZ: 'Kazahstan',
LB: 'Liban',
LI: 'Liechtenstein',
LT: 'Litwa',
LU: 'Luksemburg',
LV: 'Łotwa',
MC: 'Monako',
MD: 'Mołdawia',
ME: 'Czarnogóra',
MG: 'Madagaskar',
MK: 'Macedonia',
ML: 'Mali',
MR: 'Mauretania',
MT: 'Malta',
MU: 'Mauritius',
MZ: 'Mozambik',
NL: 'Holandia',
NO: 'Norwegia',
PK: 'Pakistan',
PL: 'Polska',
PS: 'Palestyna',
PT: 'Portugalia',
QA: 'Katar',
RO: 'Rumunia',
RS: 'Serbia',
SA: 'Arabia Saudyjska',
SE: 'Szwecja',
SI: 'Słowenia',
SK: 'Słowacja',
SM: 'San Marino',
SN: 'Senegal',
TN: 'Tunezja',
TR: 'Turcja',
VG: 'Brytyjskie Wyspy Dziewicze'
}
},
id: {
'default': 'Wprowadź poprawny numer identyfikacyjny',
countryNotSupported: 'Kod kraju %s nie jest obsługiwany',
country: 'Wprowadź poprawny numer identyfikacyjny w kraju %s',
countries: {
BA: 'Bośnia i Hercegowina',
BG: 'Bułgaria',
BR: 'Brazylia',
CH: 'Szwajcaria',
CL: 'Chile',
CN: 'Chiny',
CZ: 'Czechy',
DK: 'Dania',
EE: 'Estonia',
ES: 'Hiszpania',
FI: 'Finlandia',
HR: 'Chorwacja',
IE: 'Irlandia',
IS: 'Islandia',
LT: 'Litwa',
LV: 'Łotwa',
ME: 'Czarnogóra',
MK: 'Macedonia',
NL: 'Holandia',
RO: 'Rumunia',
RS: 'Serbia',
SE: 'Szwecja',
SI: 'Słowenia',
SK: 'Słowacja',
SM: 'San Marino',
TH: 'Tajlandia',
ZA: 'Republika Południowej Afryki'
}
},
identical: {
'default': 'Wprowadź taką samą wartość'
},
imei: {
'default': 'Wprowadź poprawny numer IMEI'
},
imo: {
'default': 'Wprowadź poprawny numer IMO'
},
integer: {
'default': 'Wprowadź poprawną liczbę całkowitą'
},
ip: {
'default': 'Wprowadź poprawny adres IP',
ipv4: 'Wprowadź poprawny adres IPv4',
ipv6: 'Wprowadź poprawny adres IPv6'
},
isbn: {
'default': 'Wprowadź poprawny numer ISBN'
},
isin: {
'default': 'Wprowadź poprawny numer ISIN'
},
ismn: {
'default': 'Wprowadź poprawny numer ISMN'
},
issn: {
'default': 'Wprowadź poprawny numer ISSN'
},
lessThan: {
'default': 'Wprowadź wartość mniejszą bądź równą %s',
notInclusive: 'Wprowadź wartość mniejszą niż %s'
},
mac: {
'default': 'Wprowadź poprawny adres MAC'
},
meid: {
'default': 'Wprowadź poprawny numer MEID'
},
notEmpty: {
'default': 'Wprowadź wartość, pole nie może być puste'
},
numeric: {
'default': 'Wprowadź poprawną liczbę zmiennoprzecinkową'
},
phone: {
'default': 'Wprowadź poprawny numer telefonu',
countryNotSupported: 'Kod kraju %s nie jest wspierany',
country: 'Wprowadź poprawny numer telefonu w kraju %s',
countries: {
BR: 'Brazylia',
CN: 'Chiny',
CZ: 'Czechy',
DE: 'Niemcy',
DK: 'Dania',
ES: 'Hiszpania',
FR: 'Francja',
GB: 'Wielka Brytania',
MA: 'Maroko',
PK: 'Pakistan',
RO: 'Rumunia',
RU: 'Rosja',
SK: 'Słowacja',
TH: 'Tajlandia',
US: 'USA',
VE: 'Wenezuela'
}
},
regexp: {
'default': 'Wprowadź wartość pasującą do wzoru'
},
remote: {
'default': 'Wprowadź poprawną wartość'
},
rtn: {
'default': 'Wprowadź poprawny numer RTN'
},
sedol: {
'default': 'Wprowadź poprawny numer SEDOL'
},
siren: {
'default': 'Wprowadź poprawny numer SIREN'
},
siret: {
'default': 'Wprowadź poprawny numer SIRET'
},
step: {
'default': 'Wprowadź wielokrotność %s'
},
stringCase: {
'default': 'Wprowadź tekst składającą się tylko z małych liter',
upper: 'Wprowadź tekst składający się tylko z dużych liter'
},
stringLength: {
'default': 'Wprowadź wartość o poprawnej długości',
less: 'Wprowadź mniej niż %s znaków',
more: 'Wprowadź więcej niż %s znaków',
between: 'Wprowadź wartość składająca się z min %s i max %s znaków'
},
uri: {
'default': 'Wprowadź poprawny URI'
},
uuid: {
'default': 'Wprowadź poprawny numer UUID',
version: 'Wprowadź poprawny numer UUID w wersji %s'
},
vat: {
'default': 'Wprowadź poprawny numer VAT',
            countryNotSupported: 'Kod kraju %s nie jest wspierany',
country: 'Wprowadź poprawny numer VAT w kraju %s',
countries: {
AT: 'Austria',
BE: 'Belgia',
BG: 'Bułgaria',
BR: 'Brazylia',
CH: 'Szwajcaria',
CY: 'Cypr',
CZ: 'Czechy',
DE: 'Niemcy',
DK: 'Dania',
EE: 'Estonia',
ES: 'Hiszpania',
FI: 'Finlandia',
FR: 'Francja',
GB: 'Wielka Brytania',
GR: 'Grecja',
EL: 'Grecja',
HU: 'Węgry',
HR: 'Chorwacja',
IE: 'Irlandia',
IS: 'Islandia',
IT: 'Włochy',
LT: 'Litwa',
LU: 'Luksemburg',
LV: 'Łotwa',
MT: 'Malta',
NL: 'Holandia',
NO: 'Norwegia',
PL: 'Polska',
PT: 'Portugalia',
RO: 'Rumunia',
RU: 'Rosja',
RS: 'Serbia',
SE: 'Szwecja',
SI: 'Słowenia',
SK: 'Słowacja',
VE: 'Wenezuela',
ZA: 'Republika Południowej Afryki'
}
},
vin: {
'default': 'Wprowadź poprawny numer VIN'
},
zipCode: {
'default': 'Wprowadź poprawny kod pocztowy',
countryNotSupported: 'Kod kraju %s nie jest obsługiwany',
country: 'Wprowadź poprawny kod pocztowy w kraju %s',
countries: {
AT: 'Austria',
BR: 'Brazylia',
CA: 'Kanada',
CH: 'Szwajcaria',
CZ: 'Czechy',
DE: 'Niemcy',
DK: 'Dania',
FR: 'Francja',
GB: 'Wielka Brytania',
IE: 'Irlandia',
IT: 'Włochy',
MA: 'Maroko',
NL: 'Holandia',
PT: 'Portugalia',
RO: 'Rumunia',
RU: 'Rosja',
SE: 'Szwecja',
SG: 'Singapur',
SK: 'Słowacja',
US: 'USA'
}
}
});
}(window.jQuery));
/DynaMIT-1.1.5.tar.gz/DynaMIT-1.1.5/dynamit/graphProtSearcher.py
from __future__ import print_function
from __future__ import division
from builtins import str
import os, shutil, subprocess, itertools
from subprocess import CalledProcessError
import dynamit.motifSearcher
import dynamit.utils
class GraphProtSearcher(dynamit.motifSearcher.MotifSearcher):
"""Class implementing a motif searcher running the
GraphProt motif search tool, identifying the binding
preferences for an RBP. Requires a positive binding
sequences set (the input sequences) and a negative
non-binding set (specified in the searcher configuration).
"""
def __init__(self):
"""Initialize all class attributes with their default values.
"""
super(self.__class__, self).__init__()
self.searcherName = "GraphProt"
self.path = ""
self.sfoldPath = ""
self.negativesFilename = ""
def setConfiguration(self, path, params):
"""Loads the searcher parameters specified in the configuration file.
Args:
path: path of the GraphProt executable file.
params: parameters to be given to GraphProt along with the sequences filename.
Returns:
Returns 0 if everything went fine, 1 and an error message otherwise.
"""
if path != "":
self.path = path
if params != "":
info = params.split(',')
# store negative sequences filename.
self.negativesFilename = info[0]
# store additional parameters, if any.
if len(info) > 1:
self.params = info[1]
if self.negativesFilename == "":
print("[ERROR] Negative sequences (unbound sites) filename " \
"specification missing.")
return 1
return 0
def runSearch(self, sequencesFilename):
"""Performs motif search with the GraphProt
tool, looking for the RBP binding preference,
with provided input sequences filename, negative
non-bound sequences and configured parameters.
Args:
sequencesFilename: input sequences filename for this run.
Returns:
Returns a list of strings representing motifs matches if
everything went fine (details on results filenames, etc., are
printed to the console); returns 1 and an error message otherwise.
"""
# get current working directory
cwd = os.getcwd()
# move to GraphProt directory (it won't work if launched outside it)
os.chdir(self.path)
# prepare sequences list to be later passed to processGraphProtResults.
sequences = [(seq.description, str(seq.seq)) for seq in \
dynamit.utils.getSequencesRecords(sequencesFilename)]
try:
# perform model training.
completePath = "perl " + os.path.join(self.path, "GraphProt.pl") + \
" -action train -fasta \"" + sequencesFilename + \
"\" -negfasta \"" + self.negativesFilename + "\""
subprocess.check_output(completePath, shell=True, stderr=subprocess.STDOUT)
# perform binding sites prediction
completePath = os.path.join(self.path, "GraphProt.pl") + \
" -action predict_has -fasta \"" + \
sequencesFilename + "\" -negfasta \"" + \
self.negativesFilename + "\" -model GraphProt.model"
subprocess.check_output(completePath, shell=True, stderr=subprocess.STDOUT)
# check if GraphProt results exist
if os.path.isfile("GraphProt.has"):
# extract results
print(" [GraphProtSearcher] Search completed.")
self.searchResults = self._processGraphProtResults(sequences,
"GraphProt.has")
else:
print("[ERROR] Could not find GraphProt results file.")
return 1
# go back to working directory.
os.chdir(cwd)
except CalledProcessError as e:
# inform about the error that happened,
print("[ERROR] GraphProt execution terminated with an error:" + e.output)
# go back to working directory
os.chdir(cwd)
# abort searcher execution.
return 1
# move GraphProt results files to our working folder.
if os.path.isfile(os.path.join(self.path, "GraphProt.model")):
shutil.move(os.path.join(self.path, "GraphProt.model"),
os.path.join(cwd, "GraphProt.model"))
if os.path.isfile(os.path.join(self.path, "GraphProt.has")):
shutil.move(os.path.join(self.path, "GraphProt.has"),
os.path.join(cwd, "GraphProt.has"))
print(" [GraphProtSearcher] Execution completed.")
return self.searchResults
def _processGraphProtResults(self, sequences, resultsFilename):
""" Process results contained in GraphProt output files to
produce a table for subsequent DynaMIT phases.
Args:
sequences: list of sequences sorted by their position in
the input sequences filename, allow to map GraphProt
sequences IDs to input filename IDs.
resultsFilename: the GraphProt results filename.
Returns:
Returns a list of strings, one per motif match, containing
motif sequence, sequence id, match position, etc.
"""
print(" [GraphProtSearcher] Processing results: <", \
os.path.basename(resultsFilename), ">")
try:
# get results lines from GraphProt output file.
with open(resultsFilename) as f:
lines = f.readlines()
processedResults = []
positionsBySeq = {}
if len(lines) == 0:
# GraphProt found zero motifs, so return an empty results list.
return []
else:
# now read lines to put together contiguous nucleotides to form
# instances of the motifs.
for line in lines:
info = line.rstrip('\n').split('\t')
if info[0] not in positionsBySeq:
positionsBySeq[info[0]] = []
positionsBySeq[info[0]].append(int(info[1]))
for seq in list(positionsBySeq.keys()):
# get ranges of contiguous positions to form motifs matches.
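					# The (index - value) key is constant within a run of consecutive
					# positions, so groupby() splits the position list into maximal
					# runs of contiguous nucleotides.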
for _, g in itertools.groupby(enumerate(positionsBySeq[seq]),
lambda index_item: index_item[0]-index_item[1]):
# get the list of contiguous positions for this range.
matchPos = [v[1] for v in g]
# if motif match is longer than 2 nucleotides.
if max(matchPos) - min(matchPos) > 1:
# add current match to the list of GraphProt matches.
fullSeqID = dynamit.utils.getFullSequenceID(sequences,
sequences[int(seq)][0],
max(matchPos)+1)
processedResults.append(sequences[int(seq)][1][min(matchPos)-1:max(matchPos)] + \
"\tsequence\t" + self.searcherName + "\t" + \
fullSeqID + "\t" + \
str(min(matchPos)) + "\t" + \
str(max(matchPos)+1))
# return processed motifs matches.
return processedResults
except (IOError, IndexError, KeyError, RuntimeError, ValueError) as e:
print(" [GraphProtSearcher] Unexpected error:%s" % str(e))
return 1 | PypiClean |
/AeroSandbox-4.1.1.tar.gz/AeroSandbox-4.1.1/aerosandbox/weights/mass_properties_of_shapes.py | import aerosandbox.numpy as np
from aerosandbox.weights.mass_properties import MassProperties
"""
Most of these relations are taken from:
https://en.wikipedia.org/wiki/List_of_moments_of_inertia
"""
def mass_properties_from_radius_of_gyration(
mass: float,
x_cg: float = 0,
y_cg: float = 0,
z_cg: float = 0,
radius_of_gyration_x: float = 0,
radius_of_gyration_y: float = 0,
radius_of_gyration_z: float = 0,
) -> MassProperties:
"""
Returns the mass properties of an object, given its radius of gyration.
    It's assumed that the principal axes of the inertia tensor are aligned with the coordinate axes.
This is a shorthand convenience function for common usage of the MassProperties constructor. For more detailed
use, use the MassProperties object directly.
Args:
mass: Mass [kg]
x_cg: x-position of the center of gravity
y_cg: y-position of the center of gravity
z_cg: z-position of the center of gravity
radius_of_gyration_x: Radius of gyration along the x-axis, about the center of gravity [m]
radius_of_gyration_y: Radius of gyration along the y-axis, about the center of gravity [m]
radius_of_gyration_z: Radius of gyration along the z-axis, about the center of gravity [m]
Returns: MassProperties object.
"""
return MassProperties(
mass=mass,
x_cg=x_cg,
y_cg=y_cg,
z_cg=z_cg,
Ixx=mass * radius_of_gyration_x ** 2,
Iyy=mass * radius_of_gyration_y ** 2,
Izz=mass * radius_of_gyration_z ** 2,
Ixy=0,
Iyz=0,
Ixz=0,
)
def mass_properties_of_ellipsoid(
mass: float,
radius_x: float,
radius_y: float,
radius_z: float,
) -> MassProperties:
"""
Returns the mass properties of an ellipsoid centered on the origin.
Args:
mass: Mass [kg]
radius_x: Radius along the x-axis [m]
radius_y: Radius along the y-axis [m]
radius_z: Radius along the z-axis [m]
Returns: MassProperties object.
"""
return MassProperties(
mass=mass,
x_cg=0,
y_cg=0,
z_cg=0,
Ixx=0.2 * mass * (radius_y ** 2 + radius_z ** 2),
Iyy=0.2 * mass * (radius_z ** 2 + radius_x ** 2),
Izz=0.2 * mass * (radius_x ** 2 + radius_y ** 2),
Ixy=0,
Iyz=0,
Ixz=0,
)
def mass_properties_of_sphere(
mass: float,
radius: float,
) -> MassProperties:
"""
Returns the mass properties of a sphere centered on the origin.
Args:
mass: Mass [kg]
radius: Radius [m]
Returns: MassProperties object.
"""
return mass_properties_of_ellipsoid(
mass=mass,
radius_x=radius,
radius_y=radius,
radius_z=radius
)
def mass_properties_of_rectangular_prism(
mass: float,
length_x: float,
length_y: float,
length_z: float,
) -> MassProperties:
"""
Returns the mass properties of a rectangular prism centered on the origin.
Args:
mass: Mass [kg]
length_x: Side length along the x-axis [m]
length_y: Side length along the y-axis [m]
length_z: Side length along the z-axis [m]
Returns: MassProperties object.
"""
return MassProperties(
mass=mass,
x_cg=0,
y_cg=0,
z_cg=0,
Ixx=1 / 12 * mass * (length_y ** 2 + length_z ** 2),
Iyy=1 / 12 * mass * (length_z ** 2 + length_x ** 2),
Izz=1 / 12 * mass * (length_x ** 2 + length_y ** 2),
Ixy=0,
Iyz=0,
Ixz=0,
)
def mass_properties_of_cube(
mass: float,
side_length: float,
) -> MassProperties:
"""
Returns the mass properties of a cube centered on the origin.
Args:
mass: Mass [kg]
side_length: Side length of the cube [m]
Returns: MassProperties object.
"""
return mass_properties_of_rectangular_prism(
mass=mass,
length_x=side_length,
length_y=side_length,
length_z=side_length,
) | PypiClean |
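# ----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The numbers below
# are arbitrary assumptions chosen only to demonstrate the helpers above; both
# constructions should yield the same inertia tensor for the same body.
if __name__ == '__main__':
    # A 2 kg fuselage approximated as a slender ellipsoid (semi-axes in meters).
    fuselage = mass_properties_of_ellipsoid(
        mass=2.0,
        radius_x=0.60,
        radius_y=0.12,
        radius_z=0.12,
    )
    # The same body described via its radii of gyration, k_i = sqrt(I_ii / m).
    fuselage_from_k = mass_properties_from_radius_of_gyration(
        mass=2.0,
        radius_of_gyration_x=(0.2 * (0.12 ** 2 + 0.12 ** 2)) ** 0.5,
        radius_of_gyration_y=(0.2 * (0.12 ** 2 + 0.60 ** 2)) ** 0.5,
        radius_of_gyration_z=(0.2 * (0.60 ** 2 + 0.12 ** 2)) ** 0.5,
    )
    print(fuselage.Ixx, fuselage_from_k.Ixx)  # both equal 0.2 * 2.0 * (0.12**2 + 0.12**2)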
/Newcalls-0.0.1-cp37-cp37m-win_amd64.whl/newcalls/node_modules/@types/node/globals.d.ts | interface ErrorConstructor {
/** Create .stack property on a target object */
captureStackTrace(targetObject: object, constructorOpt?: Function): void;
/**
* Optional override for formatting stack traces
*
* @see https://v8.dev/docs/stack-trace-api#customizing-stack-traces
*/
prepareStackTrace?: ((err: Error, stackTraces: NodeJS.CallSite[]) => any) | undefined;
stackTraceLimit: number;
}
/*-----------------------------------------------*
* *
* GLOBAL *
* *
------------------------------------------------*/
// For backwards compatibility
interface NodeRequire extends NodeJS.Require { }
interface RequireResolve extends NodeJS.RequireResolve { }
interface NodeModule extends NodeJS.Module { }
declare var process: NodeJS.Process;
declare var console: Console;
declare var __filename: string;
declare var __dirname: string;
declare var require: NodeRequire;
declare var module: NodeModule;
// Same as module.exports
declare var exports: any;
/**
* Only available if `--expose-gc` is passed to the process.
*/
declare var gc: undefined | (() => void);
//#region borrowed
// from https://github.com/microsoft/TypeScript/blob/38da7c600c83e7b31193a62495239a0fe478cb67/lib/lib.webworker.d.ts#L633 until moved to separate lib
/** A controller object that allows you to abort one or more DOM requests as and when desired. */
interface AbortController {
/**
* Returns the AbortSignal object associated with this object.
*/
readonly signal: AbortSignal;
/**
* Invoking this method will set this object's AbortSignal's aborted flag and signal to any observers that the associated activity is to be aborted.
*/
abort(reason?: any): void;
}
/** A signal object that allows you to communicate with a DOM request (such as a Fetch) and abort it if required via an AbortController object. */
interface AbortSignal extends EventTarget {
/**
* Returns true if this AbortSignal's AbortController has signaled to abort, and false otherwise.
*/
readonly aborted: boolean;
readonly reason: any;
onabort: null | ((this: AbortSignal, event: Event) => any);
throwIfAborted(): void;
}
declare var AbortController: typeof globalThis extends {onmessage: any; AbortController: infer T}
? T
: {
prototype: AbortController;
new(): AbortController;
};
declare var AbortSignal: typeof globalThis extends {onmessage: any; AbortSignal: infer T}
? T
: {
prototype: AbortSignal;
new(): AbortSignal;
abort(reason?: any): AbortSignal;
timeout(milliseconds: number): AbortSignal;
};
//#endregion borrowed
//#region ArrayLike.at()
interface RelativeIndexable<T> {
/**
* Takes an integer value and returns the item at that index,
* allowing for positive and negative integers.
* Negative integers count back from the last item in the array.
*/
at(index: number): T | undefined;
}
interface String extends RelativeIndexable<string> {}
interface Array<T> extends RelativeIndexable<T> {}
interface ReadonlyArray<T> extends RelativeIndexable<T> {}
interface Int8Array extends RelativeIndexable<number> {}
interface Uint8Array extends RelativeIndexable<number> {}
interface Uint8ClampedArray extends RelativeIndexable<number> {}
interface Int16Array extends RelativeIndexable<number> {}
interface Uint16Array extends RelativeIndexable<number> {}
interface Int32Array extends RelativeIndexable<number> {}
interface Uint32Array extends RelativeIndexable<number> {}
interface Float32Array extends RelativeIndexable<number> {}
interface Float64Array extends RelativeIndexable<number> {}
interface BigInt64Array extends RelativeIndexable<bigint> {}
interface BigUint64Array extends RelativeIndexable<bigint> {}
//#endregion ArrayLike.at() end
/**
* @since v17.0.0
*
* Creates a deep clone of an object.
*/
declare function structuredClone<T>(
value: T,
transfer?: { transfer: ReadonlyArray<import('worker_threads').TransferListItem> },
): T;
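/**
 * Illustrative example (not part of the declarations): structuredClone performs a
 * deep copy, so mutating the clone does not affect the original.
 *
 *     const original = { nested: { value: 1 } };
 *     const copy = structuredClone(original);
 *     copy.nested.value = 2; // original.nested.value is still 1
 */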
/*----------------------------------------------*
* *
* GLOBAL INTERFACES *
* *
*-----------------------------------------------*/
declare namespace NodeJS {
interface CallSite {
/**
* Value of "this"
*/
getThis(): unknown;
/**
* Type of "this" as a string.
* This is the name of the function stored in the constructor field of
* "this", if available. Otherwise the object's [[Class]] internal
* property.
*/
getTypeName(): string | null;
/**
* Current function
*/
getFunction(): Function | undefined;
/**
* Name of the current function, typically its name property.
* If a name property is not available an attempt will be made to try
* to infer a name from the function's context.
*/
getFunctionName(): string | null;
/**
* Name of the property [of "this" or one of its prototypes] that holds
* the current function
*/
getMethodName(): string | null;
/**
* Name of the script [if this function was defined in a script]
*/
getFileName(): string | undefined;
/**
* Current line number [if this function was defined in a script]
*/
getLineNumber(): number | null;
/**
* Current column number [if this function was defined in a script]
*/
getColumnNumber(): number | null;
/**
* A call site object representing the location where eval was called
* [if this function was created using a call to eval]
*/
getEvalOrigin(): string | undefined;
/**
* Is this a toplevel invocation, that is, is "this" the global object?
*/
isToplevel(): boolean;
/**
* Does this call take place in code defined by a call to eval?
*/
isEval(): boolean;
/**
* Is this call in native V8 code?
*/
isNative(): boolean;
/**
* Is this a constructor call?
*/
isConstructor(): boolean;
}
interface ErrnoException extends Error {
errno?: number | undefined;
code?: string | undefined;
path?: string | undefined;
syscall?: string | undefined;
}
interface ReadableStream extends EventEmitter {
readable: boolean;
read(size?: number): string | Buffer;
setEncoding(encoding: BufferEncoding): this;
pause(): this;
resume(): this;
isPaused(): boolean;
pipe<T extends WritableStream>(destination: T, options?: { end?: boolean | undefined; }): T;
unpipe(destination?: WritableStream): this;
unshift(chunk: string | Uint8Array, encoding?: BufferEncoding): void;
wrap(oldStream: ReadableStream): this;
[Symbol.asyncIterator](): AsyncIterableIterator<string | Buffer>;
}
interface WritableStream extends EventEmitter {
writable: boolean;
write(buffer: Uint8Array | string, cb?: (err?: Error | null) => void): boolean;
write(str: string, encoding?: BufferEncoding, cb?: (err?: Error | null) => void): boolean;
end(cb?: () => void): this;
end(data: string | Uint8Array, cb?: () => void): this;
end(str: string, encoding?: BufferEncoding, cb?: () => void): this;
}
interface ReadWriteStream extends ReadableStream, WritableStream { }
interface RefCounted {
ref(): this;
unref(): this;
}
type TypedArray =
| Uint8Array
| Uint8ClampedArray
| Uint16Array
| Uint32Array
| Int8Array
| Int16Array
| Int32Array
| BigUint64Array
| BigInt64Array
| Float32Array
| Float64Array;
type ArrayBufferView = TypedArray | DataView;
interface Require {
(id: string): any;
resolve: RequireResolve;
cache: Dict<NodeModule>;
/**
* @deprecated
*/
extensions: RequireExtensions;
main: Module | undefined;
}
interface RequireResolve {
(id: string, options?: { paths?: string[] | undefined; }): string;
paths(request: string): string[] | null;
}
interface RequireExtensions extends Dict<(m: Module, filename: string) => any> {
'.js': (m: Module, filename: string) => any;
'.json': (m: Module, filename: string) => any;
'.node': (m: Module, filename: string) => any;
}
interface Module {
/**
* `true` if the module is running during the Node.js preload
*/
isPreloading: boolean;
exports: any;
require: Require;
id: string;
filename: string;
loaded: boolean;
/** @deprecated since v14.6.0 Please use `require.main` and `module.children` instead. */
parent: Module | null | undefined;
children: Module[];
/**
* @since v11.14.0
*
* The directory name of the module. This is usually the same as the path.dirname() of the module.id.
*/
path: string;
paths: string[];
}
interface Dict<T> {
[key: string]: T | undefined;
}
interface ReadOnlyDict<T> {
readonly [key: string]: T | undefined;
}
} | PypiClean |
/ANNOgesic-1.1.14.linux-x86_64.tar.gz/usr/local/lib/python3.10/dist-packages/annogesiclib/circrna.py | import os
import sys
import time
import shutil
import copy
from glob import glob
from subprocess import call, Popen
from annogesiclib.multiparser import Multiparser
from annogesiclib.helper import Helper
from annogesiclib.converter import Converter
from annogesiclib.circRNA_detection import detect_circrna
class CircRNADetection(object):
'''Detection of circRNA'''
def __init__(self, args_circ):
self.multiparser = Multiparser()
self.helper = Helper()
self.converter = Converter()
self.alignment_path = os.path.join(args_circ.output_folder,
"segemehl_alignment_files")
self.splice_path = os.path.join(args_circ.output_folder,
"segemehl_splice_results")
self.candidate_path = os.path.join(args_circ.output_folder,
"circRNA_tables")
self.gff_folder = os.path.join(args_circ.output_folder, "gffs")
self.gff_path = os.path.join(args_circ.gffs, "tmp")
self.splices = {"file": "splicesites.bed",
"splice": "splicesites"}
self.trans = {"file": "transrealigned.bed",
"trans": "transrealigned"}
self.fasta_path = os.path.join(args_circ.fastas, "tmp")
def _wait_process(self, processes):
'''wait for the parallels to finish the process'''
for p in processes:
p.wait()
if p.stdout:
p.stdout.close()
if p.stdin:
p.stdin.close()
if p.stderr:
p.stderr.close()
try:
p.kill()
except OSError:
pass
time.sleep(5)
def _deal_zip_file(self, read_files, log):
tmp_datas = []
tmp_reads = []
for reads in read_files:
zips = []
tmp_datas = reads["files"]
for read in reads["files"]:
if read.endswith(".bz2"):
mod_read = read.replace(".bz2", "")
if (".fa" not in mod_read) and (
".fasta" not in mod_read) and (
".fna" not in mod_read) and (
".fq" not in mod_read) and (
".fastq" not in mod_read):
mod_read = mod_read + ".fa"
read_out = open(mod_read, "w")
tmp_datas.append(mod_read)
zips.append(mod_read)
print(" ".join(["Uncompressing", read]))
log.write(" ".join(["bzcat", read]) + "\n")
call(["bzcat", read], stdout=read_out)
log.write("\t" + mod_read + " is generated.\n")
read_out.close()
elif read.endswith(".gz"):
mod_read = read.replace(".gz", "")
if (".fa" not in mod_read) and (
".fasta" not in mod_read) and (
".fna" not in mod_read) and (
".fq" not in mod_read) and (
".fastq" not in mod_read):
mod_read = mod_read + ".fa"
read_out = open(mod_read, "w")
tmp_datas.append(mod_read)
zips.append(mod_read)
print(" ".join(["Uncompressing", read]))
log.write(" ".join(["zcat", read]) + "\n")
call(["zcat", read], stdout=read_out)
read_out.close()
log.write("\t" + mod_read + " is generated.\n")
tmp_reads.append({"sample": reads["sample"],
"files": tmp_datas, "zips": zips})
return tmp_reads
def _run_segemehl_fasta_index(self, segemehl_path, fasta_path,
index, fasta, log):
log.write(" ".join([segemehl_path,
"-x", os.path.join(fasta_path, index),
"-d", os.path.join(fasta_path, fasta)]) + "\n")
call([segemehl_path,
"-x", os.path.join(fasta_path, index),
"-d", os.path.join(fasta_path, fasta)])
def _run_segemehl_align(self, args_circ, index, fasta, read,
sam_file, log_file, fasta_prefix, log):
out = open(os.path.join(self.alignment_path,
fasta_prefix, sam_file), "w")
        # Open a per-run log file for segemehl's stderr under a separate name,
        # so that it does not shadow the main ``log`` handle passed in.
        align_log = open(os.path.join(self.alignment_path,
                                      fasta_prefix, log_file), "w")
        log.write(" ".join([args_circ.segemehl_path,
                            "-i", os.path.join(self.fasta_path, index),
                            "-d", os.path.join(self.fasta_path, fasta),
                            "-q", read, "-S"]) + "\n")
        p = Popen([args_circ.segemehl_path,
                   "-i", os.path.join(self.fasta_path, index),
                   "-d", os.path.join(self.fasta_path, fasta),
                   "-q", read, "-S"],
                  stdout=out, stderr=align_log)
        return p
def _align(self, args_circ, read_datas, log):
        '''Align the reads. If BAM files are provided, this step can be skipped.'''
prefixs = []
align_files = []
log.write("Using segemehl to align the read.\n")
log.write("Please make sure the version of segemehl is at least 0.1.9.\n")
for fasta in os.listdir(self.fasta_path):
index = fasta.replace(".fa", ".idx")
self._run_segemehl_fasta_index(args_circ.segemehl_path,
self.fasta_path, index, fasta, log)
processes = []
num_process = 0
fasta_prefix = fasta.replace(".fa", "")
prefixs.append(fasta_prefix)
self.helper.check_make_folder(os.path.join(
self.alignment_path, fasta_prefix))
log.write("Running for {0}.\n".format(fasta_prefix))
for reads in read_datas:
for read in reads["files"]:
num_process += 1
read_name = read.split("/")[-1]
if read_name.endswith(".fa") or \
read_name.endswith(".fna") or \
read_name.endswith(".fasta") or \
read_name.endswith(".fq") or \
read_name.endswith(".fastq"):
filename = read_name.split(".")
read_prefix = ".".join(filename[:-1])
sam_file = "_".join([read_prefix, fasta_prefix + ".sam"])
log_file = "_".join([read_prefix, fasta_prefix + ".log"])
align_files.append("_".join([read_prefix, fasta_prefix]))
print("Mapping {0}".format(sam_file))
p = self._run_segemehl_align(
args_circ, index, fasta, read,
sam_file, log_file, fasta_prefix, log)
processes.append(p)
if num_process == args_circ.cores:
self._wait_process(processes)
num_process = 0
self._wait_process(processes)
log.write("Done!\n")
log.write("The following files are generated in {0}:\n".format(
os.path.join(self.alignment_path, fasta_prefix)))
for file_ in os.listdir(os.path.join(
self.alignment_path, fasta_prefix)):
log.write("\t" + file_ + "\n")
return align_files, prefixs
def _run_samtools_convert_bam(self, samtools_path, pre_sam, out_bam, log):
log.write(" ".join([samtools_path, "view",
"-bS", pre_sam, "-o", out_bam]) + "\n")
call([samtools_path, "view", "-bS", pre_sam, "-o", out_bam])
def _convert_sam2bam(self, sub_alignment_path, samtools_path, align_files, log):
bam_files = []
convert_ones = []
remove_ones = []
log.write("Using Samtools to convert SAM files to BAM files.\n")
log.write("Please make sure the version of Samtools is at least 1.3.1.\n")
for sam in os.listdir(sub_alignment_path):
pre_sam = os.path.join(sub_alignment_path, sam)
if sam.endswith(".sam"):
bam_file = sam.replace(".sam", ".bam")
print("Converting {0} to {1}".format(sam, bam_file))
out_bam = os.path.join(sub_alignment_path, bam_file)
self._run_samtools_convert_bam(samtools_path, pre_sam,
out_bam, log)
bam_files.append(out_bam)
if align_files:
if bam_file.replace(".bam", "") not in align_files:
convert_ones.append(out_bam)
else:
remove_ones.append(pre_sam)
elif sam.endswith(".bam"):
if (pre_sam not in convert_ones) and (
pre_sam not in remove_ones):
bam_files.append(pre_sam)
elif sam.endswith(".log"):
os.remove(pre_sam)
log.write("Done!\n")
log.write("The following files are generated:\n")
for file_ in os.listdir(sub_alignment_path):
if file_.endswith(".bam"):
log.write("\t" + os.path.join(sub_alignment_path, file_) + "\n")
return bam_files, convert_ones, remove_ones
def _run_samtools_merge_sort(self, samtools_path, prefix,
out_folder, bam_datas, log):
log.write("Using Samtools for merging, sorting and converting "
"the BAM files.\n")
log.write("Make sure the version Samtools is at least 1.3.1.\n")
for bam_data in bam_datas:
print("Merging bam files for {0} of {1}".format(
prefix, bam_data["sample"]))
sample_bam = os.path.join(out_folder, "_".join([
prefix, bam_data["sample"] + ".bam"]))
if len(bam_data["files"]) <= 1:
shutil.copyfile(bam_data["files"][0], sample_bam)
else:
file_line = " ".join(bam_data["files"])
log.write(" ".join([samtools_path, "merge",
sample_bam, file_line]) + "\n")
os.system(" ".join([samtools_path, "merge",
sample_bam, file_line]))
print("Sorting bam files for {0} of {1}".format(
prefix, bam_data["sample"]))
sort_sample = os.path.join(out_folder,
"_".join([prefix, bam_data["sample"] + "_sort.bam"]))
log.write(" ".join([samtools_path, "sort",
"-o", sort_sample, sample_bam]) + "\n")
call([samtools_path, "sort", "-o", sort_sample, sample_bam])
os.remove(sample_bam)
print("Converting bam files to sam files for {0} of {1}".format(
prefix, bam_data["sample"]))
log.write(" ".join([samtools_path, "view", "-h", "-o",
sort_sample.replace(".bam", ".sam"), sort_sample]) + "\n")
call([samtools_path, "view", "-h", "-o",
sort_sample.replace(".bam", ".sam"), sort_sample])
log.write("Done!\n")
log.write("\t" + sort_sample.replace(".bam", ".sam") + " is generated.\n")
def _merge_sort_aligment_file(
self, bam_datas, read_datas, samtools_path,
out_folder, convert_ones, tmp_reads, remove_ones, prefix, log):
if bam_datas is None:
merge_bam_datas = []
for read_data in read_datas:
bam_files = []
for read in read_data["files"]:
if read.endswith(".gz") or read.endswith(".bz2"):
read = ".".join(
read.split("/")[-1].split(".")[:-1])
read_prefix = ".".join(
read.split("/")[-1].split(".")[:-1])
bam_files.append(os.path.join(
self.alignment_path, prefix,
"_".join([read_prefix, prefix + ".bam"])))
merge_bam_datas.append({"sample": read_data["sample"],
"files": bam_files})
elif (bam_datas is not None) and (read_datas is not None):
merge_bam_datas = copy.deepcopy(bam_datas)
for bam_data in merge_bam_datas:
for read_data in read_datas:
if bam_data["sample"] == read_data["sample"]:
for read in read_data["files"]:
read_prefix = ".".join(
read.split("/")[-1].split(".")[:-1])
bam = os.path.join(
self.alignment_path, prefix,
"_".join([read_prefix, prefix + ".bam"]))
if (bam not in bam_data["files"]):
bam_data["files"].append(bam)
else:
merge_bam_datas = copy.deepcopy(bam_datas)
self._run_samtools_merge_sort(samtools_path, prefix,
out_folder, merge_bam_datas, log)
for bam in convert_ones:
os.remove(bam)
for sam in remove_ones:
os.remove(sam)
def _run_testrealign(self, prefix, testrealign_path, out_folder, log):
log.write("Using Segemehl to detect circular RNAs.\n")
log.write("Please make sure the version of Segemehl is at least 0.1.9.\n")
log.write("Please make sure your testrealign.x exists. If it does not "
"exists, please reinstall your Segemehl via using make all.\n")
sub_splice_path = os.path.join(self.splice_path, prefix)
if not os.path.exists(sub_splice_path):
os.mkdir(sub_splice_path)
err_log = os.path.join(sub_splice_path, prefix + ".log")
print("Running testrealign.x for {0}".format(prefix))
for sam_file in os.listdir(out_folder):
if sam_file.endswith("sort.sam"):
sample_prefix = sam_file.replace("_sort.sam", "")
command = " ".join([
testrealign_path,
"-d", os.path.join(self.fasta_path, prefix + ".fa"),
"-q", os.path.join(out_folder, sam_file), "-n",
"-U", os.path.join(sub_splice_path,
sample_prefix + "_splicesites.bed"),
"-T", os.path.join(sub_splice_path,
sample_prefix + "_transrealigned.bed")])
log.write(command + " 2>" + err_log + "\n")
os.system(command + " 2>" + err_log)
log.write("Done!\n")
log.write("The following files are generated:\n")
for file_ in os.listdir(sub_splice_path):
log.write("\t" + os.path.join(sub_splice_path, file_) + "\n")
self.helper.remove_all_content(out_folder, ".sam", "file")
def _merge_bed(self, fastas, splice_path, output_folder):
'''Merge the bed files for analysis'''
fa_prefixs = []
for fasta in os.listdir(fastas):
headers = []
if (fasta.endswith(".fa") or fasta.endswith(".fna") or
fasta.endswith(".fasta")):
with open(os.path.join(fastas, fasta), "r") as f_h:
for line in f_h:
line = line.strip()
if line.startswith(">"):
headers.append(line[1:])
filename = fasta.split(".")
fasta_prefix = ".".join(filename[:-1])
fa_prefixs.append(fasta_prefix)
bed_folder = os.path.join(
output_folder, fasta_prefix)
self.helper.check_make_folder(bed_folder)
samples = []
for header in headers:
for splice in os.listdir(os.path.join(
splice_path, header)):
if splice.endswith(".bed"):
if self.splices["file"] in splice:
sample = splice.replace(header, "")
sample = sample.replace(
self.splices["file"], "")
if sample not in samples:
samples.append(sample)
shutil.copyfile(
os.path.join(
splice_path, header, splice),
os.path.join(
bed_folder, "tmp_" + splice))
for sample in samples:
out_splice = os.path.join(bed_folder, "".join([
fasta_prefix + sample + self.splices["file"]]))
out_trans = os.path.join(bed_folder, "".join([
fasta_prefix + sample + self.trans["file"]]))
if os.path.exists(out_splice):
os.remove(out_splice)
if os.path.exists(out_trans):
os.remove(out_trans)
for file_ in os.listdir(bed_folder):
if (self.splices["splice"] in file_) and (
sample in file_):
self.helper.merge_file(os.path.join(
bed_folder, file_), out_splice)
elif (self.trans["trans"] in file_) and (
sample in file_):
self.helper.merge_file(os.path.join(
bed_folder, file_), out_trans)
self.helper.remove_all_content(splice_path, None, "dir")
return samples, fa_prefixs
def _stat_and_gen_gff(self, prefixs, samples, args_circ, log):
'''do statistics and print the result to gff file'''
log.write("Running circRNA.py to do statistics and generate gff files.\n")
log.write("The following files are generated:\n")
for prefix in prefixs:
self.helper.check_make_folder(os.path.join(self.gff_folder,
prefix))
self.helper.check_make_folder(os.path.join(self.splice_path,
prefix))
for bed in os.listdir(os.path.join(
args_circ.output_folder, prefix)):
if (bed.split("_")[0] != "tmp") and (bed.endswith(".bed")):
shutil.copy(
os.path.join(args_circ.output_folder, prefix, bed),
os.path.join(self.splice_path, prefix))
self.helper.check_make_folder(os.path.join(
self.candidate_path, prefix))
print("Comparing circular RNAs with annotations of {0}".format(
prefix))
for sample in samples:
splice_file = os.path.join(
self.splice_path, prefix,
"".join([prefix, sample, self.splices["file"]]))
stat_file = os.path.join(args_circ.stat_folder,
"".join(["stat_", prefix, sample,
"circRNA.csv"]))
csv_all = os.path.join(self.candidate_path, prefix,
"".join([prefix, sample, "circRNA_all.csv"]))
csv_best = os.path.join(self.candidate_path, prefix,
"".join([prefix, sample, "circRNA_best.csv"]))
gff_all = os.path.join(self.gff_folder, prefix,
"".join([prefix, sample, "circRNA_all.gff"]))
gff_best = os.path.join(self.gff_folder, prefix,
"".join([prefix, sample, "circRNA_best.gff"]))
detect_circrna(splice_file, os.path.join(
self.gff_path, prefix + ".gff"), csv_all,
args_circ, stat_file)
self.converter.convert_circ2gff(
os.path.join(self.candidate_path, prefix,
"".join([prefix, sample, "circRNA_all.csv"])),
args_circ, gff_all, gff_best)
log.write("\t" + stat_file + "\n")
log.write("\t" + csv_all + "\n")
log.write("\t" + csv_best + "\n")
log.write("\t" + gff_all + "\n")
log.write("\t" + gff_best + "\n")
def _extract_input_files(self, inputs):
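        # Each entry of ``inputs`` is expected to look like
        # "<sample_name>:<file_1>,<file_2>,...", i.e. a sample label, a colon,
        # and a comma-separated list of existing files.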
input_datas = []
for input_ in inputs:
datas = input_.split(":")
if len(datas) != 2:
print("Error: the format of --bam_files or "
"--read_files is wrong!")
sys.exit()
for file_ in datas[-1].split(","):
if not os.path.exists(file_):
print("Error: some files in --bam_files or "
"--read_files do not exist!")
sys.exit()
input_datas.append({"sample": datas[0],
"files": datas[-1].split(",")})
return input_datas
def _combine_read_bam(self, bam_files, bam_datas, read_datas):
if bam_datas is not None:
for bam_data in bam_datas:
for read_data in read_datas:
if bam_data["sample"] == read_data["sample"]:
for read in read_data["files"]:
prefix = ".".join(
read.split("/")[-1].split(".")[:-1])
bam = os.path.join(self.alignment_path,
prefix + ".bam")
if (bam in bam_files) and (
bam not in bam_data["files"]):
bam_data["files"].append(bam)
else:
bam_datas = []
for read_data in read_datas:
bam_files = []
for read in read_data["files"]:
prefix = ".".join(
read.split("/")[-1].split(".")[:-1])
bam_files.append(os.path.join(
self.alignment_path, prefix + ".bam"))
bam_datas.append({"sample": read_data["sample"],
"files": bam_files})
return bam_datas
def _remove_tmp_files(self, args_circ, fa_prefixs):
self.helper.remove_tmp_dir(args_circ.fastas)
self.helper.remove_tmp_dir(args_circ.gffs)
self.helper.remove_all_content(args_circ.output_folder,
".bam", "file")
for prefix in fa_prefixs:
shutil.rmtree(os.path.join(args_circ.output_folder, prefix))
def run_circrna(self, args_circ, log):
'''detection of circRNA'''
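        # Overall workflow (as implemented below): parse --bam_files/--read_files,
        # check the GFF attributes, split the FASTA/GFF inputs, optionally align
        # raw reads with segemehl, convert/merge/sort the alignments with
        # samtools, detect circular RNA candidates with testrealign.x, merge the
        # resulting BED files, and finally compute statistics and write GFF output.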
bam_datas = None
read_datas = None
if (args_circ.bams is None) and (args_circ.read_files is None):
log.write("--bam_files and --read_files can not be both emtpy.\n")
print("Error: --bam_files or --read_files should be assigned.")
sys.exit()
if args_circ.bams is not None:
bam_datas = self._extract_input_files(args_circ.bams)
if args_circ.read_files is not None:
read_datas = self._extract_input_files(args_circ.read_files)
for gff in os.listdir(args_circ.gffs):
if gff.endswith(".gff"):
self.helper.check_uni_attributes(os.path.join(
args_circ.gffs, gff))
if args_circ.segemehl_path is None:
log.write("segemehl does not exists.\n")
print("Error: please assign segemehl path!!")
sys.exit()
self.multiparser.parser_fasta(args_circ.fastas)
self.multiparser.parser_gff(args_circ.gffs, None)
self.multiparser.combine_gff(args_circ.fastas, self.gff_path,
"fasta", None)
tmp_reads = []
if args_circ.read_files:
log.write("Raw read files are found.\n")
tmp_reads = self._deal_zip_file(read_datas, log)
align_files, prefixs = self._align(args_circ, tmp_reads, log)
else:
align_files = None
prefixs = []
for fasta in os.listdir(self.fasta_path):
if fasta.endswith(".fa"):
fasta_prefix = fasta.replace(".fa", "")
prefixs.append(fasta_prefix)
for prefix in prefixs:
if args_circ.read_files:
sub_alignment_path = os.path.join(self.alignment_path, prefix)
bam_files, convert_ones, remove_ones = self._convert_sam2bam(
sub_alignment_path, args_circ.samtools_path, align_files, log)
else:
convert_ones = []
remove_ones = []
self._merge_sort_aligment_file(
bam_datas, read_datas, args_circ.samtools_path,
args_circ.output_folder,
convert_ones, tmp_reads, remove_ones, prefix, log)
self._run_testrealign(prefix, args_circ.testrealign_path,
args_circ.output_folder, log)
samples, fa_prefixs = self._merge_bed(
args_circ.fastas, self.splice_path, args_circ.output_folder)
self._stat_and_gen_gff(fa_prefixs, samples, args_circ, log)
if len(tmp_reads) != 0:
for reads in tmp_reads:
for read in reads["zips"]:
os.remove(read)
self._remove_tmp_files(args_circ, fa_prefixs) | PypiClean |
/Lokai-0.3.tar.gz/Lokai-0.3/lokai/lk_worker/nodes/search.py |
#-----------------------------------------------------------------------
from sqlalchemy import and_, not_, or_, exists
from sqlalchemy.orm import aliased
from lokai.tool_box.tb_database.orm_interface import engine
from lokai.lk_worker.models import ndNode, ndEdge, ndParent
from lokai.lk_worker.nodes.graph import (top_trees,
child_trees,
)
from lokai.lk_worker.nodes.data_interface import get_node_from_reference
#-----------------------------------------------------------------------
def _make_candidates(*args):
""" Join together a set of potential candidates to form a single
        candidate list.
An empty list is significant. It means the list is empty
(implying that a search on this list will return empty). A
'None' value is taken as undefined.
"""
defined = False
op = []
for possible in args:
if possible is not None:
defined = True
if isinstance(possible, list):
op.extend(possible)
else:
op.append(possible)
if defined:
return op
return None
#-----------------------------------------------------------------------
def search_down(source_selection, tops=None, depth_first=True):
"""Generate a search for nodes at or below each of the nodes
listed in tops and matching some criterion.
source_selection is an ORM query generated by
engine.session.query()... or something similar.
The result is a query that can be used with one(), all() and so
on.
If depth first is false, the query returns all nodes matching
in the set of sub-trees.
If depth first is true, the query returns the first matching
node in each path through the sub-treees.
Depth first search is achieved in two steps: one finds all the
nodes matching the criterion and the other eliminates any nodes
in the result that have an ancestor node also in the result.
Warning:
The search_down function does not work if there is no ndNode
object in the query. The query is generated, and will provide a
result, but the result will not be correct.
"""
#
# Add the tops[] filter ...
base_query = source_selection
candidates = _make_candidates(tops)
if candidates is not None:
base_query = base_query.outerjoin(
(ndParent, ndParent.nde_idx == ndNode.nde_idx)).filter(
or_(ndParent.nde_parent.in_(candidates),
ndNode.nde_idx.in_(candidates)
)
).distinct()
if not depth_first:
return base_query
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#
# Find a version of the query that can be used in the depth_first filter
#-------------------------------------------------------------------
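    # Any candidate whose nde_idx also appears as an ancestor (ndParent.nde_parent)
    # of another candidate is removed via the NOT EXISTS sub-select below, so only
    # the first (shallowest) match on each path survives.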
ndParentInner = aliased(ndParent)
subx = base_query.from_self().outerjoin(
(ndParentInner, ndParentInner.nde_parent ==
ndNode.nde_idx)).add_entity(ndParentInner)
subx = subx.statement.alias('xxx')
base_query = base_query.filter(
not_(exists(['*'],
and_(ndNode.nde_idx == subx.c.nde_idx))))
return base_query
#-----------------------------------------------------------------------
def search_up(source_selection, tops=None):
"""Generate a search for nodes at or above each of the nodes
listed in tops and matching some criterion.
The search returns all nodes matching the original query in all
paths up to the terminating root nodes.
All it has to do is to look in the ndParent entries for each
entry in tops to find a complete set of backward paths.
"""
base_query = source_selection
candidates = _make_candidates(tops)
if candidates is None: # not the same as empty list
candidates = top_trees()
#
# Create a sub-select for parents
ndParentInner = aliased(ndParent)
ndNodeInner = aliased(ndNode)
subx = engine.session.query(ndParentInner.nde_parent).filter(
ndParentInner.nde_idx.in_(candidates)
).union(engine.session.query(ndNodeInner.nde_idx).filter(
ndNodeInner.nde_idx.in_(candidates)))
base_query = base_query.filter(
and_(ndNode.nde_idx.in_(subx)))
return base_query
#-----------------------------------------------------------------------
def search_here(source_selection, tops=None):
"""Apply the search to all the nodes in tops.
Return a modified query.
If tops is empty, apply the search to root nodes of the forest.
"""
candidates = _make_candidates(tops)
if candidates is None: # not the same as empty list
candidates = top_trees()
base_query = source_selection.filter(
ndNode.nde_idx.in_(candidates))
return base_query
#-----------------------------------------------------------------------
def search_children(source_selection, tops=None):
"""Apply the search to all the children of the nodes in tops.
Return a modified query.
If tops is empty, apply the search to root nodes of the forest.
"""
candidates = _make_candidates(tops)
if candidates is None: # not the same as empty list
candidates = top_trees()
base_query = source_selection.filter(
and_(ndEdge.nde_parent.in_(candidates),
ndNode.nde_idx == ndEdge.nde_child))
return base_query
#-----------------------------------------------------------------------
def search_ordered(source_selection, flow, sort_column=None, sort_object=None):
""" Apply an 'order by' using the sort object in the flow direction
"""
query = source_selection
if sort_column:
if flow == 'ASC':
query = query.order_by(
getattr(sort_object, sort_column).asc())
else:
query = query.order_by(
getattr(sort_object, sort_column).desc())
if sort_object != ndNode or sort_column != 'nde_idx':
# Add a default sub-ordering
if flow == 'ASC':
query = query.order_by(ndNode.nde_idx.asc())
else:
query = query.order_by(ndNode.nde_idx.desc())
return query
#-----------------------------------------------------------------------
def _new_candidates(search_function, search_query, candidates, **kwargs):
"""
Return a set of new candidates by applying the search_query to the
given candidates using the search_function.
"""
result_set = search_function(search_query, candidates, **kwargs).all()
op = []
for result in result_set:
op.append(result.nde_idx)
return op
#-----------------------------------------------------------------------
def _make_select(element):
"""
Build a search query based on the given name or part name.
"""
name_search = engine.session.query(ndNode)
if '%' in element:
name_search = name_search.filter(ndNode.nde_name.like(element))
else:
name_search = name_search.filter(ndNode.nde_name == element)
return name_search
#-----------------------------------------------------------------------
def find_in_path(path, tops=None):
"""
Search the given candidate trees for nodes that match the given
path specification.
        The path specification is a pseudo-path made up of one or more
of three element types.
A node is returned if the rightmost element matches the node
name and the route to the node matches the preceding left part
of the specification.
Path elements are:
        '*' matches all the immediate children of each current candidate
node.
'**' matches zero or more levels of decendents. If this
appears at the end of the path it matches the immediate
children only.
name matches the name of a node.
If the path begins with '/' the match starts with the given set of
candidates.
If the path does not begin with '/' the search is done as though
the path starts with /**/
A trailing '/' is ignored.
Examples;
/foo searches the given candidates for one called foo
foo searches the given candidates and all descendents to find
the first match on each branch.
foo/**/bar matches foo/bar, foo/a/bar, foo/a/b/bar and so on.
foo/*/bar matches foo/a/bar but not foo/bar and not foo/a/b/bar
foo/**/*/bar is the same as foo/*/bar
foo/*/**/bar skips one level of descendents
foo/* matches foo and returns all the children of foo
foo/** matches foo and returns all the children of foo
Path can be a list instead of a delimited string. An empty first
element is equivalent to a rooted path.
"""
if len(path) == 0:
#
# Nothing to search for
return []
candidates = _make_candidates(tops)
if candidates is None: # not the same as empty list
candidates = top_trees()
if isinstance(path, (type(''), type(u''))):
path = path.split('/')
# If the path starts with a '/' then we search exactly as given.
if path[0] == '':
path_head = path[0]
path_tail = path[1:]
else:
# Path is not rooted, so we search all the way down
path_head = '**'
path_tail = path
# If the path ends with a '/' we are confused because the meaning
# is not obvious. So we just remove the last bit.
if path_tail[-1] == '':
del path_tail[-1]
while candidates:
#
# Exit by a return statement when all elements have been processed
if path_head == '**':
if len(path_tail) == 0:
return candidates
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
path_head = path_tail[0]
path_tail = path_tail[1:]
if path_head not in ['*', '**']:
candidates = _new_candidates(search_down,
_make_select(path_head),
candidates)
elif path_head == '*':
if len(path_tail) == 0:
return candidates
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
path_head = path_tail[0]
path_tail = path_tail[1:]
#
# candidates currently holds nodes that match where we are
# in the path. We have now isolated the next item in the
# path, so we need to apply this next item to the children
            # of the currently matching nodes.
candidates = child_trees(candidates)
#
elif path_head == '':
if len(path_tail) == 0:
return candidates
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
path_head = path_tail[0]
path_tail = path_tail[1:]
else:
candidates = _new_candidates(search_here,
_make_select(path_head),
candidates)
if not candidates:
# Did not find explicit item - fail
return candidates
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
if len(path_tail) == 0:
return candidates
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
path_head = path_tail[0]
path_tail = path_tail[1:]
#
# candidates currently holds nodes that match where we are
# in the path. We have now isolated the next item in the
# path, so we need to apply this next item to the children
# of the currently matching nodes.
candidates = child_trees(candidates)
return candidates
#-----------------------------------------------------------------------
def find_nodes_by_client_references(refs):
""" Takes a sequence of client references and returns a list of
nodes
"""
if not refs:
return [] #>>>>>>>>>>>>>>>>>>>>
if isinstance(refs, (type(''), type(u''))):
refs = [refs]
return (engine.session.query(ndNode)
.filter(ndNode.nde_client_reference.in_(refs)).all())
#-----------------------------------------------------------------------
def find_from_string(given_string, tops=None, expand=None):
""" Find a node from the given proto-identifier. This might be an
idx, a client reference, a name or a path.
Try each possibility in order of specificity.
:given_string: A possible identifier.
For wildcard name or path, the wildcards are SQL wild
cards: % and _
If the given string is not a direct identifier (nde_idx or
client reference) it is assumed to be a path of node names
and is passed to find_in_path.
:tops: A list of nde_idx values defining the sub-trees to be
searched. If ``tops`` is empty then all possible trees are
searched.
:expand: Optionally, expand=True, the path can be expanded to
include '**' elements between all the parts of the
path. The resulting path is then independent of the
ancestral distance between the nodes found.
"""
candidates = []
if given_string:
nde_idx = get_node_from_reference(given_string)
if nde_idx is not None:
candidates = [nde_idx]
else:
search_path = given_string
if expand:
path_elements = given_string.split('/')
search_path = '/**/'.join(path_elements)
candidates = find_in_path(search_path, tops)
return candidates
#-----------------------------------------------------------------------
def search_nearest_up(given_idx, objective_function):
""" Search breadth first _up_ the graph to find the nearest node
that satisfies the objective function.
The search returns zero, one or more than one matching
        node. If there is more than one node at equal distance from
the starting point then all of these nodes are returned.
The search also returns a count indicating the distance of the
        found items from the starting point.
:given_idx: string - A node identifier for the start of the
search.
The test is applied to this node. The search could
terminate immediately without going further through the
graph.
:objective_function: function - a test function that is called
for each node in the search path. The search terminates
when the function returns True.
test = objective_function(node_object)
The test criteria are not given. We assume that the
objective function is specific for the envisaged test. A
closure would be a good way of generating such.
Return ([ ... ndNode ...], distance)
"""
distance = 0
work_list = set([given_idx])
while work_list:
found_list = []
qy = engine.session.query(
ndNode,
ndEdge
).outerjoin(
(ndEdge, (ndEdge.nde_child == ndNode.nde_idx))
).filter(
ndNode.nde_idx.in_(work_list)
)
node_list = qy.all()
found_list = set([x[0] for
x in node_list if objective_function(x[0])])
if found_list:
break
work_list = set([x[1].nde_parent for
x in node_list if x[1] and x[1].nde_parent])
distance += 1
return (list(found_list), distance)
#----------------------------------------------------------------------- | PypiClean |
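# Illustrative sketch (not part of the original module): the objective function for
# search_nearest_up() can be built as a closure over the value being searched for,
# for example:
#
#     def has_name(wanted):
#         def _test(node):
#             return node.nde_name == wanted
#         return _test
#
#     nodes, distance = search_nearest_up(some_idx, has_name('config'))
#
# where ``some_idx`` and 'config' are placeholders for a real node idx and name.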
/FileCrawler-0.1.8.tar.gz/FileCrawler-0.1.8/filecrawler/rules/aws.py | import re
from filecrawler.rulebase import RuleBase
class AWS(RuleBase):
def __init__(self):
super().__init__('aws-access-token', 'AWS')
self._regex = re.compile(r"(?<![A-Z0-9])(A3T[A-Z0-9]|AKIA|AGPA|AIDA|AROA|AIPA|ANPA|ANVA|ASIA)[A-Z0-9]{16}")
self._keywords = ["AKIA",
"AGPA",
"AIDA",
"AROA",
"AIPA",
"ANPA",
"ANVA",
"ASIA",
]
self._exclude_keywords = [
"EXAMPLE" # AKIAIOSFODNN7EXAMPLE
]
self._fp_regex = re.compile(r"[A-Z0-9]{1,3}(A3T[A-Z0-9]|AKIA|AGPA|AIDA|AROA|AIPA|ANPA|ANVA|ASIA)[A-Z0-9]{16}[A-Z0-9]{1,3}")
self._tps = [self.generate_sample_secret("AWS", "AKIALALEMEL33243OLIB")]
def post_processor(self, original_data: str, found: str) -> dict:
try:
p = re.compile(
r"(?<![A-Za-z0-9/+=])[A-Za-z0-9/+=]{40}(?![A-Za-z0-9/+=])")
pr = re.compile(r"(us(-gov)?|ap|ca|cn|eu|sa)-(central|(north|south)?(east|west)?)-\d")
hex_p = re.compile(r"[a-fA-F0-9]+")
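            # The 40-character candidate secret is searched in a window starting
            # ~200 characters before the access key; purely hexadecimal matches
            # are skipped below because they are more likely digests than AWS
            # secret keys.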
start = original_data.find(found) - 200
if start < 0:
start = 0
region = ""
for m in pr.finditer(original_data, start):
if m:
region = m[0]
break
for m in p.finditer(original_data, start):
if m:
if hex_p.sub('', m[0]).strip() != '': #Remove Hex values
return dict(
aws_access_key=found,
aws_access_secret=m[0],
aws_region=region,
severity=100
)
return dict(
aws_access_key=found,
aws_region=region,
severity=30
)
except Exception as e:
return {} | PypiClean |
/FiPy-3.4.4.tar.gz/FiPy-3.4.4/fipy/variables/addOverFacesVariable.py | from __future__ import division
from __future__ import unicode_literals
__all__ = []
from fipy.tools import numerix
from fipy.tools import inline
from fipy.variables.cellVariable import CellVariable
class _AddOverFacesVariable(CellVariable):
r"""surface integral of `self.faceVariable`, :math:`\phi_f`
.. math:: \int_S \phi_f\,dS \approx \frac{\sum_f \phi_f A_f}{V_P}
Returns
-------
integral : CellVariable
volume-weighted sum
"""
def __init__(self, faceVariable, mesh = None):
if not mesh:
mesh = faceVariable.mesh
CellVariable.__init__(self, mesh, hasOld = 0, elementshape=faceVariable.shape[:-1])
self.faceVariable = self._requires(faceVariable)
def _calcValue(self):
if inline.doInline and self.faceVariable.rank < 2:
return self._calcValueInline()
else:
return self._calcValueNoInline()
def _calcValueInline(self):
NCells = self.mesh.numberOfCells
ids = self.mesh.cellFaceIDs
val = self._array.copy()
inline._runInline("""
int i;
for(i = 0; i < numberOfCells; i++)
{
int j;
value[i] = 0.;
for(j = 0; j < numberOfCellFaces; j++)
{
// cellFaceIDs can be masked, which caused subtle and
// unreproducible problems on OS X (who knows why not elsewhere)
long id = ids[i + j * numberOfCells];
if (id >= 0) {
value[i] += orientations[i + j * numberOfCells] * faceVariable[id];
}
}
value[i] = value[i] / cellVolume[i];
}
""",
numberOfCellFaces = self.mesh._maxFacesPerCell,
numberOfCells = NCells,
faceVariable = self.faceVariable.numericValue,
ids = numerix.array(ids),
value = val,
orientations = numerix.array(self.mesh._cellToFaceOrientations),
cellVolume = numerix.array(self.mesh.cellVolumes))
return self._makeValue(value = val)
def _calcValueNoInline(self):
ids = self.mesh.cellFaceIDs
contributions = numerix.take(self.faceVariable, ids, axis=-1)
# FIXME: numerix.MA.filled casts away dimensions
s = (numerix.newaxis,) * (len(contributions.shape) - 2) + (slice(0, None, None),) + (slice(0, None, None),)
faceContributions = contributions * self.mesh._cellToFaceOrientations[s]
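        # Summing the oriented face contributions over the face axis (a tensordot
        # with a vector of ones) and dividing by the cell volumes yields the
        # discrete surface integral; masked (non-existent) faces contribute zero.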
return numerix.tensordot(numerix.ones(faceContributions.shape[-2], 'd'),
numerix.MA.filled(faceContributions, 0.), (0, -2)) / self.mesh.cellVolumes | PypiClean |
/CODCQC-1.0.tar.gz/CODCQC-1.0/README.md | # CODCQC
An open source Python interface to the quality control of ocean in-situ observations.
CODC-QC is an open source Python interface to the quality control of ocean *in-situ* observations (e.g., temperature profiles, salinity profiles). It was developed to reduce the human workload and time spent on manual quality control, and to keep up with the increasing volume of daily real-time data flowing from observing systems and large data centers.
The *in-situ* observations collected from the ocean are quality-heterogeneous. Decades of effort have been dedicated to developing manual or automatic quality control (QC) systems to improve the quality and availability of ocean databases, which is one of the basic tasks in many oceanic studies.
The goal of automatic QC (AutoQC) is to provide a quality-homogeneous database while reducing the human workload and time required for manual QC, and keeping up with the increasing volume of daily real-time data flowing from observing systems and large data centers.
Here, we developed an AutoQC system, referred to as the **CODC-QC** system (CAS-Ocean Data Center (CODC) Quality Control system), to quality control ocean *in-situ* observations.
#### The User Manual of CODC-QC is available now!! ([click here](https://github.com/zqtzt/CODCQC/))
#### Installing CODC-QC
**Please first download the Python CODCQC package `CODCQC-1.0-py3-none-any.whl` [here](http://www.ocean.iap.ac.cn/ftp/cheng/CODC-QC/) and follow the User Manual to install and get started.**
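For example, assuming the downloaded wheel file sits in the current working directory, it can typically be installed with pip:

```bash
pip install CODCQC-1.0-py3-none-any.whl
```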
> #### Why CODC-QC
- CODC-QC contains several QC checks that can be easily combined and tuned by users.
- CODC-QC provides many typical data interfaces for inputting raw data.
- The QC flags in CODC-QC can be output in several optional categories, depending on the user's purposes.
- CODC-QC is a climatology-based automatic quality control algorithm. It is good at detecting bad data at the acceptable cost of sacrificing only a small amount of good data.
- The performance of CODC-QC has been meticulously analyzed and evaluated by comparing it with other international QC systems; this comparison is currently in peer review.
**In this version, CODC-QC is only available for temperature observations**. It covers all temperature instrument types (e.g., Bottle, XBT, CTD, Argo, APB). In the future, CODC-QC will be extended to salinity and oxygen observations.
We warmly welcome feedback, questions, forks, pull requests, and improvements to the CODC-QC project!
If you have any questions or suggestions about this program, if you find bugs, or if you would like to help debug or improve the CODC-QC project, please do not hesitate to contact us via:
+ [Create an issue](https://github.com/zqtzt/COMS-AutoQC/issues) in the Github community
+ [Open a pull request](https://github.com/zqtzt/COMS-AutoQC/pulls) with your fixes or improvements in the GitHub community
+ Send an email to us: [email protected] or [email protected]
**Reference: Tan Z., Cheng L., Gouretski V., Zhang B., Wang Y., Li F., Liu Z., Zhu J., 2022: A new automatic quality control system for ocean in-situ temperature observations and impact on ocean warming estimate. Deep Sea Research Part I, 103961, https://doi.org/10.1016/j.dsr.2022.103961**
Author: Zhetao Tan ([email protected])
Contributors: Lijing Cheng, Viktor Gouretski, Yanjun Wang, Bin Zhang
Center for Ocean Mega-Science, Chinese Academy of Sciences (COMS/CAS)
Institute of Atmospheric Physics, Chinese Academy of Sciences (IAP/CAS)
| PypiClean |
/Nuitka_winsvc-1.7.10-cp310-cp310-win_amd64.whl/nuitka/build/inline_copy/glob2/glob2/impl.py |
from __future__ import absolute_import
import sys
import os
import re
from os.path import join
from . import fnmatch
try:
from itertools import imap
except ImportError:
imap = map
class Globber(object):
listdir = staticmethod(os.listdir)
isdir = staticmethod(os.path.isdir)
islink = staticmethod(os.path.islink)
exists = staticmethod(os.path.lexists)
def walk(self, top, followlinks=False, sep=None):
"""A simplified version of os.walk (code copied) that uses
``self.listdir``, and the other local filesystem methods.
Because we don't care about file/directory distinctions, only
a single list is returned.
"""
try:
names = self.listdir(top)
except os.error as err:
return
items = []
for name in names:
items.append(name)
yield top, items
for name in items:
new_path = _join_paths([top, name], sep=sep)
if followlinks or not self.islink(new_path):
                for x in self.walk(new_path, followlinks, sep=sep):
yield x
def glob(self, pathname, with_matches=False, include_hidden=False, recursive=True,
norm_paths=True, case_sensitive=True, sep=None):
"""Return a list of paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards a la
fnmatch. However, unlike fnmatch, filenames starting with a
dot are special cases that are not matched by '*' and '?'
patterns.
If ``include_hidden`` is True, then files and folders starting with
a dot are also returned.
"""
return list(self.iglob(pathname, with_matches, include_hidden,
norm_paths, case_sensitive, sep))
def iglob(self, pathname, with_matches=False, include_hidden=False, recursive=True,
norm_paths=True, case_sensitive=True, sep=None):
"""Return an iterator which yields the paths matching a pathname
pattern.
The pattern may contain simple shell-style wildcards a la
fnmatch. However, unlike fnmatch, filenames starting with a
dot are special cases that are not matched by '*' and '?'
patterns.
If ``with_matches`` is True, then for each matching path
a 2-tuple will be returned; the second element if the tuple
will be a list of the parts of the path that matched the individual
wildcards.
If ``include_hidden`` is True, then files and folders starting with
a dot are also returned.
"""
result = self._iglob(pathname, True, include_hidden,
norm_paths, case_sensitive, sep)
if with_matches:
return result
return imap(lambda s: s[0], result)
def _iglob(self, pathname, rootcall, include_hidden,
norm_paths, case_sensitive, sep):
"""Internal implementation that backs :meth:`iglob`.
``rootcall`` is required to differentiate between the user's call to
iglob(), and subsequent recursive calls, for the purposes of resolving
certain special cases of ** wildcards. Specifically, "**" is supposed
to include the current directory for purposes of globbing, but the
directory itself should never be returned. So if ** is the lastmost
part of the ``pathname`` given the user to the root call, we want to
ignore the current directory. For this, we need to know which the root
call is.
"""
# Short-circuit if no glob magic
if not has_magic(pathname):
if self.exists(pathname):
yield pathname, ()
return
# If no directory part is left, assume the working directory
dirname, basename = os.path.split(pathname)
# If the directory is globbed, recurse to resolve.
# If at this point there is no directory part left, we simply
# continue with dirname="", which will search the current dir.
# `os.path.split()` returns the argument itself as a dirname if it is a
# drive or UNC path. Prevent an infinite recursion if a drive or UNC path
# contains magic characters (i.e. r'\\?\C:').
if dirname != pathname and has_magic(dirname):
# Note that this may return files, which will be ignored
# later when we try to use them as directories.
# Prefiltering them here would only require more IO ops.
dirs = self._iglob(dirname, False, include_hidden,
norm_paths, case_sensitive, sep)
else:
dirs = [(dirname, ())]
# Resolve ``basename`` expr for every directory found
for dirname, dir_groups in dirs:
for name, groups in self.resolve_pattern(dirname, basename,
not rootcall, include_hidden,
norm_paths, case_sensitive, sep):
yield _join_paths([dirname, name], sep=sep), dir_groups + groups
def resolve_pattern(self, dirname, pattern, globstar_with_root, include_hidden,
norm_paths, case_sensitive, sep):
"""Apply ``pattern`` (contains no path elements) to the
literal directory in ``dirname``.
If pattern=='', this will filter for directories. This is
a special case that happens when the user's glob expression ends
with a slash (in which case we only want directories). It simpler
and faster to filter here than in :meth:`_iglob`.
"""
if sys.version_info[0] == 3:
if isinstance(pattern, bytes):
dirname = bytes(os.curdir, 'ASCII')
else:
if isinstance(pattern, unicode) and not isinstance(dirname, unicode):
dirname = unicode(dirname, sys.getfilesystemencoding() or
sys.getdefaultencoding())
# If no magic, short-circuit, only check for existence
if not has_magic(pattern):
if pattern == '':
if self.isdir(dirname):
return [(pattern, ())]
else:
if self.exists(_join_paths([dirname, pattern], sep=sep)):
return [(pattern, ())]
return []
if not dirname:
dirname = os.curdir
try:
if pattern == '**':
# Include the current directory in **, if asked; by adding
# an empty string as opposed to '.', we spare ourselves
# having to deal with os.path.normpath() later.
names = [''] if globstar_with_root else []
for top, entries in self.walk(dirname, sep=sep):
_mkabs = lambda s: _join_paths([top[len(dirname) + 1:], s], sep=sep)
names.extend(map(_mkabs, entries))
# Reset pattern so that fnmatch(), which does not understand
# ** specifically, will only return a single group match.
pattern = '*'
else:
names = self.listdir(dirname)
except os.error:
return []
if not include_hidden and not _ishidden(pattern):
# Remove hidden files, but take care to ensure
# that the empty string we may have added earlier remains.
# Do not filter out the '' that we might have added earlier
names = filter(lambda x: not x or not _ishidden(x), names)
return fnmatch.filter(names, pattern, norm_paths, case_sensitive, sep)
default_globber = Globber()
glob = default_globber.glob
iglob = default_globber.iglob
del default_globber
magic_check = re.compile('[*?[]')
magic_check_bytes = re.compile(b'[*?[]')
def has_magic(s):
if isinstance(s, bytes):
match = magic_check_bytes.search(s)
else:
match = magic_check.search(s)
return match is not None
def _ishidden(path):
return path[0] in ('.', b'.'[0])
def _join_paths(paths, sep=None):
path = join(*paths)
if sep:
path = re.sub(r'\/', sep, path) # cached internally
    return path
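# --- Illustrative sketch (editor addition, not part of the original module). It
# exercises only the small helpers defined above, whose behaviour is fully
# visible in this file; the public glob()/iglob() signatures live on the Globber
# class and are not assumed here.
if __name__ == "__main__":
    # has_magic() reports whether a pattern contains glob metacharacters.
    assert has_magic("src/**/*.py") is True
    assert has_magic("README.txt") is False
    assert has_magic(b"data-??.bin") is True      # bytes patterns work too

    # _ishidden() flags dot-files, which resolve_pattern() drops unless
    # include_hidden is requested.
    assert _ishidden(".git") is True
    assert _ishidden("src") is False

    # _join_paths() joins components and can rewrite the separator.
    print(_join_paths(["a", "b", "c"]))           # a/b/c on POSIX
    print(_join_paths(["a", "b"], sep="\\"))      # prints a\b (separator rewritten)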
# /Altair%20Smartworks%20SDK-0.0.1.tar.gz/Altair Smartworks SDK-0.0.1/openapi_client/model/action_reboot_response.py
import re  # noqa: F401
import sys # noqa: F401
from openapi_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from openapi_client.model.property_cpu_response_links import PropertyCPUResponseLinks
globals()['PropertyCPUResponseLinks'] = PropertyCPUResponseLinks
class ActionRebootResponse(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
      allowed_values (dict): The key is the tuple path to the attribute;
          for a top-level attribute var_name this is (var_name,). The value
          is a dict with a capitalized key describing the allowed value and
          an allowed value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute;
          for a top-level attribute var_name this is (var_name,). The value
          is a dict that stores validations for max_length, min_length,
          max_items, min_items, exclusive_maximum, inclusive_maximum,
          exclusive_minimum, inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'title': (str,), # noqa: E501
'description': (str,), # noqa: E501
'links': ([PropertyCPUResponseLinks],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'title': 'title', # noqa: E501
'description': 'description', # noqa: E501
'links': 'links', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""ActionRebootResponse - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                                          composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
title (str): [optional] # noqa: E501
description (str): [optional] # noqa: E501
links ([PropertyCPUResponseLinks]): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
            setattr(self, var_name, var_value)
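# --- Illustrative sketch (editor addition): how this auto-generated model is
# typically instantiated. Only keyword arguments are accepted; type checking
# and storage are handled by the ModelNormal base class (defined elsewhere in
# openapi_client), so this is a usage outline rather than a test.
if __name__ == "__main__":
    response = ActionRebootResponse(
        title="reboot",
        description="Reboot the device",
    )
    # Attribute names map onto JSON keys via `attribute_map` when serialized.
    print(response.title, response.description)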
# /BlazeWeb-0.6.2-py3-none-any.whl/blazeweb/middleware.py
import logging
from os import path
from io import StringIO
from tempfile import TemporaryFile
import time
from beaker.middleware import SessionMiddleware
from blazeutils import randchars, pformat, tolist
from paste.registry import RegistryManager
from werkzeug.datastructures import EnvironHeaders
from werkzeug.debug import DebuggedApplication
from werkzeug.middleware.shared_data import SharedDataMiddleware
from werkzeug.wsgi import LimitedStream
from blazeweb import routing
from blazeweb.hierarchy import findfile, FileNotFound
from blazeweb.globals import settings, ag
from blazeweb.utils.filesystem import mkdirs
log = logging.getLogger(__name__)
class HttpRequestLogger(object):
"""
Logs the full HTTP request to text files for debugging purposes
Note: should only be used low-request applications and/or with filters.
Example (<project>/applications.py):
def make_wsgi(profile='Default'):
config.appinit(settingsmod, profile)
app = WSGIApp()
<...snip...>
app = HttpRequestLogger(app, enabled=True, path_info_filter='files/add',
request_method_filter='post')
return app
"""
def __init__(self, application, enabled=False, path_info_filter=None,
request_method_filter=None):
self.log_dir = path.join(settings.dirs.logs, 'http_requests')
mkdirs(self.log_dir)
self.application = application
self.enabled = enabled
self.pi_filter = path_info_filter
self.rm_filter = request_method_filter
def __call__(self, environ, start_response):
if self.enabled:
self.headers = EnvironHeaders(environ)
should_log = True
if self.pi_filter is not None and self.pi_filter not in environ['PATH_INFO']:
should_log = False
if self.rm_filter is not None and environ['REQUEST_METHOD'].lower() not in [
x.lower() for x in tolist(self.rm_filter)
]:
should_log = False
if should_log:
wsgi_input = self.replace_wsgi_input(environ)
fname = '%s_%s' % (time.time(), randchars())
fh = open(path.join(self.log_dir, fname), 'wb+')
try:
fh.write(pformat(environ))
fh.write('\n')
fh.write(wsgi_input.read())
wsgi_input.seek(0)
finally:
fh.close()
return self.application(environ, start_response)
def replace_wsgi_input(self, environ):
content_length = self.headers.get('content-length', type=int)
limited_stream = LimitedStream(environ['wsgi.input'], content_length)
if content_length is not None and content_length > 1024 * 500:
wsgi_input = TemporaryFile('wb+')
else:
wsgi_input = StringIO()
wsgi_input.write(limited_stream.read())
wsgi_input.seek(0)
environ['wsgi.input'] = wsgi_input
return environ['wsgi.input']
class StaticFileServer(SharedDataMiddleware):
"""
Serves static files based on hierarchy structure
"""
def __init__(self, app, **kwargs):
exports = {'/' + routing.static_url('/'): ''}
SharedDataMiddleware.__init__(self, app, exports, **kwargs)
def debug(self, pathpart, msg):
log.debug('StaticFileServer 404 (%s): %s', pathpart, msg)
def get_directory_loader(self, directory):
def loader(pathpart):
if pathpart is None:
self.debug(pathpart, 'pathpart is None')
return None, None
if not pathpart.count('/'):
self.debug(pathpart, 'pathpart had no slashes')
return None, None
type, locpath = pathpart.split('/', 1)
if not locpath:
self.debug(pathpart, 'pathpart had type, but not locpath')
return None, None
if type not in ('app', 'component'):
self.debug(pathpart, 'type was not "app" or "component"')
return None, None
if type == 'component':
if not locpath.count('/'):
self.debug(pathpart, 'component type, but locpath had no slashes')
return None, None
component, locpath = locpath.split('/', 1)
# look in the static directory
locpath = 'static/' + locpath
if type == 'app':
endpoint = locpath
else:
endpoint = '%s:%s' % (component, locpath)
try:
fpath = findfile(endpoint)
return path.basename(fpath), self._opener(fpath)
except FileNotFound:
self.debug(pathpart, 'endpoint "%s" not found' % endpoint)
return None, None
return loader
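# --- Editor's note (illustrative, not part of the original module): the loader
# returned above maps the URL path part onto hierarchy endpoints roughly like
# this, with findfile() doing the actual lookup:
#
#   "app/img/logo.png"            -> endpoint "static/img/logo.png"
#   "component/news/css/site.css" -> endpoint "news:static/css/site.css"
#
# Any path part that is not prefixed with "app/" or "component/", or whose
# endpoint cannot be found, yields (None, None) and therefore a 404.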
def static_files(app):
settings = ag.app.settings
if settings.static_files.enabled:
# serve static files from static directory (e.g. after copying
# from the source packages; use static-copy command for that)
if settings.static_files.location == 'static':
exported_dirs = {'/' + routing.static_url('/'): settings.dirs.static}
return SharedDataMiddleware(app, exported_dirs)
# serve static files from source packages based on hierarchy rules
return StaticFileServer(app)
return app
class RegistrySetup(object):
"""
Sets up the paste.registry objects and application level
globals for each request.
"""
def __init__(self, wsgiapp, bwapp):
self.wsgiapp = wsgiapp
self.bwapp = bwapp
def __call__(self, environ, start_response):
environ['paste.registry'].register(settings, self.bwapp.settings)
environ['paste.registry'].register(ag, self.bwapp.ag)
return self.wsgiapp(environ, start_response)
def full_wsgi_stack(app):
"""
returns the WSGIApp wrapped in common middleware
"""
settings = ag.app.settings
if settings.beaker.enabled:
app = SessionMiddleware(app, **dict(settings.beaker))
app = static_files(app)
app = minimal_wsgi_stack(app)
# show nice stack traces and debug output if enabled
if settings.debugger.enabled:
app = DebuggedApplication(app, evalex=settings.debugger.interactive)
# log http requests, use sparingly on production servers
if settings.logs.http_requests.enabled:
app = HttpRequestLogger(
app, True,
settings.logs.http_requests.filters.path_info,
settings.logs.http_requests.filters.request_method
)
return app
def minimal_wsgi_stack(app):
"""
returns a WSGI application wrapped in minimal middleware, mostly useful
for internal testing
"""
app = RegistrySetup(app, ag.app)
app = RegistryManager(app)
    return app
# /ImSwitch-2.0.0.tar.gz/imswitch/imcontrol/model/managers/rs232/GRBLManager.py
from imswitch.imcommon.model import initLogger
import imswitch.imcontrol.model.interfaces.grbldriver as grbldriver
class GRBLManager:
""" A general-purpose RS232 manager that together with a general-purpose
RS232Driver interface can handle an arbitrary RS232 communication channel,
with all the standard serial communication protocol parameters as defined
in the hardware control configuration.
Manager properties:
- ``port``
- ``encoding``
- ``recv_termination``
- ``send_termination``
- ``baudrate``
- ``bytesize``
- ``parity``
- ``stopbits``
- ``rtscts``
- ``dsrdtr``
- ``xonxoff``
"""
def __init__(self, rs232Info, name, **_lowLevelManagers):
self.__logger = initLogger(self, instanceName=name)
self._settings = rs232Info.managerProperties
self._name = name
self._port = rs232Info.managerProperties['port']
try:
self.is_home = rs232Info.managerProperties['is_home']
        except KeyError:
self.is_home = False
self._board = grbldriver.GrblDriver(self._port)
# init the stage
self._board.write_global_config()
self._board.write_all_settings()
#self.board.verify_settings()
self._board.reset_stage()
if self.is_home:
self._board.home()
def query(self, arg: str) -> str:
""" Sends the specified command to the RS232 device and returns a
string encoded from the received bytes. """
return self._board._write(arg)
def finalize(self):
        self._board.close()
# Copyright (C) 2020-2021 ImSwitch developers
# This file is part of ImSwitch.
#
# ImSwitch is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ImSwitch is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.
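# --- Illustrative sketch (editor addition): the configuration shape GRBLManager
# expects, inferred from __init__ above. The real rs232Info object comes from
# ImSwitch's hardware-control configuration; SimpleNamespace is only a stand-in,
# and the calls are left commented out because they require actual hardware.
if __name__ == "__main__":
    from types import SimpleNamespace

    rs232Info = SimpleNamespace(
        managerProperties={"port": "/dev/ttyUSB0", "is_home": False}
    )
    # manager = GRBLManager(rs232Info, "grbl")   # opens the serial port
    # manager.query("?")                         # send a raw command string
    # manager.finalize()                         # close the connection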
# /Grid2Op-1.9.3-py3-none-any.whl/grid2op/Rules/RulesChecker.py
import copy
import warnings
from grid2op.Exceptions import Grid2OpException
from grid2op.Rules.BaseRules import BaseRules
from grid2op.Rules.AlwaysLegal import AlwaysLegal
class RulesChecker(object):
"""
Class that define the rules of the game.
"""
def __init__(self, legalActClass=AlwaysLegal):
"""
Parameters
----------
legalActClass: ``type``
The class that will be used to tell if the actions are legal or not. The class must be given, and not
an object of this class. It should derived from :class:`BaseRules`.
"""
if isinstance(legalActClass, type):
if not issubclass(legalActClass, BaseRules):
raise Grid2OpException(
"Gamerules: legalActClass should be initialize with a class deriving "
"from BaseRules and not {}".format(type(legalActClass))
)
self.legal_action = legalActClass()
else:
if not isinstance(legalActClass, BaseRules):
raise Grid2OpException(
'Parameter "legalActClass" used to build the Environment should be an instance of the '
'grid2op.BaseRules class, type provided is "{}"'.format(
type(legalActClass)
)
)
try:
self.legal_action = copy.deepcopy(legalActClass)
except Exception as exc_:
warnings.warn("You passed the legal action as an instance that cannot be deepcopied. It will be "
"used 'as is', we do not garantee anything if you modify the original object.")
self.legal_action = legalActClass
def initialize(self, env):
"""
This function is used to inform the class instance about the environment specification.
It can be the place to assert the defined rules are suited for the environement.
Parameters
----------
env: :class:`grid2op.Environment.Environment`
The environment on which the action is performed.
"""
self.legal_action.initialize(env)
def __call__(self, action, env):
"""
Says if an action is legal or not.
Parameters
----------
action: :class:`grid2op.Action.Action`
The action that need to be tested
env: :class:`grid2op.Environment.Environment`
The current used environment.
Returns
-------
is_legal: ``bool``
Assess if the given action is legal or not. ``True``: the action is legal, ``False`` otherwise
reason:
A grid2op IllegalException given the reason for which the action is illegal
"""
        return self.legal_action(action, env)
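# --- Illustrative sketch (editor addition): the two accepted ways to build a
# RulesChecker, matching the type checks in __init__. `action` and `env` stand
# in for real grid2op objects and are not constructed here.
if __name__ == "__main__":
    checker = RulesChecker()                               # default: the AlwaysLegal class
    checker_from_instance = RulesChecker(AlwaysLegal())    # an instance is deep-copied
    # After checker.initialize(env), legality is queried per action:
    #   is_legal, reason = checker(action, env)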
# /HolmesV-2021.10.8.tar.gz/HolmesV-2021.10.8/mycroft/audio/services/simple/__init__.py
import signal
from threading import Lock
from time import sleep
from mycroft.audio.services import AudioBackend
from mycroft.messagebus.message import Message
from mycroft.util.log import LOG
from mycroft.util import play_mp3, play_ogg, play_wav
import mimetypes
import re
from requests import Session
def find_mime(path):
mime = None
if path.startswith('http'):
response = Session().head(path, allow_redirects=True)
if 200 <= response.status_code < 300:
mime = response.headers['content-type']
if not mime:
mime = mimetypes.guess_type(path)[0]
# Remove any http address arguments
if not mime:
mime = mimetypes.guess_type(re.sub(r'\?.*$', '', path))[0]
if mime:
return mime.split('/')
else:
return (None, None)
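# --- Illustrative examples (editor addition) of find_mime() return values. On
# success it returns mime.split('/') (a two-element list); when nothing can be
# guessed it returns the tuple (None, None). http(s) URLs are resolved with a
# HEAD request before falling back to mimetypes.
#
#   >>> find_mime("/tmp/music.mp3")
#   ['audio', 'mpeg']
#   >>> find_mime("/tmp/unknown.blob")
#   (None, None)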
class SimpleAudioService(AudioBackend):
"""
Simple Audio backend for both mpg123 and the ogg123 player.
This one is rather limited and only implements basic usage.
"""
def __init__(self, config, bus, name='simple'):
super().__init__(config, bus)
self.config = config
self.process = None
self.bus = bus
self.name = name
self._stop_signal = False
self._is_playing = False
self._paused = False
self.tracks = []
self.index = 0
self.supports_mime_hints = True
mimetypes.init()
self.track_lock = Lock()
self.bus.on('SimpleAudioServicePlay', self._play)
def supported_uris(self):
return ['file', 'http']
def clear_list(self):
with self.track_lock:
self.tracks = []
def add_list(self, tracks):
with self.track_lock:
self.tracks += tracks
LOG.info("Track list is " + str(tracks))
def _get_track(self, track_data):
if isinstance(track_data, list):
track = track_data[0]
mime = track_data[1]
mime = mime.split('/')
else: # Assume string
track = track_data
mime = find_mime(track)
return track, mime
def _play(self, message):
"""Implementation specific async method to handle playback.
This allows mpg123 service to use the next method as well
as basic play/stop.
"""
LOG.info('SimpleAudioService._play')
# Stop any existing audio playback
self._stop_running_process()
repeat = message.data.get('repeat', False)
self._is_playing = True
self._paused = False
with self.track_lock:
if len(self.tracks) > self.index:
track, mime = self._get_track(self.tracks[self.index])
else:
return
LOG.debug('Mime info: {}'.format(mime))
# Indicate to audio service which track is being played
if self._track_start_callback:
self._track_start_callback(track)
# Replace file:// uri's with normal paths
track = track.replace('file://', '')
try:
if 'mpeg' in mime[1]:
self.process = play_mp3(track)
elif 'ogg' in mime[1]:
self.process = play_ogg(track)
elif 'wav' in mime[1]:
self.process = play_wav(track)
else:
# If no mime info could be determined guess mp3
self.process = play_mp3(track)
except FileNotFoundError as e:
LOG.error('Couldn\'t play audio, {}'.format(repr(e)))
self.process = None
except Exception as e:
LOG.exception(repr(e))
self.process = None
# Wait for completion or stop request
while (self._is_process_running() and not self._stop_signal):
sleep(0.25)
if self._stop_signal:
self._stop_running_process()
self._is_playing = False
self._paused = False
return
else:
self.process = None
# if there are more tracks available play next
self.index += 1
with self.track_lock:
if self.index < len(self.tracks) or repeat:
if self.index >= len(self.tracks):
self.index = 0
self.bus.emit(Message('SimpleAudioServicePlay',
{'repeat': repeat}))
else:
self._track_start_callback(None)
self._is_playing = False
self._paused = False
def play(self, repeat=False):
LOG.info('Call SimpleAudioServicePlay')
self.index = 0
self.bus.emit(Message('SimpleAudioServicePlay', {'repeat': repeat}))
def stop(self):
LOG.info('SimpleAudioServiceStop')
self._stop_signal = True
while self._is_playing:
sleep(0.1)
self._stop_signal = False
def _pause(self):
""" Pauses playback if possible.
Returns: (bool) New paused status:
"""
if self.process:
# Suspend the playback process
self.process.send_signal(signal.SIGSTOP)
return True # After pause the service is paused
else:
return False
def pause(self):
if not self._paused:
self._paused = self._pause()
def _resume(self):
""" Resumes playback if possible.
Returns: (bool) New paused status:
"""
if self.process:
# Resume the playback process
self.process.send_signal(signal.SIGCONT)
return False # After resume the service is no longer paused
else:
return True
def resume(self):
if self._paused:
# Resume the playback process
self._paused = self._resume()
def next(self):
# Terminate process to continue to next
self._stop_running_process()
def previous(self):
pass
def lower_volume(self):
if not self._paused:
self._pause() # poor-man's ducking
def restore_volume(self):
if not self._paused:
self._resume() # poor-man's unducking
def _is_process_running(self):
return self.process and self.process.poll() is None
def _stop_running_process(self):
if self._is_process_running():
if self._paused:
# The child process must be "unpaused" in order to be stopped
self._resume()
self.process.terminate()
countdown = 10
while self._is_process_running() and countdown > 0:
sleep(0.1)
countdown -= 1
if self._is_process_running():
# Failed to shutdown when asked nicely. Force the issue.
LOG.debug("Killing currently playing audio...")
self.process.kill()
self.process = None
def load_service(base_config, bus):
backends = base_config.get('backends', [])
services = [(b, backends[b]) for b in backends
if backends[b]['type'] == 'simple' and
backends[b].get('active', False)]
instances = [SimpleAudioService(s[1], bus, s[0]) for s in services]
    return instances
/Notable-0.4.2.tar.gz/Notable-0.4.2/notable/static/lib/ace/src-min/mode-diff.js | define("ace/mode/diff",["require","exports","module","ace/lib/oop","ace/mode/text","ace/tokenizer","ace/mode/diff_highlight_rules","ace/mode/folding/diff"],function(e,t,n){var r=e("../lib/oop"),i=e("./text").Mode,s=e("../tokenizer").Tokenizer,o=e("./diff_highlight_rules").DiffHighlightRules,u=e("./folding/diff").FoldMode,a=function(){this.$tokenizer=new s((new o).getRules()),this.foldingRules=new u(["diff","index","\\+{3}","@@|\\*{5}"],"i")};r.inherits(a,i),function(){}.call(a.prototype),t.Mode=a}),define("ace/mode/diff_highlight_rules",["require","exports","module","ace/lib/oop","ace/mode/text_highlight_rules"],function(e,t,n){var r=e("../lib/oop"),i=e("./text_highlight_rules").TextHighlightRules,s=function(){this.$rules={start:[{regex:"^(?:\\*{15}|={67}|-{3}|\\+{3})$",token:"punctuation.definition.separator.diff",name:"keyword"},{regex:"^(@@)(\\s*.+?\\s*)(@@)(.*)$",token:["constant","constant.numeric","constant","comment.doc.tag"]},{regex:"^(\\d+)([,\\d]+)(a|d|c)(\\d+)([,\\d]+)(.*)$",token:["constant.numeric","punctuation.definition.range.diff","constant.function","constant.numeric","punctuation.definition.range.diff","invalid"],name:"meta."},{regex:"^(\\-{3}|\\+{3}|\\*{3})( .+)$",token:["constant.numeric","meta.tag"]},{regex:"^([!+>])(.*?)(\\s*)$",token:["support.constant","text","invalid"]},{regex:"^([<\\-])(.*?)(\\s*)$",token:["support.function","string","invalid"]},{regex:"^(diff)(\\s+--\\w+)?(.+?)( .+)?$",token:["variable","variable","keyword","variable"]},{regex:"^Index.+$",token:"variable"},{regex:"^\\s+$",token:"text"},{regex:"\\s*$",token:"invalid"},{defaultToken:"invisible",caseInsensitive:!0}]}};r.inherits(s,i),t.DiffHighlightRules=s}),define("ace/mode/folding/diff",["require","exports","module","ace/lib/oop","ace/mode/folding/fold_mode","ace/range"],function(e,t,n){var r=e("../../lib/oop"),i=e("./fold_mode").FoldMode,s=e("../../range").Range,o=t.FoldMode=function(e,t){this.regExpList=e,this.flag=t,this.foldingStartMarker=RegExp("^("+e.join("|")+")",this.flag)};r.inherits(o,i),function(){this.getFoldWidgetRange=function(e,t,n){var r=e.getLine(n),i={row:n,column:r.length},o=this.regExpList;for(var u=1;u<=o.length;u++){var a=RegExp("^("+o.slice(0,u).join("|")+")",this.flag);if(a.test(r))break}for(var f=e.getLength();++n<f;){r=e.getLine(n);if(a.test(r))break}if(n==i.row+1)return;return s.fromPoints(i,{row:n-1,column:r.length})}}.call(o.prototype)}) | PypiClean |
/MLBPlayerIDs-0.1.0.tar.gz/MLBPlayerIDs-0.1.0/mlbids/_sfbb.py | import collections
import typing
import bs4
import requests
HEADERS = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"
}
def get_soup(url: str) -> bs4.BeautifulSoup:
"""
:param url: The URL of the webpage to scrape
:return: A `BeautifulSoup` object for the passed URL
"""
res = requests.get(url, headers=HEADERS)
res.raise_for_status()
soup = bs4.BeautifulSoup(res.text, features="lxml")
return soup
class SFBBTools:
"""
Web scraper for the `Tools`_ page of the Smart Fantasy Baseball website.
.. _Tools: https://www.smartfantasybaseball.com/tools/
"""
def __init__(self):
self._base_address = "https://www.smartfantasybaseball.com/tools/"
class URLs(typing.NamedTuple):
"""
Contains URLs for view/downloading the player ID map data
.. py:attribute:: excel_download
Download player ID map and CHANGELOG as an Excel workbook
.. py:attribute:: web_view
View player ID map as a webpage
.. py:attribute:: csv_download
Download player ID map as a CSV file
.. py:attribute:: changelog_web_view
View player ID map CHANGELOG as a webpage
.. py:attribute:: changelog_csv_download
Download player ID map CHANGELOG as a CSV file
"""
excel_download: str
web_view: str
csv_download: str
changelog_web_view: str
changelog_csv_download: str
@property
def base_address(self) -> str:
"""
:return: The URL for the `Tools` page of the `Smart Fantasy Baseball` website
"""
return self._base_address
@property
def _soup(self) -> bs4.BeautifulSoup:
"""
:return: The parsed HTML document of :py:attr:`SFBB.base_address`
"""
return get_soup(self.base_address)
@property
def _element(self) -> bs4.Tag:
"""
:return: The HTML tag corresponding to the element containing the redirect URLs
"""
css = "div.entry-content > div > table tr:nth-child(2) > td:first-child"
element = self._soup.select_one(css)
return element
@property
def urls(self) -> URLs:
"""
:return: The redirect URLs for viewing/downloading the player ID map
"""
data = collections.defaultdict()
hrefs = [e.attrs.get("href") for e in self._element.select("a")]
(
data["excel_download"],
data["web_view"],
data["csv_download"],
data["changelog_web_view"],
data["changelog_csv_download"]
) = hrefs
        return self.URLs(**data)
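# --- Illustrative sketch (editor addition): typical use of SFBBTools. This
# performs a live HTTP request against smartfantasybaseball.com, so run it only
# when online; the field names come from the URLs namedtuple defined above.
if __name__ == "__main__":
    sfbb = SFBBTools()
    urls = sfbb.urls                   # scrapes the Tools page once
    print(urls.csv_download)           # direct CSV download of the player ID map
    print(urls.changelog_web_view)     # CHANGELOG as a web page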
# /DJQ_Distributions-1.0.tar.gz/DJQ_Distributions-1.0/DJQ_Distributions/Binomialdistribution.py
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Binomial(Distribution):
""" Binomial distribution class for calculating and
visualizing a Binomial distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats to be extracted from the data file
p (float) representing the probability of an event occurring
n (int) number of trials
TODO: Fill out all functions below
"""
def __init__(self, prob=.5, size=20):
self.n = size
self.p = prob
Distribution.__init__(self, self.calculate_mean(), self.calculate_stdev())
def calculate_mean(self):
"""Function to calculate the mean from p and n
Args:
None
Returns:
float: mean of the data set
"""
self.mean = self.p * self.n
return self.mean
def calculate_stdev(self):
"""Function to calculate the standard deviation from p and n.
Args:
None
Returns:
float: standard deviation of the data set
"""
self.stdev = math.sqrt(self.n * self.p * (1 - self.p))
return self.stdev
def replace_stats_with_data(self):
"""Function to calculate p and n from the data set
Args:
None
Returns:
float: the p value
float: the n value
"""
self.n = len(self.data)
self.p = 1.0 * sum(self.data) / len(self.data)
self.mean = self.calculate_mean()
self.stdev = self.calculate_stdev()
def plot_bar(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.bar(x = ['0', '1'], height = [(1 - self.p) * self.n, self.p * self.n])
plt.title('Bar Chart of Data')
plt.xlabel('outcome')
plt.ylabel('count')
def pdf(self, k):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
a = math.factorial(self.n) / (math.factorial(k) * (math.factorial(self.n - k)))
b = (self.p ** k) * (1 - self.p) ** (self.n - k)
return a * b
def plot_bar_pdf(self):
"""Function to plot the pdf of the binomial distribution
Args:
None
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
x = []
y = []
# calculate the x values to visualize
for i in range(self.n + 1):
x.append(i)
y.append(self.pdf(i))
# make the plots
plt.bar(x, y)
plt.title('Distribution of Outcomes')
plt.ylabel('Probability')
plt.xlabel('Outcome')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Binomial distributions with equal p
Args:
other (Binomial): Binomial instance
Returns:
Binomial: Binomial distribution
"""
try:
assert self.p == other.p, 'p values are not equal'
except AssertionError as error:
raise
result = Binomial()
result.n = self.n + other.n
result.p = self.p
result.calculate_mean()
result.calculate_stdev()
return result
def __repr__(self):
"""Function to output the characteristics of the Binomial instance
Args:
None
Returns:
            string: characteristics of the Binomial
"""
return "mean {}, standard deviation {}, p {}, n {}".\
            format(self.mean, self.stdev, self.p, self.n)
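# --- Illustrative sketch (editor addition): exercising the Binomial class with
# a small, self-contained example (no data file required).
if __name__ == "__main__":
    coin = Binomial(prob=0.4, size=25)
    print(coin.mean)      # 0.4 * 25 = 10.0
    print(coin.stdev)     # sqrt(25 * 0.4 * 0.6), about 2.449
    print(coin.pdf(10))   # probability of exactly 10 successes, about 0.161
    # Distributions with the same p can be added; their n values are summed.
    combined = coin + Binomial(prob=0.4, size=15)
    print(combined)       # mean 16.0, standard deviation about 3.10, p 0.4, n 40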
# /GenMotion-0.0.4-py3-none-any.whl/genmotion/algorithm/action_conditioned/utils/rotation_conversions.py
import functools
from typing import Optional
import torch
import torch.nn.functional as F
"""
The transformation matrices returned from the functions in this file assume
the points on which the transformation will be applied are column vectors.
i.e. the R matrix is structured as
R = [
[Rxx, Rxy, Rxz],
[Ryx, Ryy, Ryz],
[Rzx, Rzy, Rzz],
] # (3, 3)
This matrix can be applied to column vectors by post multiplication
by the points e.g.
points = [[0], [1], [2]] # (3 x 1) xyz coordinates of a point
transformed_points = R * points
To apply the same matrix to points which are row vectors, the R matrix
can be transposed and pre multiplied by the points:
e.g.
points = [[0, 1, 2]] # (1 x 3) xyz coordinates of a point
transformed_points = points * R.transpose(1, 0)
"""
def quaternion_to_matrix(quaternions):
"""
Convert rotations given as quaternions to rotation matrices.
Args:
quaternions: quaternions with real part first,
as tensor of shape (..., 4).
Returns:
Rotation matrices as tensor of shape (..., 3, 3).
"""
r, i, j, k = torch.unbind(quaternions, -1)
two_s = 2.0 / (quaternions * quaternions).sum(-1)
o = torch.stack(
(
1 - two_s * (j * j + k * k),
two_s * (i * j - k * r),
two_s * (i * k + j * r),
two_s * (i * j + k * r),
1 - two_s * (i * i + k * k),
two_s * (j * k - i * r),
two_s * (i * k - j * r),
two_s * (j * k + i * r),
1 - two_s * (i * i + j * j),
),
-1,
)
return o.reshape(quaternions.shape[:-1] + (3, 3))
def _copysign(a, b):
"""
Return a tensor where each element has the absolute value taken from the,
corresponding element of a, with sign taken from the corresponding
element of b. This is like the standard copysign floating-point operation,
but is not careful about negative 0 and NaN.
Args:
a: source tensor.
b: tensor whose signs will be used, of the same shape as a.
Returns:
Tensor of the same shape as a with the signs of b.
"""
signs_differ = (a < 0) != (b < 0)
return torch.where(signs_differ, -a, a)
def _sqrt_positive_part(x):
"""
Returns torch.sqrt(torch.max(0, x))
but with a zero subgradient where x is 0.
"""
ret = torch.zeros_like(x)
positive_mask = x > 0
ret[positive_mask] = torch.sqrt(x[positive_mask])
return ret
def matrix_to_quaternion(matrix):
"""
Convert rotations given as rotation matrices to quaternions.
Args:
matrix: Rotation matrices as tensor of shape (..., 3, 3).
Returns:
quaternions with real part first, as tensor of shape (..., 4).
"""
if matrix.size(-1) != 3 or matrix.size(-2) != 3:
        raise ValueError(f"Invalid rotation matrix shape {matrix.shape}.")
m00 = matrix[..., 0, 0]
m11 = matrix[..., 1, 1]
m22 = matrix[..., 2, 2]
o0 = 0.5 * _sqrt_positive_part(1 + m00 + m11 + m22)
x = 0.5 * _sqrt_positive_part(1 + m00 - m11 - m22)
y = 0.5 * _sqrt_positive_part(1 - m00 + m11 - m22)
z = 0.5 * _sqrt_positive_part(1 - m00 - m11 + m22)
o1 = _copysign(x, matrix[..., 2, 1] - matrix[..., 1, 2])
o2 = _copysign(y, matrix[..., 0, 2] - matrix[..., 2, 0])
o3 = _copysign(z, matrix[..., 1, 0] - matrix[..., 0, 1])
return torch.stack((o0, o1, o2, o3), -1)
def _axis_angle_rotation(axis: str, angle):
"""
Return the rotation matrices for one of the rotations about an axis
of which Euler angles describe, for each value of the angle given.
Args:
axis: Axis label "X" or "Y or "Z".
angle: any shape tensor of Euler angles in radians
Returns:
Rotation matrices as tensor of shape (..., 3, 3).
"""
cos = torch.cos(angle)
sin = torch.sin(angle)
one = torch.ones_like(angle)
zero = torch.zeros_like(angle)
if axis == "X":
R_flat = (one, zero, zero, zero, cos, -sin, zero, sin, cos)
if axis == "Y":
R_flat = (cos, zero, sin, zero, one, zero, -sin, zero, cos)
if axis == "Z":
R_flat = (cos, -sin, zero, sin, cos, zero, zero, zero, one)
return torch.stack(R_flat, -1).reshape(angle.shape + (3, 3))
def euler_angles_to_matrix(euler_angles, convention: str):
"""
Convert rotations given as Euler angles in radians to rotation matrices.
Args:
euler_angles: Euler angles in radians as tensor of shape (..., 3).
convention: Convention string of three uppercase letters from
{"X", "Y", and "Z"}.
Returns:
Rotation matrices as tensor of shape (..., 3, 3).
"""
if euler_angles.dim() == 0 or euler_angles.shape[-1] != 3:
raise ValueError("Invalid input euler angles.")
if len(convention) != 3:
raise ValueError("Convention must have 3 letters.")
if convention[1] in (convention[0], convention[2]):
raise ValueError(f"Invalid convention {convention}.")
for letter in convention:
if letter not in ("X", "Y", "Z"):
raise ValueError(f"Invalid letter {letter} in convention string.")
matrices = map(_axis_angle_rotation, convention, torch.unbind(euler_angles, -1))
return functools.reduce(torch.matmul, matrices)
def _angle_from_tan(
axis: str, other_axis: str, data, horizontal: bool, tait_bryan: bool
):
"""
Extract the first or third Euler angle from the two members of
the matrix which are positive constant times its sine and cosine.
Args:
axis: Axis label "X" or "Y or "Z" for the angle we are finding.
other_axis: Axis label "X" or "Y or "Z" for the middle axis in the
convention.
data: Rotation matrices as tensor of shape (..., 3, 3).
horizontal: Whether we are looking for the angle for the third axis,
which means the relevant entries are in the same row of the
rotation matrix. If not, they are in the same column.
tait_bryan: Whether the first and third axes in the convention differ.
Returns:
Euler Angles in radians for each matrix in data as a tensor
of shape (...).
"""
i1, i2 = {"X": (2, 1), "Y": (0, 2), "Z": (1, 0)}[axis]
if horizontal:
i2, i1 = i1, i2
even = (axis + other_axis) in ["XY", "YZ", "ZX"]
if horizontal == even:
return torch.atan2(data[..., i1], data[..., i2])
if tait_bryan:
return torch.atan2(-data[..., i2], data[..., i1])
return torch.atan2(data[..., i2], -data[..., i1])
def _index_from_letter(letter: str):
if letter == "X":
return 0
if letter == "Y":
return 1
if letter == "Z":
return 2
def matrix_to_euler_angles(matrix, convention: str):
"""
Convert rotations given as rotation matrices to Euler angles in radians.
Args:
matrix: Rotation matrices as tensor of shape (..., 3, 3).
convention: Convention string of three uppercase letters.
Returns:
Euler angles in radians as tensor of shape (..., 3).
"""
if len(convention) != 3:
raise ValueError("Convention must have 3 letters.")
if convention[1] in (convention[0], convention[2]):
raise ValueError(f"Invalid convention {convention}.")
for letter in convention:
if letter not in ("X", "Y", "Z"):
raise ValueError(f"Invalid letter {letter} in convention string.")
if matrix.size(-1) != 3 or matrix.size(-2) != 3:
        raise ValueError(f"Invalid rotation matrix shape {matrix.shape}.")
i0 = _index_from_letter(convention[0])
i2 = _index_from_letter(convention[2])
tait_bryan = i0 != i2
if tait_bryan:
central_angle = torch.asin(
matrix[..., i0, i2] * (-1.0 if i0 - i2 in [-1, 2] else 1.0)
)
else:
central_angle = torch.acos(matrix[..., i0, i0])
o = (
_angle_from_tan(
convention[0], convention[1], matrix[..., i2], False, tait_bryan
),
central_angle,
_angle_from_tan(
convention[2], convention[1], matrix[..., i0, :], True, tait_bryan
),
)
return torch.stack(o, -1)
def random_quaternions(
n: int, dtype: Optional[torch.dtype] = None, device=None, requires_grad=False
):
"""
Generate random quaternions representing rotations,
i.e. versors with nonnegative real part.
Args:
n: Number of quaternions in a batch to return.
dtype: Type to return.
device: Desired device of returned tensor. Default:
uses the current device for the default tensor type.
requires_grad: Whether the resulting tensor should have the gradient
flag set.
Returns:
Quaternions as tensor of shape (N, 4).
"""
o = torch.randn((n, 4), dtype=dtype, device=device, requires_grad=requires_grad)
s = (o * o).sum(1)
o = o / _copysign(torch.sqrt(s), o[:, 0])[:, None]
return o
def random_rotations(
n: int, dtype: Optional[torch.dtype] = None, device=None, requires_grad=False
):
"""
Generate random rotations as 3x3 rotation matrices.
Args:
n: Number of rotation matrices in a batch to return.
dtype: Type to return.
device: Device of returned tensor. Default: if None,
uses the current device for the default tensor type.
requires_grad: Whether the resulting tensor should have the gradient
flag set.
Returns:
Rotation matrices as tensor of shape (n, 3, 3).
"""
quaternions = random_quaternions(
n, dtype=dtype, device=device, requires_grad=requires_grad
)
return quaternion_to_matrix(quaternions)
def random_rotation(
dtype: Optional[torch.dtype] = None, device=None, requires_grad=False
):
"""
Generate a single random 3x3 rotation matrix.
Args:
dtype: Type to return
device: Device of returned tensor. Default: if None,
uses the current device for the default tensor type
requires_grad: Whether the resulting tensor should have the gradient
flag set
Returns:
Rotation matrix as tensor of shape (3, 3).
"""
return random_rotations(1, dtype, device, requires_grad)[0]
def standardize_quaternion(quaternions):
"""
Convert a unit quaternion to a standard form: one in which the real
part is non negative.
Args:
quaternions: Quaternions with real part first,
as tensor of shape (..., 4).
Returns:
Standardized quaternions as tensor of shape (..., 4).
"""
return torch.where(quaternions[..., 0:1] < 0, -quaternions, quaternions)
def quaternion_raw_multiply(a, b):
"""
Multiply two quaternions.
Usual torch rules for broadcasting apply.
Args:
a: Quaternions as tensor of shape (..., 4), real part first.
b: Quaternions as tensor of shape (..., 4), real part first.
Returns:
The product of a and b, a tensor of quaternions shape (..., 4).
"""
aw, ax, ay, az = torch.unbind(a, -1)
bw, bx, by, bz = torch.unbind(b, -1)
ow = aw * bw - ax * bx - ay * by - az * bz
ox = aw * bx + ax * bw + ay * bz - az * by
oy = aw * by - ax * bz + ay * bw + az * bx
oz = aw * bz + ax * by - ay * bx + az * bw
return torch.stack((ow, ox, oy, oz), -1)
def quaternion_multiply(a, b):
"""
Multiply two quaternions representing rotations, returning the quaternion
representing their composition, i.e. the versor with nonnegative real part.
Usual torch rules for broadcasting apply.
Args:
a: Quaternions as tensor of shape (..., 4), real part first.
b: Quaternions as tensor of shape (..., 4), real part first.
Returns:
The product of a and b, a tensor of quaternions of shape (..., 4).
"""
ab = quaternion_raw_multiply(a, b)
return standardize_quaternion(ab)
def quaternion_invert(quaternion):
"""
Given a quaternion representing rotation, get the quaternion representing
its inverse.
Args:
quaternion: Quaternions as tensor of shape (..., 4), with real part
first, which must be versors (unit quaternions).
Returns:
The inverse, a tensor of quaternions of shape (..., 4).
"""
return quaternion * quaternion.new_tensor([1, -1, -1, -1])
def quaternion_apply(quaternion, point):
"""
Apply the rotation given by a quaternion to a 3D point.
Usual torch rules for broadcasting apply.
Args:
quaternion: Tensor of quaternions, real part first, of shape (..., 4).
point: Tensor of 3D points of shape (..., 3).
Returns:
Tensor of rotated points of shape (..., 3).
"""
if point.size(-1) != 3:
        raise ValueError(f"Points are not in 3D, {point.shape}.")
real_parts = point.new_zeros(point.shape[:-1] + (1,))
point_as_quaternion = torch.cat((real_parts, point), -1)
out = quaternion_raw_multiply(
quaternion_raw_multiply(quaternion, point_as_quaternion),
quaternion_invert(quaternion),
)
return out[..., 1:]
def axis_angle_to_matrix(axis_angle):
"""
Convert rotations given as axis/angle to rotation matrices.
Args:
axis_angle: Rotations given as a vector in axis angle form,
as a tensor of shape (..., 3), where the magnitude is
the angle turned anticlockwise in radians around the
vector's direction.
Returns:
Rotation matrices as tensor of shape (..., 3, 3).
"""
return quaternion_to_matrix(axis_angle_to_quaternion(axis_angle))
def matrix_to_axis_angle(matrix):
"""
Convert rotations given as rotation matrices to axis/angle.
Args:
matrix: Rotation matrices as tensor of shape (..., 3, 3).
Returns:
Rotations given as a vector in axis angle form, as a tensor
of shape (..., 3), where the magnitude is the angle
turned anticlockwise in radians around the vector's
direction.
"""
return quaternion_to_axis_angle(matrix_to_quaternion(matrix))
def axis_angle_to_quaternion(axis_angle):
"""
Convert rotations given as axis/angle to quaternions.
Args:
axis_angle: Rotations given as a vector in axis angle form,
as a tensor of shape (..., 3), where the magnitude is
the angle turned anticlockwise in radians around the
vector's direction.
Returns:
quaternions with real part first, as tensor of shape (..., 4).
"""
angles = torch.norm(axis_angle, p=2, dim=-1, keepdim=True)
half_angles = 0.5 * angles
eps = 1e-6
small_angles = angles.abs() < eps
sin_half_angles_over_angles = torch.empty_like(angles)
sin_half_angles_over_angles[~small_angles] = (
torch.sin(half_angles[~small_angles]) / angles[~small_angles]
)
# for x small, sin(x/2) is about x/2 - (x/2)^3/6
# so sin(x/2)/x is about 1/2 - (x*x)/48
sin_half_angles_over_angles[small_angles] = (
0.5 - (angles[small_angles] * angles[small_angles]) / 48
)
quaternions = torch.cat(
[torch.cos(half_angles), axis_angle * sin_half_angles_over_angles], dim=-1
)
return quaternions
def quaternion_to_axis_angle(quaternions):
"""
Convert rotations given as quaternions to axis/angle.
Args:
quaternions: quaternions with real part first,
as tensor of shape (..., 4).
Returns:
Rotations given as a vector in axis angle form, as a tensor
of shape (..., 3), where the magnitude is the angle
turned anticlockwise in radians around the vector's
direction.
"""
norms = torch.norm(quaternions[..., 1:], p=2, dim=-1, keepdim=True)
half_angles = torch.atan2(norms, quaternions[..., :1])
angles = 2 * half_angles
eps = 1e-6
small_angles = angles.abs() < eps
sin_half_angles_over_angles = torch.empty_like(angles)
sin_half_angles_over_angles[~small_angles] = (
torch.sin(half_angles[~small_angles]) / angles[~small_angles]
)
# for x small, sin(x/2) is about x/2 - (x/2)^3/6
# so sin(x/2)/x is about 1/2 - (x*x)/48
sin_half_angles_over_angles[small_angles] = (
0.5 - (angles[small_angles] * angles[small_angles]) / 48
)
return quaternions[..., 1:] / sin_half_angles_over_angles
def rotation_6d_to_matrix(d6: torch.Tensor) -> torch.Tensor:
"""
Converts 6D rotation representation by Zhou et al. [1] to rotation matrix
using Gram--Schmidt orthogonalisation per Section B of [1].
Args:
d6: 6D rotation representation, of size (*, 6)
Returns:
batch of rotation matrices of size (*, 3, 3)
[1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H.
On the Continuity of Rotation Representations in Neural Networks.
IEEE Conference on Computer Vision and Pattern Recognition, 2019.
Retrieved from http://arxiv.org/abs/1812.07035
"""
a1, a2 = d6[..., :3], d6[..., 3:]
b1 = F.normalize(a1, dim=-1)
b2 = a2 - (b1 * a2).sum(-1, keepdim=True) * b1
b2 = F.normalize(b2, dim=-1)
b3 = torch.cross(b1, b2, dim=-1)
return torch.stack((b1, b2, b3), dim=-2)
def matrix_to_rotation_6d(matrix: torch.Tensor) -> torch.Tensor:
"""
Converts rotation matrices to 6D rotation representation by Zhou et al. [1]
by dropping the last row. Note that 6D representation is not unique.
Args:
matrix: batch of rotation matrices of size (*, 3, 3)
Returns:
6D rotation representation, of size (*, 6)
[1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H.
On the Continuity of Rotation Representations in Neural Networks.
IEEE Conference on Computer Vision and Pattern Recognition, 2019.
Retrieved from http://arxiv.org/abs/1812.07035
"""
    return matrix[..., :2, :].clone().reshape(*matrix.size()[:-2], 6)
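# --- Illustrative sketch (editor addition): a quick consistency check using
# only functions defined in this module; tolerances account for float32 error.
if __name__ == "__main__":
    torch.manual_seed(0)
    R = random_rotations(4)                      # (4, 3, 3) rotation matrices
    q = matrix_to_quaternion(R)                  # (4, 4), real part first
    print(torch.allclose(R, quaternion_to_matrix(q), atol=1e-5))           # True

    angles = matrix_to_euler_angles(R, "XYZ")    # (4, 3) radians, Tait-Bryan
    print(torch.allclose(R, euler_angles_to_matrix(angles, "XYZ"), atol=1e-5))

    d6 = matrix_to_rotation_6d(R)                # Zhou et al. 6D representation
    print(torch.allclose(R, rotation_6d_to_matrix(d6), atol=1e-5))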
# /NlvWxPython-4.2.0-cp37-cp37m-win_amd64.whl/wx/py/magic.py
__author__ = "David N. Mashburn <[email protected]>"
# created 07/01/2009
import keyword
from .parse import testForContinuations
aliasDict = {}
#DNM
# TODO : Still Refining this... seems to be ok for now... still finding gotchas, though!
# TODO : Multi-line strings seem to be correctly broken into commands by PyCrust(PySlices)
# TODO : Is there a better version of ls, cd, pwd, etc that could be used?
def magicSingle(command):
if command=='': # Pass if command is blank
return command
first_space=command.find(' ')
if command[0]==' ': # Pass if command begins with a space
pass
elif command[0]=='?': # Do help if starts with ?
command='help('+command[1:]+')'
    elif command[0]=='!': # Run as a shell command via sx() if starts with !
command='sx("'+command[1:]+'")'
elif command in ('ls','pwd'): # automatically use ls and pwd with no arguments
command=command+'()'
elif command[:3] in ('ls ','cd '): # when using the 'ls ' or 'cd ' constructs, fill in both parentheses and quotes
command=command[:2]+'("'+command[3:]+'")'
elif command[:6] == 'alias ':
c = command[6:].lstrip().split(' ')
if len(c)<2:
#print('Not enough arguments for alias!')
command = ''
else:
n,v = c[0],' '.join(c[1:])
aliasDict[n]=v
command = ''
elif command.split(' ')[0] in aliasDict:
c = command.split(' ')
if len(c)<2:
command = 'sx("'+aliasDict[c[0]]+'")'
else:
command = 'sx("'+aliasDict[c[0]]+' '+' '.join(c[1:])+'")'
elif first_space!=-1: # if there is at least one space, add parentheses at beginning and end
cmds=command.split(' ')
if len(cmds)>1:
wd1=cmds[0]
wd2=cmds[1]
i=1
while wd2=='':
i+=1
if len(cmds)==i:
break
wd2=cmds[i]
if wd2=='':
return command
if (wd1[0].isalpha() or wd1[0]=='_') and (wd2[0].isalnum() or (wd2[0] in """."'_""")) and not keyword.iskeyword(wd1) and not keyword.iskeyword(wd2):
if wd1.replace('.','').replace('_','').isalnum():
command=wd1+'('+command[(first_space+1):]+')' # add parentheses where the first space was and at the end... hooray!
return command
def magic(command):
continuations = testForContinuations(command)
if len(continuations)==2: # Error case...
return command
elif len(continuations)==4:
stringContinuationList,indentationBlockList, \
lineContinuationList,parentheticalContinuationList = continuations
commandList=[]
firstLine = True
for i in command.split('\n'):
if firstLine:
commandList.append(magicSingle(i))
elif stringContinuationList.pop(0)==False and \
indentationBlockList.pop(0)==False and \
lineContinuationList.pop(0)==False and \
parentheticalContinuationList.pop(0)==False:
commandList.append(magicSingle(i)) # unless this is in a larger expression, use magic
else:
commandList.append(i)
firstLine=False
    return '\n'.join(commandList)
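# --- Illustrative sketch (editor addition): what the shell-style rewriting
# produces for a few typical single-line inputs (multi-line commands go through
# magic(), which applies magicSingle() per logical line).
if __name__ == "__main__":
    print(magicSingle("?dict"))        # -> help(dict)
    print(magicSingle("!echo hi"))     # -> sx("echo hi")
    print(magicSingle("ls"))           # -> ls()
    print(magicSingle("cd /tmp"))      # -> cd("/tmp")
    print(magicSingle("plot x y"))     # -> plot(x y)   (arguments passed verbatim)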
# /Apilisk-0.2.1.tar.gz/Apilisk-0.2.1/apilisk/runner.py
import json
import copy
import pytz
import sys
from datetime import datetime
from apilisk.curl_caller import CurlCaller
from apilisk.printer import vprint, Colors
from apilisk.exceptions import ObjectNotFound, ApiliskException
from apiwatcher_pyclient.client import Client
class Runner(object):
def __init__(self, project_cfg, dataset_id):
"""
        Initializes the runner from a project configuration and an optional dataset id.
"""
self.project_hash = project_cfg["project_hash"]
self.project_name = project_cfg["project_name"]
self.testcases = {
str(item["id"]): item for item in project_cfg["testcases"]
}
self.requests = {
str(item["id"]): item for item in project_cfg["requests"]
}
self.dataset = None
if dataset_id is not None:
for dts in project_cfg["datasets"]:
if dts["id"] == dataset_id:
self.dataset = copy.deepcopy(dts)
if self.dataset == None:
raise ObjectNotFound(
u"Dataset with id {0} has not been found".format(
dataset_id
)
)
def run_project(self, debug=False, include_data=False):
"""
Runs testcases from project one project
"""
results = []
time_start = datetime.now()
total_count = len(self.testcases)
success_count = 0
failed_count = 0
vprint(
1, None,
u"## Starting project {0} ({1})".format(
self.project_name, self.project_hash
)
)
for tc_id in self.testcases:
res = self.run_one_testcase(tc_id, debug, include_data)
if res["status"] == "success":
success_count += 1
else:
failed_count += 1
results.append(res)
duration_sec = (datetime.now() - time_start).total_seconds()
if failed_count > 0:
vprint(
1, Colors.RED,
u"## Failed {0} testcases out of {1} in {2} sec.".format(
failed_count, total_count, duration_sec
)
)
else:
vprint(
1, Colors.GREEN, u"## Success in {0} sec".format(duration_sec)
)
return {
"project_hash": self.project_hash,
"total_count": total_count,
"success_count": success_count,
"failed_count": failed_count,
"duration_sec": duration_sec,
"results": results
}
def run_one_testcase(self, tc_id, debug=False, include_data=False):
"""
Runs a single testcase
"""
# Merge dataset variables and request variables
variables = {
"var": copy.deepcopy(
self.dataset["variables"]
) if self.dataset is not None else {},
"req": []
}
auth = self.testcases[tc_id]["authentication"]
status = "success"
results = []
time_start = datetime.now()
vprint(
1, None, u"# {0} ... ".format(
self.testcases[tc_id]["name"]
), True
)
for step in self.testcases[tc_id]["steps"]:
if step["action"] == "call_request":
caller = CurlCaller(
step["data"], variables, authentication=auth, debug=debug
)
result, req_var = caller.handle_and_get_report(
include_data=include_data
)
variables["req"].append(req_var)
results.append(result)
if result["status"] == "failed":
status = "failed"
break
if status == 'success':
vprint(
1, Colors.GREEN, u"\r# {0} ... SUCCESS".format(
self.testcases[tc_id]["name"]
)
)
else:
vprint(
1, Colors.RED, u"\r# {0} ... FAILED".format(
self.testcases[tc_id]["name"]
)
)
return {
"testcase_id": int(tc_id),
"steps_results": results,
"status": status,
"duration_sec": (datetime.now() - time_start).total_seconds()
        }
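# --- Illustrative sketch (editor addition): the project_cfg shape Runner
# expects, inferred from the attribute accesses above. The step, authentication
# and variable contents are placeholders, not the full Apilisk schema.
if __name__ == "__main__":
    project_cfg = {
        "project_hash": "abc123",
        "project_name": "Demo API",
        "testcases": [
            {"id": 1, "name": "Ping", "authentication": None, "steps": []},
        ],
        "requests": [],
        "datasets": [
            {"id": 7, "variables": {"base_url": "https://example.com"}},
        ],
    }
    runner = Runner(project_cfg, dataset_id=7)
    report = runner.run_project()
    print(report["success_count"], "of", report["total_count"], "passed")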
/ClueDojo-1.4.3-1.tar.gz/ClueDojo-1.4.3-1/src/cluedojo/static/dijit/nls/dijit-all_hu.js | dojo.provide("dijit.nls.dijit-all_hu");dojo.provide("dojo.nls.colors");dojo.nls.colors._built=true;dojo.provide("dojo.nls.colors.hu");dojo.nls.colors.hu={"lightsteelblue":"világos acélkék","orangered":"narancsvörös","midnightblue":"éjkék","cadetblue":"kadétkék","seashell":"kagyló","slategrey":"palaszürke","coral":"korall","darkturquoise":"sötét türkizkék","antiquewhite":"antik fehér","mediumspringgreen":"közepes tavaszzöld","salmon":"lazacszín","darkgrey":"sötétszürke","ivory":"elefántcsont","greenyellow":"zöldessárga","mistyrose":"halvány rózsaszín","lightsalmon":"világos lazacszín","silver":"ezüst","dimgrey":"halványszürke","orange":"narancssárga","white":"fehér","navajowhite":"navajo fehér","royalblue":"királykék","deeppink":"sötétrózsaszín","lime":"lime","oldlace":"régi csipke","chartreuse":"chartreuse","darkcyan":"sötét ciánkék","yellow":"sárga","linen":"vászonfehér","olive":"olajzöld","gold":"arany","lawngreen":"fűzöld","lightyellow":"világossárga","tan":"rozsdabarna","darkviolet":"sötét ibolyaszín","lightslategrey":"világos palaszürke","grey":"szürke","darkkhaki":"sötét khakiszín","green":"zöld","deepskyblue":"sötét égszínkék","aqua":"vízszín","sienna":"vörösesbarna","mintcream":"mentaszósz","rosybrown":"barnásrózsaszín","mediumslateblue":"közepes palakék","magenta":"bíbor","lightseagreen":"világos tengerzöld","cyan":"ciánkék","olivedrab":"olajzöld drapp","darkgoldenrod":"sötét aranyvessző","slateblue":"palakék","mediumaquamarine":"közepes akvamarin","lavender":"levendula","mediumseagreen":"közepes tengerzöld","maroon":"gesztenyebarna","darkslategray":"sötét palaszürke","mediumturquoise":"közepes türkizkék","ghostwhite":"szellemfehér","darkblue":"sötétkék","mediumvioletred":"közepes ibolyavörös","brown":"barna","lightgray":"világosszürke","sandybrown":"homokbarna","pink":"rózsaszín","firebrick":"téglavörös","indigo":"indigó","snow":"hó","darkorchid":"sötét orchidea","turquoise":"türkizkék","chocolate":"csokoládé","springgreen":"tavaszzöld","moccasin":"mokkaszín","navy":"tengerészkék","lemonchiffon":"sárga műselyem","teal":"pávakék","floralwhite":"virágfehér","cornflowerblue":"búzavirágkék","paleturquoise":"halvány türkizkék","purple":"lila","gainsboro":"gainsboro","plum":"szilvakék","red":"vörös","blue":"kék","forestgreen":"erdőzöld","darkgreen":"sötétzöld","honeydew":"mézharmat","darkseagreen":"sötét tengerzöld","lightcoral":"világos korall","palevioletred":"halvány ibolyavörös","mediumpurple":"közepes lila","saddlebrown":"nyeregbarna","darkmagenta":"sötétbíbor","thistle":"bogáncs","whitesmoke":"fehér füst","wheat":"búza","violet":"ibolyaszín","lightskyblue":"világos égszínkék","goldenrod":"aranyvessző","mediumblue":"közepes kék","skyblue":"égszínkék","crimson":"karmazsinvörös","darksalmon":"sötét lazacszín","darkred":"sötétvörös","darkslategrey":"sötét palaszürke","peru":"peru","lightgrey":"világosszürke","lightgoldenrodyellow":"világos aranyvessző sárga","blanchedalmond":"hámozott mandula","aliceblue":"Alice kék","bisque":"porcelán","slategray":"palaszürke","palegoldenrod":"halvány aranyvessző","darkorange":"sötét narancssárga","aquamarine":"akvamarin","lightgreen":"világoszöld","burlywood":"nyersfa","dodgerblue":"dodger kék","darkgray":"sötétszürke","lightcyan":"világos ciánkék","powderblue":"púderkék","blueviolet":"ibolyakék","orchid":"orchidea","dimgray":"halványszürke","beige":"bézs","fuchsia":"fukszia","lavenderblush":"pirosas levendula","hotpink":"meleg 
rózsaszín","steelblue":"acélkék","tomato":"paradicsom","lightpink":"világos rózsaszín","limegreen":"limezöld","indianred":"indiánvörös","papayawhip":"papayahab","lightslategray":"világos palaszürke","gray":"szürke","mediumorchid":"közepes orchidea","cornsilk":"kukoricahaj","black":"fekete","seagreen":"tengerzöld","darkslateblue":"sötét palakék","khaki":"khakiszín","lightblue":"világoskék","palegreen":"halványzöld","azure":"azúrkék","peachpuff":"barackszín","darkolivegreen":"sötét olajzöld","yellowgreen":"sárgászöld"};dojo.provide("dijit.nls.loading");dijit.nls.loading._built=true;dojo.provide("dijit.nls.loading.hu");dijit.nls.loading.hu={"loadingState":"Betöltés...","errorState":"Sajnálom, hiba történt"};dojo.provide("dijit.nls.common");dijit.nls.common._built=true;dojo.provide("dijit.nls.common.hu");dijit.nls.common.hu={"buttonOk":"OK","buttonCancel":"Mégse","buttonSave":"Mentés","itemClose":"Bezárás"};dojo.provide("dijit._editor.nls.commands");dijit._editor.nls.commands._built=true;dojo.provide("dijit._editor.nls.commands.hu");dijit._editor.nls.commands.hu={"removeFormat":"Formázás eltávolítása","copy":"Másolás","paste":"Beillesztés","selectAll":"Összes kijelölése","insertOrderedList":"Számozott lista","insertTable":"Táblázat beszúrása/szerkesztése","print":"Nyomtatás","underline":"Aláhúzott","foreColor":"Előtérszín","htmlToggle":"HTML forrás","formatBlock":"Bekezdés stílusa","newPage":"Új oldal","insertHorizontalRule":"Vízszintes vonalzó","delete":"Törlés","insertUnorderedList":"Felsorolásjeles lista","tableProp":"Táblázat tulajdonságai","insertImage":"Kép beszúrása","superscript":"Felső index","subscript":"Alsó index","createLink":"Hivatkozás létrehozása","undo":"Visszavonás","fullScreen":"Váltás teljes képernyőre","italic":"Dőlt","fontName":"Betűtípus","justifyLeft":"Balra igazítás","unlink":"Hivatkozás eltávolítása","toggleTableBorder":"Táblázatszegély ki-/bekapcsolása","viewSource":"HTML forrás megjelenítése","fontSize":"Betűméret","systemShortcut":"A(z) \"${0}\" művelet a böngészőben csak billentyűparancs használatával érhető el. 
Használja a következőt: ${1}.","indent":"Behúzás","redo":"Újra","strikethrough":"Áthúzott","justifyFull":"Sorkizárás","justifyCenter":"Középre igazítás","hiliteColor":"Háttérszín","deleteTable":"Táblázat törlése","outdent":"Negatív behúzás","cut":"Kivágás","plainFormatBlock":"Bekezdés stílusa","toggleDir":"Irány váltókapcsoló","bold":"Félkövér","tabIndent":"Tab behúzás","justifyRight":"Jobbra igazítás","appleKey":"⌘${0}","ctrlKey":"ctrl+${0}"};dojo.provide("dojo.cldr.nls.number");dojo.cldr.nls.number._built=true;dojo.provide("dojo.cldr.nls.number.hu");dojo.cldr.nls.number.hu={"group":" ","percentSign":"%","exponential":"E","scientificFormat":"#E0","list":";","infinity":"∞","patternDigit":"#","minusSign":"-","decimal":",","nan":"NaN","nativeZeroDigit":"0","perMille":"‰","decimalFormat":"#,##0.###","currencyFormat":"#,##0.00 ¤","plusSign":"+","currencySpacing-afterCurrency-currencyMatch":"[:letter:]","currencySpacing-beforeCurrency-surroundingMatch":"[:digit:]","currencySpacing-afterCurrency-insertBetween":" ","currencySpacing-afterCurrency-surroundingMatch":"[:digit:]","currencySpacing-beforeCurrency-currencyMatch":"[:letter:]","percentFormat":"#,##0%","currencySpacing-beforeCurrency-insertBetween":" "};dojo.provide("dijit.form.nls.validate");dijit.form.nls.validate._built=true;dojo.provide("dijit.form.nls.validate.hu");dijit.form.nls.validate.hu={"rangeMessage":"Az érték kívül van a megengedett tartományon.","invalidMessage":"A megadott érték érvénytelen.","missingMessage":"Meg kell adni egy értéket."};dojo.provide("dojo.cldr.nls.currency");dojo.cldr.nls.currency._built=true;dojo.provide("dojo.cldr.nls.currency.hu");dojo.cldr.nls.currency.hu={"HKD_displayName":"Hongkongi dollár","CHF_displayName":"Svájci frank","JPY_symbol":"¥","CAD_displayName":"Kanadai dollár","CNY_displayName":"Kínai jüan renminbi","USD_symbol":"$","AUD_displayName":"Ausztrál dollár","JPY_displayName":"Japán jen","USD_displayName":"USA dollár","GBP_displayName":"Brit font sterling","EUR_displayName":"Euro","CHF_symbol":"Fr.","HKD_symbol":"HK$","CAD_symbol":"CA$","EUR_symbol":"€","CNY_symbol":"CN¥","GBP_symbol":"£","AUD_symbol":"AU$"};dojo.provide("dojo.cldr.nls.gregorian");dojo.cldr.nls.gregorian._built=true;dojo.provide("dojo.cldr.nls.gregorian.hu");dojo.cldr.nls.gregorian.hu={"field-dayperiod":"napszak","field-minute":"perc","eraNames":["időszámításunk előtt","időszámításunk szerint"],"field-weekday":"hét napja","dateFormatItem-MMdd":"MM.dd.","days-standAlone-wide":["vasárnap","hétfő","kedd","szerda","csütörtök","péntek","szombat"],"dateFormatItem-MMM":"LLL","months-standAlone-narrow":["J","F","M","Á","M","J","J","A","S","O","N","D"],"field-era":"éra","field-hour":"óra","quarters-standAlone-abbr":["N1","N2","N3","N4"],"timeFormat-full":"H:mm:ss zzzz","months-standAlone-abbr":["jan.","febr.","márc.","ápr.","máj.","jún.","júl.","aug.","szept.","okt.","nov.","dec."],"days-standAlone-narrow":["V","H","K","S","C","P","S"],"eraAbbr":["i. e.","i. sz."],"dateFormatItem-yyyyMM":"yyyy.MM","dateFormatItem-yyyyMMMM":"y. MMMM","dateFormat-long":"y. MMMM d.","timeFormat-medium":"H:mm:ss","field-zone":"zóna","dateFormatItem-Hm":"HH:mm","dateFormat-medium":"yyyy.MM.dd.","quarters-standAlone-wide":["I. negyedév","II. negyedév","III. negyedév","IV. 
negyedév"],"field-year":"év","quarters-standAlone-narrow":["1","2","3","4"],"months-standAlone-wide":["január","február","március","április","május","június","július","augusztus","szeptember","október","november","december"],"field-week":"hét","dateFormatItem-MMMd":"MMM d.","dateFormatItem-yyQ":"yy/Q","timeFormat-long":"H:mm:ss z","months-format-abbr":["jan.","febr.","márc.","ápr.","máj.","jún.","júl.","aug.","szept.","okt.","nov.","dec."],"timeFormat-short":"H:mm","field-month":"hónap","dateFormatItem-MMMMd":"MMMM d.","quarters-format-abbr":["N1","N2","N3","N4"],"days-format-abbr":["V","H","K","Sze","Cs","P","Szo"],"pm":"du.","dateFormatItem-mmss":"mm:ss","dateFormatItem-M":"L","days-format-narrow":["V","H","K","S","C","P","S"],"field-second":"másodperc","field-day":"nap","dateFormatItem-MEd":"M. d., E","months-format-narrow":["J","F","M","Á","M","J","J","A","S","O","N","D"],"am":"de.","days-standAlone-abbr":["V","H","K","Sze","Cs","P","Szo"],"dateFormat-short":"yyyy.MM.dd.","dateFormat-full":"y. MMMM d., EEEE","dateFormatItem-Md":"M. d.","months-format-wide":["január","február","március","április","május","június","július","augusztus","szeptember","október","november","december"],"dateFormatItem-d":"d","quarters-format-wide":["I. negyedév","II. negyedév","III. negyedév","IV. negyedév"],"days-format-wide":["vasárnap","hétfő","kedd","szerda","csütörtök","péntek","szombat"],"eraNarrow":["i. e.","i. sz."],"dateFormatItem-yQQQ":"y QQQ","dateFormatItem-yMEd":"EEE, y-M-d","dateFormatItem-MMMEd":"E MMM d","dateTimeFormats-appendItem-Day-Of-Week":"{0} {1}","dateTimeFormat-medium":"{1} {0}","dateFormatItem-EEEd":"d EEE","dateTimeFormats-appendItem-Second":"{0} ({2}: {1})","dateFormatItem-yM":"y-M","dateFormatItem-yMMM":"y MMM","dateFormatItem-yQ":"y Q","dateTimeFormats-appendItem-Era":"{0} {1}","dateTimeFormats-appendItem-Week":"{0} ({2}: {1})","dateFormatItem-ms":"mm:ss","quarters-format-narrow":["1","2","3","4"],"dateTimeFormat-long":"{1} {0}","dateTimeFormat-full":"{1} {0}","dateFormatItem-yMMMM":"y MMMM","dateTimeFormats-appendItem-Day":"{0} ({2}: {1})","dateFormatItem-y":"y","dateFormatItem-hm":"h:mm a","dateTimeFormats-appendItem-Year":"{0} {1}","dateTimeFormats-appendItem-Hour":"{0} ({2}: {1})","dateTimeFormats-appendItem-Quarter":"{0} ({2}: {1})","dateTimeFormats-appendItem-Month":"{0} ({2}: {1})","dateFormatItem-MMMMEd":"E MMMM d","dateTimeFormats-appendItem-Minute":"{0} ({2}: {1})","dateFormatItem-yMMMEd":"EEE, y MMM d","dateTimeFormats-appendItem-Timezone":"{0} {1}","dateTimeFormat-short":"{1} {0}","dateFormatItem-Hms":"H:mm:ss","dateFormatItem-hms":"h:mm:ss a"};dojo.provide("dijit.form.nls.ComboBox");dijit.form.nls.ComboBox._built=true;dojo.provide("dijit.form.nls.ComboBox.hu");dijit.form.nls.ComboBox.hu={"previousMessage":"Előző menüpontok","nextMessage":"További menüpontok"}; | PypiClean |
/Nescient-0.9.0-cp36-cp36m-win_amd64.whl/nescient/resources/banner.py | BANNER_DATA = '''R0lGODlhBAJEAOf/AAABAAACAAIAGQEEAAMBIgIFAQQHAwAGKAALBAENBwAKLgIOCAQKLwQMLAAONgATDAQPKAAROAIVDwASPwAZEgEUQQEaEwIVQgIaFAAX
QgAcFQAdFwUXRAYXRQQeEwEfGAUfFAAhFQAhGQAhGgEiFgAiGgIjFwMkGAQlGQUhQwQmGgUnGwApGwApHAYoHAAqHQArHgAsHwAtHwEuIAIuIQArSwMvIgQw
IwQxIwUyJAA0JQYzJQA1JgA1JwA2JwA3KAA4KQA5KgE6KwI7LAM8LAA+KQQ9LQU9LgA9QgA/LwA8URw0UgBAMABBMR01Ux41VABCMQJCLRg3WgFARQBDMgs/
PwRDLSA3VQVELgBERABESAZFLyQ4XAhFMA1BUR07XglGMQBJMgBHSwpHMgBKMx89YABLNAxIMwBMNR8/XCE+Yic9XABNNgBLTwBONwBMUAJPOANQOQJPUwVR
OQBQUwRPUwZSOgVQVAhSOwBVPQdRVQpTPABWPgBTYQBXPwBYPwBWWQBZQABaQQBbQgFcQwRZYgJdRAFcXwReRQZfRgBiRwhgRwBjSABkSQBlSgBmSwBjagBn
TBteYghnRwBpTgBqTwpoSABoahFkZgBpaw5pSgBtTAFqbABuTRBqSgBvTgNrbRJrSwBwTwBxUBNsTAdtbgJyUSZmagVzUgd0Ux9sbwp1VAB5Vgx2VQB6VyFu
cQB7WA93VSNvcgB8WQB9WgB+WyVxdACAXACBXSdzdQB+gwOCXip1eAaDXwCCiAmEYC13egCIYwyFYQCJZCR8fi95fACKZA+GYgWFigCLZSZ+gACMZgCNZzJ8
fyh/gQCOaACPaSmBggCQahCOYwSRawCUZy6EhgCVaC+FhwCWaQCXajGHiAKYaxmSZwaZbDOJijSKiwmabQ2bbiuPjxCcbziNjgChcgCicxOdcACjdDCSkgCk
dRWfcQCldgCmdxigcgCneCqXnAOoeS2YngepejCaoAuqewCvfg+rfDOdogCwfwCxgBKsfTWfpM7QzP///yH5BAEKAP8ALAAAAAAEAkQAAAj+AP0JHEiwoMGD
CBMqXMiwocOHECNKnEixosWLGDNq3Mixo8ePIEOKHEmypMmTKFOqXMmSZYCXMGPKnEmzps2bOGMOq9krp8+fQIMKHUr0Jo+iSJMqXcq0qdOnUKMmJahP6ksj
VmHupNmz6KqsYMMafamj5oyfKqCuEMuUjM04c+awnYt0K92nVO9CtSuza9JUThvpjem2JhamRwf7XPvzRk0ePmD+CPJSyBAimHFagbmFppmacF7KJWoIZiKY
jVLfjOSpEsxNMT/B/CTqJeCZsWDCejmLVq3fL3cJ39VVmNaXfGEymzZtZrdq1WJmg+kN6LlzQPHh4zczr0ztMN3+BDjzcsvhl0mKEAkwRMjMH5WDuIdppP5L
KC+7vZQmDVqAZ83NZMwwdglj3Et+7YILTLUEMItMrsSSWwBfrZJKKqjI9IlsL5GySSgBVOIaTJNM8pIjqQlGEyKGlPbSIILA9IcffsSEBx6jBRBHHDGxYcZn
ZpBBRhjldRaAeVhsFsVLTFwVQBEyBUGZZD4ktsNLOeCAA0xnvQTDlzC1sEILL6UVwAknmBBTCCWAMJMFFtjEWFKQSTblfDbh99J5R3pWE48B5CiUiwGcFoCK
h9ZkCUyevATbbDHVZtOEAezm4Ey7zPTLcQEkFwAzMD0Tk34vXQPTdC9VZ91P+MDEXUz+3rn6UqvjkRfAeU0GsF4A88EXk6++3ilTripK85Kon8pkDHIvHfPS
gV1lGsCCAdRCC4QyfUWhTRw+2mgA3y4awCMn2mQoTIO8FGMAf7xUYwB4yDQaoGzIVFgAY5Sn70tLBpCrfTRN6WtiZQWQw0tbBtClDDK9IJOZJ7ykZgAhvORm
TBq8VEBNjJEZwAtgvnTWWY5hqcOVARwVGXy+DjGTEUkwCUUU/R5m5JFjEBnAvWawIR4ccPA4Wrzw5pFHAH60C5Mgg6T7UmnnIqqiIy+RG4C4IToaW6QvZUjh
KpS6EoDYvGFKk3HOImfMsi+xDZN/AVAjk6kvaRMANzCF8xL+OTChg445MWEXgODuwCQPPTDRGsCrMMX60qvf5nsfFOlV9tLAMhWcWGSV4YlVAP0aGwDcoMbE
9lZpQxvcS9ReO5MrZG8bwG0abtjhS9+GO8kjViM6EyIyOb2u0u/CS3Sgc8QRWgD1EsbZFtDvCZOe/9IkhJSXv0QwlgiLDFPIHz/8UsQBTFxxABfDpAGcG9O0
1pwOBwCDyF2WfHAAKKscAMudZ/Ykk/eRyc1gorPPBMCAzQOUXHJEtHcpbV1OCwDwznUo1ZQrACa6GqO0BimYSIp2AQjb2GDSG+CwbjgyORCzAsA2t70EbnKL
Cd0CYDe8vURvAeBbANDxEh7CRHD+gmvHS+DxEsTNajvdGUhVYvKq5t0sV5J5CecKBpP7FYyKL5nSrmLGwhe+pHQwOV2znoWg1VXrJQ2qyYS0pa2tBaBbuIuj
BmMiNZkALybCewnx5CUamTQPJveSHGdg0q9ccTEmLguAwLRHlkYGoGRd+t5L4gcTFIxPYi8pgcVkkrGbjCkm8ZufwmBiv5fkL3uKtFxMoBQALupJegMUZGEQ
+BIFCqqBesxlACIowZgwAjUXtBrWRvSol3DoJR+ESRtfErsAuC4m0kphANLWqbZZMyaiiyFMZkjDmOBQHD0M5w9fEsQhFnFWsoKVEvWhj3u8yhF/HGApSsEJ
UECiBhf+4IA+9SmTkmkpYQHQAQ989RIiAOxo//niTAhUIBUmiFpnhFCElAkTVHhtNrYLgLfkiMFHZLCCibJji9C1LqS56yU3Ol6glAcTHxlwPDqD3s2SxC8m
VG8m15vS/nzAOZRlCaALA18AxuQxiGEykyBIH0zgJKehwiSU3nuJP03JyP1lUZUwYaUrYbKZfQ1wli9JYB8ZeFJ2qetFMbnjS+r4kt3BZJgcNGZMvHabZTIz
Jg+SSTRhsqkVbsVtboNbfrYpE7vFRBzgFKcPyQmTdgjRnAEwouIY9xKC/KQUoACFNTZrjT6kQA2gDW0EJnCBC8QkkopJbU18p1qw3C8qSl3+zEsYA9VRSrWK
J6tqFHn1skMGAAr46WqfcFZAmPSsecsbGkrLqrQAMI2Xal2rBdv60QBEIhIjclQnMrohSQXAa6sIb0yaOYtZ/CaNwyHOS/pazfa6kBnLcQ50ZmLYw8bEb4Az
hzkE9xLHxkQe8oiJdhTXuIHgBBSX4ARnF7zZCqihDBCO8BrKsM8Km7a1GM4wW17rlDbdBWWK+WNReKnhsASIKImVCeDwYuCaZJbBMLbGBL4ghSs84cY4XoKO
dcyBEvv4x0DOMBb1ImKikDjIUXmGYIOS4h4u1imWncmLYxxjBez4yjsmAAMiEJREwuTESP4Ja3PSXKEUOcxoTrP+mtec2ijHRLMwfgmVFcCAOttZAQc4gACC
wjk8vQTMbKbJmGvitDIH5cxCgWKYN9DJAFigfYGOiqFVa9efCAfIK74JEX/i5pfAOc4BoDKXaaJJouhUrmyOhE0GfZNJ/wTRkYbJxGjC6KVCWiZjmpNMZDAD
1OIgBxyGDOesCpNd1aRf+xpuTgDVR5TmoXiGbhqhCpWIRdAxNSWSiaqJUmmZlPeZ0woOcfxyk7W5ECfX0EZ9VQUTHfYtJ5s2YgAIrE6BLNENn6YylStQgQCU
+nyl9vesvwcDhlF1f0FIpBjIwIxzd4pAMREGewMgHGqlMa9ja+ZMLBqbjGoURCG6Ltb+TjTd6SaCRehS1x+a+67jxUVQzDPuS/E1wJd0tZBO0hVMsJe9yByl
YFmiiVBfICaYqMCSZzKBCUhA6wBkLE63lonHzEJKgGputwRlT56QPdya32R5zT6eA2dCiJic65drVdS2h9JtvKIRJtSK5sRp4jYw3mSGhlVVYt39kkzbJN6J
q4l38q3vGF9ADRco9QZe4gGZMD0mHjP4/VoWgDbk6txb2YrqKM66t1+qUnelyW0kxSEOkUKOaydR2n35EkO5qNC6bHmzZ9I8AxZGcjXvKvVyvqv5CIxzPBgy
QF9icFHGjzGMQfrAZbL4p+Nk6h8TZVQDMHyYJIbYp97V53z++8o+DVBnPPsjswO1XObqMgBlf0l0QzroRa3dE6F4VCdQHQBRXDS8y4QdCT2PC1xkSu7Pcgw7
AXHXFBPQAA2iEwDPER0BgHep8hJ71zd/80P8JUSFs2kBYA/zZhPecQmFZw3NAAzNAGOHpwYdUFqdFCeMBwIfIHVe0j0BwDlDcAiAwEWYt0IG0gvR0nkRlVew
ADsaV1EWRXqo5iGhkDszkUFSwwjWVihPg1ZmFYWyJxowF3MH9COFEVMyoXsAFACf8zntwXMEdX0GoyW7JgMhc3xlggJseFRN53QaYxPQ94LT5086QEXCFiyI
9BJaFRNcFz0xAX4yIR61NHtFQyP+uVRm6eeEdCRdjUA1bfUSqadRqHZMXfMS2kIpofdtzxR3MFEcfhVGxmB3ASA6pEJYdfOA4AROOuRDKwZE/fVYGJiBN2FZ
ceAGgOAMMOYMykAMwCALr/AKxcBgGRBaoJUB+kQBL6GCLRgTjCFKNuBIQoAJgPA5N9heKmRGFlc2IxQTmvhdGlKEMvEtMaGEq4d2rgeFSrNH8MJHPfISMxcA
WigTMwMTUKQeMOFlqJQyMFEw1WdwTzVbZSITs3Y+MLF4cBgAUQd5oAQ+kVQyjvQrV1VsOedbf6hs8vgS98IGHAkTCkQTDmRoi7h+KjJmk2AJlsAaMAFyb0R/
9gdeLxH+Nvr3eREVd+MGEweSeTMBRks2X6UCE3kXAHrDiuL0itdRTpAVWRsoeAYWB2wwBbIADMrgDLyYC8AYjKqgCq9QB9GwYBEQYV9QBqCVBhmQAaV1ARbQ
aC+BdPETjQHlAz/ACbagB6x0jQXyCwmiIAxCCxgHhDG5cajgXRtyTKFwhDARCSMXACiyhE14coQiPCsXE36QB0TzcrRnXELyPDRRj134JMbGK2JofTEBbNUX
AGgofS+wAnNydEhXPkwXcIwHFFNXW7YFgwElE8OWffTxOYQUE0byfbZyQGEVE3BBEwhlVopodsC0ejSxdixpTMfEITApOxNCNhiXRp4oTe3+9VfKcSw0QTcO
eEMwkWKu2Hcy8VgBAHjoRBOd1gavIAvwGYzyqQqncAd3kAsLpgBlIAVPoGNPwJ83pmOjNhSY8ApysBQQhRPfyC1OQUGx1hS6phYtMJshw2u+lgNYVCcxISWW
ERMAcx9R8Eo3M0DBeUA+Q5wwhwfHGYUQRAiLWCiL0ITS5QiOUF1zBC7x50bIxHFfk38z+W1pFG6cp5062TZr8zYI+BLU4JMvkQ3ZUF8BwG589zeLdZT9BRPq
uZRJJBAy0QZeKp/B2AZ1cAeScAojyFkVcAVLcGcK0KZtSgBIcQiyUAcI6hQexxQO+qBJQXR6+hQlymaW2KeK0Wn+MeGlctAGSPASd3AKttCVaHoFDBAVoBAN
cyqolnqpsfanaxaomDoXhHoTd/CeyrBgFUBjkQoTCyAUcxgAnBANYpATBFgUZLOgnaoYHEYU5AMTsFmrvNqrYfGpNhGqr0AMpPpgDhATCPATpRahmtUMWYAT
njJ3NmEps/oTllhMvuoTWTeaRWFUMbGr2Rqu4/USljIY3NRmLXYTbimsxMpZGRCWapABMGEAQhGhCtYMr3oTnjIU1eoTHoetSHYvM+F1THGrQ9GaL2GQMqGC
zzehNwGRVnd9PzCxMGEZl8GbMnGRGFlcMUGIVAgTKvpsM+FqrbcIjMAIUqMivHOS4iL+IvDXccfkXTVBNhp3Xv4HFud6N96AQzWBX4HjX0kpWelUWQZmBiVK
BEEQGTgwAzLQAuxKqv+JeAFgAFQLEwiQrC/xABKgjJk0WysQPx5oDblwoFfBBNHFUNMUAMbRVyi0l66TV/36NQw6f5QILpL4EpbAO9c2ZizymBE0I8VjPDmy
Iz3iIzsjJDH1m+axGbvXSl4YEzm1WymTWwYzE702E6k5J0iXJgVZAru6Pky1kAI5STbhlpUrmjG4W35WE933mzFhK/EYEykKE4H7ByU1E3kaTDORXY8SnT9B
szIRpGFxrjYklDWRabB4pekJE0K7ODJBEIRoJBhrW3UQjO3+ulkQoGNXUAH0ShNY+wDL2LUBYCbz44GuyjnTyynHkDZdsYMR9bah940g5Ebz9ygjsm0ZZI40
4aCwJ4XlBxOjsTxFdi+uq2w454WsVLETiTncKnQN8xIes7lHdT6xxbCiG6GUVHAPSX0ykRgENSVDEMKfmQSHpLEkCo89Qojj97/Q5lwwkX53VG2LwFqQyDso
uUEvAXK+C46zI15gAxPA+3nCGxw6GIADaBfm9ik8yR8KeIrX8MQwUbw4RA5UTBPJGwCF8xIBJm+tQm8BQBA+EgY3MwQE1SVyYL0LpgQNoGNc0G8ycbXfG77+
tpZeomDWAAxyMGxGoGgrRE1sa0b+aQS/3Ugp+FcTx7QJ9nu3GBSJIYW7wXNW/otSLrcj9GIvMDEGMpVsB+xbL5FIi6Q9VGSwvSZ9pBsAEXxJ5SO+sdVoF/zA
8lOHHBwTEkuxLzMs0yM9RTIGugy7g1hLcMFAKtrCLvwS6TdtzNnIh/kSLltMO5xM1Bl6YnOdv5GgAQCK23lNbBMgMPQSDEg3qKKzOwuB4tRDfncOQGs4AubF
XzwQHtvBUVW9w5rGAcAAS/AExxoT7RPHjrZJAYB0nza221pzqMNXZiQtgUyu8YuJtnGJOhoAIHe/jKy/joxHkPwugUt+GN1Slpxsyda46et7++hIt7owrlxU
qPx4FMz+STDRyqBEh742Ex68czNhbElgU7d8K4MEE7wcE8vDbMq1oiu3ck4DwzPhO2PWsnbbQS1ZfwydiaEHC+UaUX2Bk2PkXtekzUrKzT8ZAN+cN+K8QzNx
xVkcAFsceF5sWWaAyVdBxjEBAy0Az9drDV7wEvS8pjJhAFebtVurq2vZAqCwDZtVDLLQBpxDBEXAx+3lxwXNINaC0IOsTOLFw0rt0IziCfh7QRXEtyj3IiXF
jilFnCwVVj+i04k7Uzdn044rE56MSjzQ2lVkuWhY0nScdAXJzzHBVHIYkC4dE6W5OapLkY/rLzFhBcKFkTsdHoUYdmUVE7c7Uq23nLqLt8r+nMNKzSHO7NTd
GM0MYjZUnbZW3UUBgNVxo9UN2KQyIaWKNU6DAxNZjIHNS1nrzKU/8bScVQgwYWc3Ab41cQngwFm2MAVWAW7iOuBFoZpzqMG83Y87gEUTm3WXMcKofR98Ytzw
+CM9EwAea5ksLBNl5twvYbIlOV3jcsPTHVeD6UFN/ZfZzY02G3dFrLahCN7Z7EWlSN5P3NXcwG4BIA5VLIHlfM7uvZ7OW2Dy7RP0bQ3bcAlBod8y4Qu+oA6A
bQ2USrZP0dgEfuVJsaq1CWTtjOUZxndSAaw1Ac/DuFnbwAlEgQxqrub10A/l4N9b6eVyXmIvkMEA+WMdOecaBub+USHmNOGewshZ2xAPpcDkTa7mTr7mvlAP
jF4P+ZAP48BZxRDnev6Ea3ZqlZ7pmm4VnTbhMwHo+MlZ5RAPuiATa97oqJ7q8dDfnOUMwEinSMapimI1IIUTLxqFNlGFbPHJmGq6c0GwGk2cm84WPBsU7TDW
nDYQK3wT7ikLumjm6lAPTp7oqh4PjB4P8aAO4LDtUc5ZvPAKbQBkMvsTbiVdOWHMQLHsSIHYM4HpQ0HKYmHgkhQTvh5QdyhQP2EERQAzuTIzwDWwryuwRaZA
n400NJI0kRkThJK7jWyjDPpGG8JxF4J/EhJCsVNenseDCNIL2cgXLTSK04AsWU0TT6r+DTkunjlExU9WE+gZE/ZgD/R2D/dAtALhlJ9hJEUwBKcGAy9QvbJw
ptCO7UI/9OrwDd+w7d9QeG0QqneAMgm3K3pyjQ7lF7hgQrzRlxWvTLcRmKIgG8dECqTwnDGxKCZCNSlrGgFQGoZACH87E5RZmSsV2jJHQLp8JANkBXoCBTbF
yUTgZbze2ihTMBDJtF7C85Q0VCpgJqkcACiNPgHQeAkZhzQxm3QIE/Uuy0PR7zDh6V8l8DLxkf9rUksDEy7C8I1M60NxUbITkxefV1YfTX6hQh5fgNikTeVt
3jIxxe92E7LIvIH3OPXmDzkivagEkdUL9IIe5dtg9Eb/gTD+Vgx1AOvb2i9SX0ZDuo2fF8Tf6DWC+RJ0axMmQi5mv3qNeUevd37L3WzqjsIZ2XW4fMA0sdrE
tj2nC0nEV/lhMtsT83ieG5sJybAAEUDgwBUDDcIwGMCGjRsJA+gY6EPgj4FChhBJaGRglIFWBm4BOTCMQDMD2QiEIzCOwDkD8Rj8M1DQIJoBDAVAZJCRwEYD
e04aaEmgp6ECSQXo9CkhqoSrDLoKABVWgFkCaQWoJXCXwF4JhQkcZtCYwLEOA1AbWO3a2gDZtL0dGE6guIHoBJpDZy7AuXMCz7VrB0+wvIT4BPJL6E+xv5YC
twhkMlEgjoF3XuWyllnzZs6dPXP+jhbtUACKBqGQTRg2wNcAvVxvDYBLYFaqs2BNjSow1sBVqVIJFDVQqdFNZgUKDQA0QM/lAhcJzCmQkMBBAmMmzPOSpUqz
ZrwLHBngsXiBWKxAOR0gskMiRIYIDCI5AA+BOwTmmCzQxgwZCAP4N6igAFAQyASBSBAohIE8EEiDgQowTsCEANQvv4Hsi0i++BzSaCAoooDCI8dGBI8kkwJw
Y6CVApgDDxcTuo66gQi5SSeeDHLEIOSICoDHowIYTqAgeTNoN6momi1J2GAbiLVhnhzGmLKkZGaahKQx6BqzvBFIrgDEIYccuwQaU6+9BGpHIHgcMuwwfu65
R6DFGBv+aAzIBBLivspekaWZz/4E1JpoOLskgA0HygO1hI45ZqBfEpINK4Gqeko3gZwKgKkAghMyoVCK0lEg5Zhj7rkAEplRxgBiTEi7xlY0qKQAZA1vvPGw
SGg9gzp8z1D56HtIzwAaUmhChwQkMAADA0AwAAUFYjAAB40jyCwKiw2AsvoSkog0+BwqIqHTOPqIxDDCOIOM7wJg4yQVt9POID8MEmSgm2ociBHmblQuAOQi
AfXHIIckcqDdckMygKsEwiXSrgxyUixFHUJrIC0d4qbLuQIgxyAz+zozgDQDWNMgewxCTKA452QxxfDSy3MyG2QIoI5X/Aw058wuASaXXHD+zgyUAIgwIomB
pCzLoGNY46oX2CKlrTbcoArg4AB60xRIpYb7pJPiBKokoX5v5GlfVAUyhJDqrMMuu4EaCyAOWElat1YSAzgvPfWMNsi9XjcsbT4dMMwWB20DmIG/gV54IUAV
VDDIBBOaLcEgDzSY1gIIjWthcYGuncFCiLjVMCOH0hORPNXDM5FdFN9lUaB4BZpXplRxGshUUgfq1xKABwrlayA7NQtTSxGmNOoAmHx4tYFUUzTpK7McKJuB
Mg7AS7oCGDOAj/1CU02D6Eko5QBWXoxa4+qoAzOddSbmkFf4/LmZaApVP3/99+e/f///B2AA+Te6AHZOgPpbwQr+Gvc5GMhABqFbCLECkIPB6QBYpPlB4IQg
BIwEoAhFKBrf9IYrx4QkAKyblUDclZK4bUd2MElITfB1tp00x4bJMcjvBBI84QjpE58QRdauVqSEJM8gTGrNL5gWAOgFYErGqJKVBCINaVRMIGwJwFuwl72N
kcmLIQPZyEhGGHrYwx74aFMA3hSnALCMf3eogzOcAYj35ewSnLiEFtpgM1nIwhZ0OGAgBTlIQhYygAQEoIQMuUhGNpKRyDEOwRw5SUruz436qwNnQFHHzESj
Cpq5RCgdcgdSkrKSp0RlKlW5Sla28pHG6UQnXDnLRl6SWkPIZGcuUUdi5KINxMCE0PT+10G9EZI2lKJlMpXJPxScYJnPhGY0pRlIW65PM9FwhjKK0cc26MwZ
tnhFG9rQv3AFoJjqa9RAmpeQY07TnfoLQgZ/0K1CJguAm9vftWzwzkFOh3+OGBs/HekNL6GymqMkRjGKwYtcyGJ+86sDIDChGU54xhkOHWf/RMi/panTOO0U
aEgHEjhG2tOQK2jBAv+TkNCZBZEkPSBITBgAMgxEViiCw4rmsFMXvSh//tTfvpJjCUjukIfEG4gQV2G8/ClveRDjnxSnKJBuVKMaV8yGW7QhEC4ZJy9m8gtf
BuIOwZgsjfwwXxsXo48XCoQMZxCIZR4KUTmw7wb75AQeN9n+GWfML6OTyQF+5NNBvpHQidLzivNao5UkKWxSA3EF1aq21ACkQojCCdLXeGSQSYytEZ/NXSLO
VqO1/cG0AfBD7WTn07jBql03rROJHoOrDxXTCB0yC0yBRTgc3ECC/AEQ4xKCAuIWaFnOEggIBqIBC1hAfSj13EoNsk+XGgSmAhxPAOwUALi2zl0tfFv/gJq/
fT1iIDrcrMCIp1Smqs+pu2jeEtUn1QBgSSBXdchWA9BVh3Tve+ELgDsEQhiBnHEgaDXInNg6vNdpNwBy2GMp71AawlF3r5whhl8NIlhgbahD6amXxMySzkc9
DDa0AanBjoepyw5HlgH4VADCZpb+USXEVPZS1XVqV7vZGWRF7oLtCWU7nvScs5wOkWdpdmuhgbR0IP4x4ECS5UxmJSi5DRKIcwOAzwBZq8kzoG62KCiQlw4k
CEGIWQc7lAQm6MoKhs3udlN40+/ObXapHch1QjwIGg0kEYt4DmiX04gcBZRHm30x1wSSNUxZjWpUq0pWnNqVh0WMidETCH3tGwD8GkS/3gD1XML0Re6BD8Al
I3AATmYYxKRVwXfhDk0FIiK+beiC2rrwZoCh4YGMrjQx65CImIQ04zDNNYyVlGMTFhVXHAxTLE7ID7smkK/N2CCP6CzZcpcQ0rJtVbRzyezmEAcWAjkh27WV
hwSityT+3NY4ut3WZA7n5M8xLsoCSZbkJmflACg3ABvI8nMFgtIF+odm2NIWAYEVuF8LpINHNue6y3M3OZcEtm6AAwtdgihwfzsAa/NndGhINuaY1yCbDR4p
Ej284DD6UpAVCG5msbD3ckWxqlHNlDJtkE13WiDWG0hXt1cXoof1LwIhKzxSvWo1Nn0gLNMHG2OXQtWtp1eBG10Oci0ocP5VIN3aUAfXQ8JnYPrSUPXoU5MN
UqodzNmLjnZCqi02geTohgKpIY4/7m0dJ6Qxr1Jh60Qi247cKQB8K2cHzZyQJRuEWPSWrkPynZBn+TtzAuFytaJrcIG0VFuCnc/XR4onhwv+5Mi6IhcJZZpd
wbsu1m0NQKJQ622QQ0cgI7eh3U0+kM0ORL1KablAUrHUpR6MajIfSM0FokSBpDPnitJ0WqhnEKEnpHvdG8jIBDzgAgukTVJXq2ICEHWDzEHjcm4zbjMYAB/w
gD5bb4ZfyxyAXh2eCekZT9nJIiWwLMogzdsFXECxxxKIyHK7l8sUh5A24PEEa+MdbPMJQTsV0bIXteG7juspHyO3wKM6gxgDE8quIhuIJPig0rMuh+CBHcCQ
HOgtg3Cg4HqBexuQgZCcZuk3EIgWaWEuzOOczROIg/M8YREI9zOIDemVhzM8gUi9u0kI2Kozg4gX2VOt05IOtLn+PbwjOd4JCof4EaTKmt9QMYSBtORLCCRi
micRi6SJvvuyGIfoqoIiNewLGZIZCPIJgDZpE1dLn0EKnK0rBl4TqUAURFfSsudqgSgDkMRxMhwILIPgAXqCDzQbiNsqpzZzMzgzjiDDuLkJgNnJA9ValRAL
ABqZISzMPc4iKk8RHqTilMoiogJ0BeSbjVrAhV3YCjM0iCZyIoOYBmiAhp5Tiyu6Bq3iqoEQBzCpC3MAq70AGTUZjIFIo8NIsD0UpJgRiIrSjFwAxEHkxm5s
pB1sJNBTH2v0xv0pKmjSRcQSxIMipF2yhmbAqHKUx3nkJ0SiFnKkR2qJhN5bpnQsR3b+pJbu0h9s3LVXkANubC9KAjRWkrN8dMiHhMhkAshMZELjuISLgqj8
Yb39kRIoWSSrqaS8+x/ZCyAUSiUJQpxJKsSI3J8eAyCh4p9VZMlVOqi0Sojv2h8xmJ87aCR1LKSEHCS7s5FF4kSzMEn/UTz/8a0mc6VDhK4ng4FrSQgVJCBI
jA+LaA+iwS0AUpcgU5GdgkIYMa0YIcXoSAiYHBtI8gQfESDJSphagEuzaJ506j9qeQZoMAir8rks0oYt+hKDkMMAQiMEe7r00YcFQys08rsN7ECZKg8soIKB
AITRwBsriMwSGo+zqYZuQAtpwEvjQKx0MjZbTL64jDm3RMD+qxGiH+qhH1lLHVGOSXAECWwERqihRDBLfxLFP8Cz2HOJV5EbjZuVddGumQoALDCs01gPI0g8
v/EVwIHEhKAM6jq4/1AplCoIFTCpgQiBEPC3aLm8CIkularOhFA40Vu8gUjKAKqp1guvqetEGBmI2rO9ALgxtAwV3jOKtjQII0o2dYqvXKQW+gqAbmjDnws6
Yxy6wDS1/UkjqbskNrLJALDDxnCX9kS3iVOdjdzIi8Ev+/pMhzAGjwwA0bQ5g4iUgaAU1GwvTeEUF3uxGJsx5DA5ofSJK8SdURSIEOs73yy/17MptxKIDDUI
EQERw0s8MzsU9HQIbQkdyFsgAUH+loSonACoUvAMuC07Fh9MCOr6vIFYuCLsG4NIAjbbiDf7iJkSyNZJkZXQKY6bPY/buznFnT+zTRtijrQ0iBjrQtaEO1e8
mlVwtP60isayuQB9HrMj0AI9UKBL0L/smFK7i7xgRrESmbISCHqwwzs0CDaCOjiRxoSww0hZEVnBUMIrIYMgA7i5GAOtL4HQPxH1P8VaHiZxqlmwDdS0GqwB
DuJ5MRhrQB4hKktQDhs1RdFChBrxp9LqOJLEA7AEr1gRUgdTHfGwAhFhN0o0QV8ZqfYzjt9ysqiM0oHDNyoViCvFsgBYyYTAznuLyuoMwvrYAQsaPW5Vz10x
vPRQPRL+WVPXwziW2Cm4eQmSHEtRJEV8uc8b0lPg2U8GGw5OAcNGg7llg0vTVLv4OgYoeb5dDIBpuMtN4zSrupiLedTt6ZgxQdkGFaNM7T5OHQhPXSuVCdXx
ER8cCoBya53u6td+BToPnaoA0D8CDU2BkK+nKSLjeLsEdFikirEtxBGHYI6zuR0excC2ArwmnFYiHQ8jHcGEULxDUbJ4GwiUbCn/GNcAgJwZNIgq/c50Xdcu
UyQXHAgbOBxHrNclHRoyzVdU3a5+dZcUcaH47ESS/DZWmQ58OZUI5Kz83KGGlbbh0JSIPR6EwY3/jBRbdI1HKVEB3b+BkCqQvaLQpb4u4pj+MoG1OQywkqHQ
MjoZl5XZ8POH8QNV74vGAJAHeSgZMRo3nKU6WfGOm3oL/QoAn33VAPhMZkgIdVwa1nAaJKrYgbgNXR0Iy8oaP0WKk6sEayPW3ZPAAMg73MQXmlgb1OpNkgxY
FQFcurkpIrVWrh3BEhSIIciTJZ2nbrFHsoXKs51SyisBfwuAywnPzAsAKX2ygTg4LzXPhFg/r9Vb9ZC440RVWRO89IUV7XiJKfQ2Hc1R+7zRfVnYgEGqpHK5
qnkKt1yY2EgIRK1LnevYgQBdn3PULRo6STWT/zKIktlU73vZwhS/QTrhmZwmUOQfr3RI/+0fCVGpqExEJhscJLuWrqTskPXQV8xMiK40CI0LWIHtuBhZm/GV
2jvdHVGZhFTskez1PcwaEkYT1ISIRYOghedN4SbhXBbu2LvMy73kS7+EQ/+i1LASGXfYvkw1I9olTDmZk0NG5ERW5EVm5EZ25EeG5EiW5Emm5Eq25EvG5EzW
5E3m5E725E8G5VAW5VEm5VI25VNG5VRW5VVm5VZ25Veek4AAADs=
''' | PypiClean |
/ImSwitchUC2-2.1.0.tar.gz/ImSwitchUC2-2.1.0/imswitch/imcontrol/view/widgets/AlignmentLineWidget.py | from qtpy import QtCore, QtWidgets
from imswitch.imcommon.view.guitools import naparitools
from imswitch.imcontrol.view import guitools
from .basewidgets import Widget
class AlignmentLineWidget(Widget):
""" Alignment widget that displays a line on top of the image in the viewbox."""
sigAlignmentLineMakeClicked = QtCore.Signal()
sigAlignmentCheckToggled = QtCore.Signal(bool) # (enabled)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Graphical elements
self.angleEdit = QtWidgets.QLineEdit('30')
self.alignmentCheck = QtWidgets.QCheckBox('Show Alignment Tool')
self.alignmentLineMakerButton = guitools.BetterPushButton('Alignment Line')
self.alignmentLine = naparitools.VispyLineVisual(movable=True)
# Add items to GridLayout
alignmentLayout = QtWidgets.QGridLayout()
self.setLayout(alignmentLayout)
alignmentLayout.addWidget(QtWidgets.QLabel('Line Angle'), 0, 0)
alignmentLayout.addWidget(self.angleEdit, 0, 1)
alignmentLayout.addWidget(self.alignmentLineMakerButton, 1, 0)
alignmentLayout.addWidget(self.alignmentCheck, 1, 1)
# Connect signals
self.alignmentLineMakerButton.clicked.connect(self.sigAlignmentLineMakeClicked)
self.alignmentCheck.toggled.connect(self.sigAlignmentCheckToggled)
def getAngleInput(self):
return float(self.angleEdit.text())
def setLineAngle(self, angle):
self.alignmentLine.angle = angle
def setLineVisibility(self, visible):
self.alignmentLine.setVisible(visible)
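# A minimal usage sketch of how a controller might wire up this widget
# (constructor arguments follow the base Widget class and are omitted here;
# the wiring below only uses names defined in this class):
#
#     widget.sigAlignmentCheckToggled.connect(widget.setLineVisibility)
#     widget.sigAlignmentLineMakeClicked.connect(
#         lambda: widget.setLineAngle(widget.getAngleInput()))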
# Copyright (C) 2020-2021 ImSwitch developers
# This file is part of ImSwitch.
#
# ImSwitch is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ImSwitch is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>. | PypiClean |
/ClueDojo-1.4.3-1.tar.gz/ClueDojo-1.4.3-1/src/cluedojo/static/dojox/charting/action2d/Tooltip.js | if(!dojo._hasResource["dojox.charting.action2d.Tooltip"]){
dojo._hasResource["dojox.charting.action2d.Tooltip"]=true;
dojo.provide("dojox.charting.action2d.Tooltip");
dojo.require("dojox.charting.action2d.Base");
dojo.require("dijit.Tooltip");
dojo.require("dojox.lang.functional");
dojo.require("dojox.lang.functional.scan");
dojo.require("dojox.lang.functional.fold");
(function(){
var _1=function(o){
var t=o.run&&o.run.data&&o.run.data[o.index];
if(t&&typeof t!="number"&&(t.tooltip||t.text)){
return t.tooltip||t.text;
}
if(o.element=="candlestick"){
return "<table cellpadding=\"1\" cellspacing=\"0\" border=\"0\" style=\"font-size:0.9em;\">"+"<tr><td>Open:</td><td align=\"right\"><strong>"+o.data.open+"</strong></td></tr>"+"<tr><td>High:</td><td align=\"right\"><strong>"+o.data.high+"</strong></td></tr>"+"<tr><td>Low:</td><td align=\"right\"><strong>"+o.data.low+"</strong></td></tr>"+"<tr><td>Close:</td><td align=\"right\"><strong>"+o.data.close+"</strong></td></tr>"+(o.data.mid!==undefined?"<tr><td>Mid:</td><td align=\"right\"><strong>"+o.data.mid+"</strong></td></tr>":"")+"</table>";
}
return o.element=="bar"?o.x:o.y;
};
var df=dojox.lang.functional,_2=Math.PI/4,_3=Math.PI/2;
dojo.declare("dojox.charting.action2d.Tooltip",dojox.charting.action2d.Base,{defaultParams:{text:_1},optionalParams:{},constructor:function(_4,_5,_6){
this.text=_6&&_6.text?_6.text:_1;
this.connect();
},process:function(o){
if(o.type==="onplotreset"||o.type==="onmouseout"){
dijit.hideTooltip(this.aroundRect);
this.aroundRect=null;
return;
}
if(!o.shape||o.type!=="onmouseover"){
return;
}
var _7={type:"rect"},_8=["after","before"];
switch(o.element){
case "marker":
_7.x=o.cx;
_7.y=o.cy;
_7.width=_7.height=1;
break;
case "circle":
_7.x=o.cx-o.cr;
_7.y=o.cy-o.cr;
_7.width=_7.height=2*o.cr;
break;
case "column":
_8=["above","below"];
case "bar":
_7=dojo.clone(o.shape.getShape());
break;
case "candlestick":
_7.x=o.x;
_7.y=o.y;
_7.width=o.width;
_7.height=o.height;
break;
default:
if(!this.angles){
if(typeof o.run.data[0]=="number"){
this.angles=df.map(df.scanl(o.run.data,"+",0),"* 2 * Math.PI / this",df.foldl(o.run.data,"+",0));
}else{
this.angles=df.map(df.scanl(o.run.data,"a + b.y",0),"* 2 * Math.PI / this",df.foldl(o.run.data,"a + b.y",0));
}
}
var _9=(this.angles[o.index]+this.angles[o.index+1])/2;
_7.x=o.cx+o.cr*Math.cos(_9);
_7.y=o.cy+o.cr*Math.sin(_9);
_7.width=_7.height=1;
if(_9<_2){
}else{
if(_9<_3+_2){
_8=["below","above"];
}else{
if(_9<Math.PI+_2){
_8=["before","after"];
}else{
if(_9<2*Math.PI-_2){
_8=["above","below"];
}
}
}
}
break;
}
var lt=dojo.coords(this.chart.node,true);
_7.x+=lt.x;
_7.y+=lt.y;
_7.x=Math.round(_7.x);
_7.y=Math.round(_7.y);
_7.width=Math.ceil(_7.width);
_7.height=Math.ceil(_7.height);
this.aroundRect=_7;
dijit.showTooltip(this.text(o),this.aroundRect,_8);
}});
})();
} | PypiClean |
/DjangoDjangoAppCenter-0.0.11-py3-none-any.whl/AppCenter/simpleui/static/admin/simpleui-x/elementui/utils/aria-utils.js | 'use strict';
exports.__esModule = true;
var aria = aria || {};
aria.Utils = aria.Utils || {};
/**
* @desc Set focus on descendant nodes until the first focusable element is
* found.
* @param element
* DOM node for which to find the first focusable descendant.
* @returns
* true if a focusable element is found and focus is set.
*/
aria.Utils.focusFirstDescendant = function (element) {
for (var i = 0; i < element.childNodes.length; i++) {
var child = element.childNodes[i];
if (aria.Utils.attemptFocus(child) || aria.Utils.focusFirstDescendant(child)) {
return true;
}
}
return false;
};
/**
* @desc Find the last descendant node that is focusable.
* @param element
* DOM node for which to find the last focusable descendant.
* @returns
* true if a focusable element is found and focus is set.
*/
aria.Utils.focusLastDescendant = function (element) {
for (var i = element.childNodes.length - 1; i >= 0; i--) {
var child = element.childNodes[i];
if (aria.Utils.attemptFocus(child) || aria.Utils.focusLastDescendant(child)) {
return true;
}
}
return false;
};
/**
 * @desc Attempt to set focus on the current node.
* @param element
* The node to attempt to focus on.
* @returns
* true if element is focused.
*/
aria.Utils.attemptFocus = function (element) {
if (!aria.Utils.isFocusable(element)) {
return false;
}
aria.Utils.IgnoreUtilFocusChanges = true;
try {
element.focus();
    } catch (e) {
        // Focusing can throw on detached or otherwise unfocusable nodes;
        // swallow the error and report the resulting focus state below.
    }
aria.Utils.IgnoreUtilFocusChanges = false;
return document.activeElement === element;
};
aria.Utils.isFocusable = function (element) {
if (element.tabIndex > 0 || element.tabIndex === 0 && element.getAttribute('tabIndex') !== null) {
return true;
}
if (element.disabled) {
return false;
}
switch (element.nodeName) {
case 'A':
return !!element.href && element.rel !== 'ignore';
case 'INPUT':
return element.type !== 'hidden' && element.type !== 'file';
case 'BUTTON':
case 'SELECT':
case 'TEXTAREA':
return true;
default:
return false;
}
};
/**
 * Trigger an event on an element
 * (mouseenter, mouseleave, mouseover, keyup, change, click, etc.)
* @param {Element} elm
* @param {String} name
* @param {*} opts
*/
aria.Utils.triggerEvent = function (elm, name) {
var eventName = void 0;
if (/^mouse|click/.test(name)) {
eventName = 'MouseEvents';
} else if (/^key/.test(name)) {
eventName = 'KeyboardEvent';
} else {
eventName = 'HTMLEvents';
}
var evt = document.createEvent(eventName);
for (var _len = arguments.length, opts = Array(_len > 2 ? _len - 2 : 0), _key = 2; _key < _len; _key++) {
opts[_key - 2] = arguments[_key];
}
evt.initEvent.apply(evt, [name].concat(opts));
elm.dispatchEvent ? elm.dispatchEvent(evt) : elm.fireEvent('on' + name, evt);
return elm;
};
aria.Utils.keys = {
tab: 9,
enter: 13,
space: 32,
left: 37,
up: 38,
right: 39,
down: 40,
esc: 27
};
exports.default = aria.Utils; | PypiClean |
/EQcorrscan-0.4.4.tar.gz/EQcorrscan-0.4.4/eqcorrscan/tutorials/template_creation.py | import logging
from obspy.clients.fdsn import Client
from obspy.core.event import Catalog
from eqcorrscan.utils.catalog_utils import filter_picks
from eqcorrscan.core import template_gen
# Set up logging
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s")
def mktemplates(
network_code='GEONET', plot=True, public_ids=None):
"""Functional wrapper to make templates"""
public_ids = public_ids or [
'2016p008122', '2016p008353', '2016p008155', '2016p008194']
client = Client(network_code)
# We want to download a few events from an earthquake sequence, these are
# identified by publiID numbers, given as arguments
catalog = Catalog()
for public_id in public_ids:
try:
catalog += client.get_events(
eventid=public_id, includearrivals=True)
except TypeError:
# Cope with some FDSN services not implementing includearrivals
catalog += client.get_events(eventid=public_id)
# Lets plot the catalog to see what we have
if plot:
catalog.plot(projection='local', resolution='h')
    # We don't need all the picks, let's take the information from the
# five most used stations - note that this is done to reduce computational
# costs.
catalog = filter_picks(catalog, top_n_picks=5)
# We only want the P picks in this example, but you can use others or all
# picks if you want.
    for event in catalog:
        # Filter on a copy rather than removing picks while iterating, which
        # would silently skip any pick that follows a removed one.
        event.picks = [pick for pick in event.picks
                       if pick.phase_hint != 'S']
# Now we can generate the templates
templates = template_gen.template_gen(
method='from_client', catalog=catalog, client_id=network_code,
lowcut=2.0, highcut=9.0, samp_rate=20.0, filt_order=4, length=3.0,
prepick=0.15, swin='all', process_len=3600, plot=plot)
    # We now have a series of templates! Using ObsPy's Stream.write() method we
# can save these to disk for later use. We will do that now for use in the
# following tutorials.
for i, template in enumerate(templates):
template.write('tutorial_template_' + str(i) + '.ms', format='MSEED')
        # Note that this will warn you about data types. As we don't care
        # at the moment, whatever ObsPy chooses is fine.
return
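# The wrapper above can also be called directly from Python instead of via the
# command-line entry point below, e.g. (re-using two of the default GEONET
# event IDs; any valid public IDs work the same way):
#
#     mktemplates(network_code='GEONET',
#                 public_ids=['2016p008122', '2016p008353'],
#                 plot=False)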
if __name__ == '__main__':
"""Wrapper for template creation"""
import sys
import warnings
if not len(sys.argv) > 1:
warnings.warn('Needs a network ID followed by a list of event IDs, ' +
'will run the test case instead')
mktemplates()
else:
net_code = sys.argv[1]
idlist = list(sys.argv)[2:]
print(idlist)
mktemplates(network_code=net_code, public_ids=idlist) | PypiClean |
/Electrum-CHI-3.3.8.tar.gz/Electrum-CHI-3.3.8/packages/colorama/ansi.py | CSI = '\033['
OSC = '\033]'
BEL = '\007'
def code_to_chars(code):
return CSI + str(code) + 'm'
def set_title(title):
return OSC + '2;' + title + BEL
def clear_screen(mode=2):
return CSI + str(mode) + 'J'
def clear_line(mode=2):
return CSI + str(mode) + 'K'
class AnsiCodes(object):
def __init__(self):
# the subclasses declare class attributes which are numbers.
# Upon instantiation we define instance attributes, which are the same
# as the class attributes but wrapped with the ANSI escape sequence
for name in dir(self):
if not name.startswith('_'):
value = getattr(self, name)
setattr(self, name, code_to_chars(value))
class AnsiCursor(object):
def UP(self, n=1):
return CSI + str(n) + 'A'
def DOWN(self, n=1):
return CSI + str(n) + 'B'
def FORWARD(self, n=1):
return CSI + str(n) + 'C'
def BACK(self, n=1):
return CSI + str(n) + 'D'
def POS(self, x=1, y=1):
return CSI + str(y) + ';' + str(x) + 'H'
class AnsiFore(AnsiCodes):
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
WHITE = 37
RESET = 39
# These are fairly well supported, but not part of the standard.
LIGHTBLACK_EX = 90
LIGHTRED_EX = 91
LIGHTGREEN_EX = 92
LIGHTYELLOW_EX = 93
LIGHTBLUE_EX = 94
LIGHTMAGENTA_EX = 95
LIGHTCYAN_EX = 96
LIGHTWHITE_EX = 97
class AnsiBack(AnsiCodes):
BLACK = 40
RED = 41
GREEN = 42
YELLOW = 43
BLUE = 44
MAGENTA = 45
CYAN = 46
WHITE = 47
RESET = 49
# These are fairly well supported, but not part of the standard.
LIGHTBLACK_EX = 100
LIGHTRED_EX = 101
LIGHTGREEN_EX = 102
LIGHTYELLOW_EX = 103
LIGHTBLUE_EX = 104
LIGHTMAGENTA_EX = 105
LIGHTCYAN_EX = 106
LIGHTWHITE_EX = 107
class AnsiStyle(AnsiCodes):
BRIGHT = 1
DIM = 2
NORMAL = 22
RESET_ALL = 0
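# The module-level singletons created below (Fore, Back, Style, Cursor) are the
# usual public interface. A minimal sketch of typical use (note: on Windows,
# colour output normally also requires colorama.init(), which lives outside
# this module):
#
#     from colorama import Fore, Style
#     print(Fore.GREEN + 'ok' + Style.RESET_ALL)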
Fore = AnsiFore()
Back = AnsiBack()
Style = AnsiStyle()
Cursor = AnsiCursor() | PypiClean |
/Idmeneo_cdQa-0.0.tar.gz/Idmeneo_cdQa-0.0/retriever/vectorizers.py | import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from .text_transformers import BM25Transformer
class BM25Vectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of BM25 features and computes
scores of the documents based on a query
Vectorizer inspired on the sklearn.feature_extraction.text.TfidfVectorizer
class
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'} (default='strict')
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None} (default=None)
Remove accents and perform other character normalization
during the preprocessing step.
'ascii' is a fast method that only works on characters that have
an direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
Both 'ascii' and 'unicode' use NFKD normalization from
:func:`unicodedata.normalize`.
lowercase : boolean (default=True)
Convert all characters to lowercase before tokenizing.
preprocessor : callable or None (default=None)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default=None)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries; n-grams at the edges of words are padded with space.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
.. versionchanged:: 0.21
Since v0.21, if ``input`` is ``filename`` or ``file``, the data is
first read from the file and then passed to the given callable
analyzer.
stop_words : string {'english'}, list, or None (default=None)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
There are several known issues with 'english' and you should
consider an alternative (see :ref:`stop_words`).
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
ngram_range : tuple (min_n, max_n) (default=(1, 1))
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
max_df : float in range [0.0, 1.0] or int (default=1.0)
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int (default=1)
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None (default=None)
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional (default=None)
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean (default=False)
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional (default=float64)
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional (default='l2')
Each output row will have unit norm, either:
* 'l2': Sum of squares of vector elements is 1. The cosine
similarity between two vectors is their dot product when l2 norm has
been applied.
* 'l1': Sum of absolute values of vector elements is 1.
See :func:`preprocessing.normalize`
use_idf : boolean (default=True)
Enable inverse-document-frequency reweighting.
k1 : float, optional (default=2.0)
term k1 in the BM25 formula
b : float, optional (default=0.75)
term b in the BM25 formula
floor : float or None, optional (default=None)
floor value for idf terms
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
idf_ : array, shape (n_features)
The inverse document frequency (IDF) vector; only defined
if ``use_idf`` is True.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
"""
def __init__(
self,
input="content",
encoding="utf-8",
decode_error="strict",
strip_accents=None,
lowercase=True,
preprocessor=None,
tokenizer=None,
analyzer="word",
stop_words=None,
token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 2),
max_df=1.0,
min_df=1,
max_features=None,
vocabulary=None,
binary=False,
dtype=np.float64,
norm=None,
use_idf=True,
k1=2.0,
b=0.75,
floor=None,
):
super().__init__(
input=input,
encoding=encoding,
decode_error=decode_error,
strip_accents=strip_accents,
lowercase=lowercase,
preprocessor=preprocessor,
tokenizer=tokenizer,
analyzer=analyzer,
stop_words=stop_words,
token_pattern=token_pattern,
ngram_range=ngram_range,
max_df=max_df,
min_df=min_df,
max_features=max_features,
vocabulary=vocabulary,
binary=binary,
dtype=dtype,
)
self._bm25 = BM25Transformer(norm, use_idf, k1, b)
# Broadcast the BM25 parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._bm25.norm
@norm.setter
def norm(self, value):
self._bm25.norm = value
@property
def use_idf(self):
return self._bm25.use_idf
@use_idf.setter
def use_idf(self, value):
self._bm25.use_idf = value
@property
def k1(self):
return self._bm25.k1
@k1.setter
def k1(self, value):
self._bm25.k1 = value
@property
def b(self):
return self._bm25.b
@b.setter
def b(self, value):
self._bm25.b = value
@property
def idf_(self):
return self._bm25.idf_
@idf_.setter
def idf_(self, value):
self._validate_vocabulary()
if hasattr(self, "vocabulary_"):
if len(self.vocabulary_) != len(value):
                raise ValueError(
                    "idf length = %d must be equal "
                    "to vocabulary size = %d" % (len(value), len(self.vocabulary_))
)
self._bm25.idf_ = value
def fit(self, raw_documents, y=None):
"""
Learn vocabulary and BM25 stats from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : BM25Vectorizer
"""
X = super().fit_transform(raw_documents)
self._bm25.fit(X)
return self
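    # A minimal usage sketch of the fit/transform workflow (hypothetical
    # corpus and query strings; assumes the accompanying BM25Transformer
    # follows the fit/transform convention used elsewhere in this module):
    #
    #     docs = ["the cat sat on the mat", "dogs chase cats"]
    #     vec = BM25Vectorizer().fit(docs)
    #     scores = vec.transform(["cat on a mat"], is_query=True)
    #     # `scores` is a sparse (n_queries, n_documents) matrix of BM25 scores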
def transform(self, raw_corpus, is_query=False):
"""
Vectorizes the input, whether it is a query or the list of documents
Parameters
----------
raw_corpus : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
vectors : sparse matrix, [n_queries, n_documents]
            scores from BM25 statistics for each document with respect to each query
"""
X = super().transform(raw_corpus) if is_query else None
return self._bm25.transform(X, copy=False, is_query=is_query)
def fit_transform(self, raw_documents, y=None):
"""
Learn vocabulary, idf and BM25 features. Return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
BM25 document-term matrix.
"""
X = super().fit_transform(raw_documents)
self._bm25.fit(X)
return self._bm25.transform(X, copy=False) | PypiClean |
/Finance-Hermes-0.3.6.tar.gz/Finance-Hermes-0.3.6/hermes/factors/technical/core/overlap.py | import numpy as np
import pandas as pd
from hermes.factors.technical.core.utilities import *
def dema(close, length=None, offset=None, **kwargs):
length = int(length) if length and length > 0 else 10
offset = int(offset) if isinstance(offset, int) else 0
ema1 = ema(close=close, length=length)
ema2 = ema(close=ema1, length=length)
dema = 2 * ema1 - ema2
# Offset
if offset != 0:
dema = dema.shift(offset)
# Handle fills
if "fillna" in kwargs:
dema.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
dema.fillna(method=kwargs["fill_method"], inplace=True)
return dema
def ema(close, length=None, offset=None, **kwargs):
"""Indicator: Exponential Moving Average (EMA)"""
# Validate Arguments
length = int(length) if length and length > 0 else 10
adjust = kwargs.pop("adjust", False)
sma = kwargs.pop("sma", True)
offset = int(offset) if isinstance(offset, int) else 0
# Calculate Result
if sma:
close = close.copy()
sma_nth = close[0:length].mean()
close[:length - 1] = np.nan
close.iloc[length - 1] = sma_nth
ema = close.ewm(span=length, adjust=adjust).mean()
# Offset
if offset != 0:
ema = ema.shift(offset)
# Handle fills
if "fillna" in kwargs:
ema.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
ema.fillna(method=kwargs["fill_method"], inplace=True)
return ema
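# A quick sketch of how these moving-average helpers are typically called
# (placeholder price data; any pandas Series of closes works the same way):
#
#     import pandas as pd
#     close = pd.Series([10.0, 10.5, 10.2, 10.8, 11.0, 10.9, 11.2, 11.5])
#     ema(close, length=5)   # exponential moving average
#     sma(close, length=5)   # simple moving average, defined further below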
def fwma(close, length=None, asc=None, offset=None, **kwargs):
length = int(length) if length and length > 0 else 10
asc = asc if asc else True
offset = int(offset) if isinstance(offset, int) else 0
# Calculate Result
fibs = fibonacci(n=length, weighted=True)
fwma = close.rolling(length, min_periods=length).apply(weights(fibs),
raw=True)
# Offset
if offset != 0:
fwma = fwma.shift(offset)
# Handle fills
if "fillna" in kwargs:
fwma.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
fwma.fillna(method=kwargs["fill_method"], inplace=True)
return fwma
def hl2(high, low, offset=None, **kwargs):
offset = int(offset) if isinstance(offset, int) else 0
hl2 = 0.5 * (high + low)
# Offset
if offset != 0:
hl2 = hl2.shift(offset)
return hl2
def hlc3(high, low, close, offset=None, **kwargs):
offset = int(offset) if isinstance(offset, int) else 0
hlc3 = (high + low + close) / 3.0
# Offset
if offset != 0:
hlc3 = hlc3.shift(offset)
return hlc3
def hma(close, length=None, offset=None, **kwargs):
length = int(length) if length and length > 0 else 10
offset = int(offset) if isinstance(offset, int) else 0
# Calculate Result
half_length = int(length / 2)
sqrt_length = int(np.sqrt(length))
wmaf = wma(close=close, length=half_length)
wmas = wma(close=close, length=length)
hma = wma(close=2 * wmaf - wmas, length=sqrt_length)
# Offset
if offset != 0:
hma = hma.shift(offset)
# Handle fills
if "fillna" in kwargs:
hma.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
hma.fillna(method=kwargs["fill_method"], inplace=True)
return hma
def ichimoku(high,
low,
close,
tenkan=None,
kijun=None,
senkou=None,
include_chikou=True,
offset=None,
**kwargs):
offset = int(offset) if isinstance(offset, int) else 0
tenkan = int(tenkan) if tenkan and tenkan > 0 else 9
kijun = int(kijun) if kijun and kijun > 0 else 26
senkou = int(senkou) if senkou and senkou > 0 else 52
# Calculate Result
tenkan_sen = midprice(high=high, low=low, length=tenkan)
kijun_sen = midprice(high=high, low=low, length=kijun)
span_a = 0.5 * (tenkan_sen + kijun_sen)
span_b = midprice(high=high, low=low, length=senkou)
# Copy Span A and B values before their shift
_span_a = span_a[-kijun:].copy()
_span_b = span_b[-kijun:].copy()
span_a = span_a.shift(kijun)
span_b = span_b.shift(kijun)
chikou_span = close.shift(-kijun)
# Offset
if offset != 0:
tenkan_sen = tenkan_sen.shift(offset)
kijun_sen = kijun_sen.shift(offset)
span_a = span_a.shift(offset)
span_b = span_b.shift(offset)
chikou_span = chikou_span.shift(offset)
# Handle fills
if "fillna" in kwargs:
span_a.fillna(kwargs["fillna"], inplace=True)
span_b.fillna(kwargs["fillna"], inplace=True)
chikou_span.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
span_a.fillna(method=kwargs["fill_method"], inplace=True)
span_b.fillna(method=kwargs["fill_method"], inplace=True)
chikou_span.fillna(method=kwargs["fill_method"], inplace=True)
return span_a, span_b
def midpoint(close, length=None, offset=None, **kwargs):
length = int(length) if length and length > 0 else 2
min_periods = int(
kwargs["min_periods"]) if "min_periods" in kwargs and kwargs[
"min_periods"] is not None else length
offset = int(offset) if isinstance(offset, int) else 0
lowest = close.rolling(length, min_periods=min_periods).min()
highest = close.rolling(length, min_periods=min_periods).max()
midpoint = 0.5 * (lowest + highest)
# Offset
if offset != 0:
midpoint = midpoint.shift(offset)
# Handle fills
if "fillna" in kwargs:
midpoint.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
midpoint.fillna(method=kwargs["fill_method"], inplace=True)
return midpoint
def midprice(high, low, length=None, offset=None, **kwargs):
length = int(length) if length and length > 0 else 2
min_periods = int(
kwargs["min_periods"]) if "min_periods" in kwargs and kwargs[
"min_periods"] is not None else length
offset = int(offset) if isinstance(offset, int) else 0
lowest_low = low.rolling(length, min_periods=min_periods).min()
highest_high = high.rolling(length, min_periods=min_periods).max()
midprice = 0.5 * (lowest_low + highest_high)
# Offset
if offset != 0:
midprice = midprice.shift(offset)
# Handle fills
if "fillna" in kwargs:
midprice.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
midprice.fillna(method=kwargs["fill_method"], inplace=True)
return midprice
def ohlc4(open, high, low, close, offset=None, **kwargs):
offset = int(offset) if isinstance(offset, int) else 0
# Calculate Result
ohlc4 = 0.25 * (open + high + low + close)
# Offset
if offset != 0:
ohlc4 = ohlc4.shift(offset)
return ohlc4
def pwma(close, length=None, asc=None, offset=None, **kwargs):
length = int(length) if length and length > 0 else 10
asc = asc if asc else True
offset = int(offset) if isinstance(offset, int) else 0
# Calculate Result
triangle = pascals_triangle(n=length - 1, weighted=True)
pwma = close.rolling(length, min_periods=length).apply(weights(triangle),
raw=True)
# Offset
if offset != 0:
pwma = pwma.shift(offset)
# Handle fills
if "fillna" in kwargs:
pwma.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
pwma.fillna(method=kwargs["fill_method"], inplace=True)
return pwma
def rma(close, length=None, offset=None, **kwargs):
length = int(length) if length and length > 0 else 10
offset = int(offset) if isinstance(offset, int) else 0
alpha = (1.0 / length) if length > 0 else 0.5
rma = close.ewm(alpha=alpha, min_periods=length).mean()
# Offset
if offset != 0:
rma = rma.shift(offset)
# Handle fills
if "fillna" in kwargs:
rma.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
rma.fillna(method=kwargs["fill_method"], inplace=True)
return rma
def sma(close, length=None, offset=None, **kwargs):
length = int(length) if length and length > 0 else 10
min_periods = int(
kwargs["min_periods"]) if "min_periods" in kwargs and kwargs[
"min_periods"] is not None else length
offset = int(offset) if isinstance(offset, int) else 0
sma = close.rolling(length, min_periods=min_periods).mean()
# Offset
if offset != 0:
sma = sma.shift(offset)
# Handle fills
if "fillna" in kwargs:
sma.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
sma.fillna(method=kwargs["fill_method"], inplace=True)
return sma
def ssf(close, length=None, poles=None, offset=None, **kwargs):
length = int(length) if length and length > 0 else 10
poles = int(poles) if poles in [2, 3] else 2
offset = int(offset) if isinstance(offset, int) else 0
m = close.shape[0]
ssf = close.copy()
if poles == 3:
x = np.pi / length # x = PI / n
a0 = np.exp(-x) # e^(-x)
b0 = 2 * a0 * np.cos(np.sqrt(3) * x) # 2e^(-x)*cos(3^(.5) * x)
c0 = a0 * a0 # e^(-2x)
c4 = c0 * c0 # e^(-4x)
c3 = -c0 * (1 + b0) # -e^(-2x) * (1 + 2e^(-x)*cos(3^(.5) * x))
c2 = c0 + b0 # e^(-2x) + 2e^(-x)*cos(3^(.5) * x)
c1 = 1 - c2 - c3 - c4
for i in range(0, m):
ssf.iloc[i] = c1 * close.iloc[i] + c2 * ssf.iloc[
i - 1] + c3 * ssf.iloc[i - 2] + c4 * ssf.iloc[i - 3]
else: # poles == 2
x = np.pi * np.sqrt(2) / length # x = PI * 2^(.5) / n
a0 = np.exp(-x) # e^(-x)
a1 = -a0 * a0 # -e^(-2x)
b1 = 2 * a0 * np.cos(x) # 2e^(-x)*cos(x)
c1 = 1 - a1 - b1 # e^(-2x) - 2e^(-x)*cos(x) + 1
for i in range(0, m):
ssf.iloc[i] = c1 * close.iloc[i] + b1 * ssf.iloc[
i - 1] + a1 * ssf.iloc[i - 2]
# Offset
if offset != 0:
ssf = ssf.shift(offset)
# Handle fills
if "fillna" in kwargs:
ssf.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
ssf.fillna(method=kwargs["fill_method"], inplace=True)
return ssf
def swma(close, length=None, asc=None, offset=None, **kwargs):
length = int(length) if length and length > 0 else 10
asc = asc if asc else True
offset = int(offset) if isinstance(offset, int) else 0
# Calculate Result
triangle = symmetric_triangle(length, weighted=True)
swma = close.rolling(length, min_periods=length).apply(weights(triangle),
raw=True)
# Offset
if offset != 0:
swma = swma.shift(offset)
# Handle fills
if "fillna" in kwargs:
swma.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
swma.fillna(method=kwargs["fill_method"], inplace=True)
return swma
def t3(close, length=None, a=None, offset=None, **kwargs):
length = int(length) if length and length > 0 else 10
a = float(a) if a and a > 0 and a < 1 else 0.7
offset = int(offset) if isinstance(offset, int) else 0
c1 = -a * a**2
c2 = 3 * a**2 + 3 * a**3
c3 = -6 * a**2 - 3 * a - 3 * a**3
c4 = a**3 + 3 * a**2 + 3 * a + 1
e1 = ema(close=close, length=length, **kwargs)
e2 = ema(close=e1, length=length, **kwargs)
e3 = ema(close=e2, length=length, **kwargs)
e4 = ema(close=e3, length=length, **kwargs)
e5 = ema(close=e4, length=length, **kwargs)
e6 = ema(close=e5, length=length, **kwargs)
t3 = c1 * e6 + c2 * e5 + c3 * e4 + c4 * e3
# Offset
if offset != 0:
t3 = t3.shift(offset)
# Handle fills
if "fillna" in kwargs:
t3.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
t3.fillna(method=kwargs["fill_method"], inplace=True)
return t3
def tema(close, length=None, offset=None, **kwargs):
length = int(length) if length and length > 0 else 10
offset = int(offset) if isinstance(offset, int) else 0
ema1 = ema(close=close, length=length, **kwargs)
ema2 = ema(close=ema1, length=length, **kwargs)
ema3 = ema(close=ema2, length=length, **kwargs)
tema = 3 * (ema1 - ema2) + ema3
# Offset
if offset != 0:
tema = tema.shift(offset)
# Handle fills
if "fillna" in kwargs:
tema.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
tema.fillna(method=kwargs["fill_method"], inplace=True)
return tema
def trima(close, length=None, offset=None, **kwargs):
length = int(length) if length and length > 0 else 10
offset = int(offset) if isinstance(offset, int) else 0
half_length = round(0.5 * (length + 1))
sma1 = sma(close, length=half_length)
trima = sma(sma1, length=half_length)
# Offset
if offset != 0:
trima = trima.shift(offset)
# Handle fills
if "fillna" in kwargs:
trima.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
trima.fillna(method=kwargs["fill_method"], inplace=True)
return trima
def vwma(close, volume, length=None, offset=None, **kwargs):
length = int(length) if length and length > 0 else 10
offset = int(offset) if isinstance(offset, int) else 0
pv = close * volume
vwma = sma(close=pv, length=length) / sma(close=volume, length=length)
    # Offset
    if offset != 0:
        vwma = vwma.shift(offset)
    # Handle fills
if "fillna" in kwargs:
vwma.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
vwma.fillna(method=kwargs["fill_method"], inplace=True)
return vwma
def wcp(high, low, close, offset=None, **kwargs):
offset = int(offset) if isinstance(offset, int) else 0
wcp = (high + low + 2 * close) / 4
# Offset
if offset != 0:
wcp = wcp.shift(offset)
# Handle fills
if "fillna" in kwargs:
wcp.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
wcp.fillna(method=kwargs["fill_method"], inplace=True)
return wcp
def wma(close, length=None, asc=None, offset=None, **kwargs):
length = int(length) if length and length > 0 else 10
asc = asc if asc else True
offset = int(offset) if isinstance(offset, int) else 0
total_weight = 0.5 * length * (length + 1)
weights_ = pd.Series(np.arange(1, length + 1))
weights = weights_ if asc else weights_[::-1]
def linear(w):
def _compute(x):
return np.dot(x, w) / total_weight
return _compute
close_ = close.rolling(length, min_periods=length)
wma = close_.apply(linear(weights), raw=True)
# Offset
if offset != 0:
wma = wma.shift(offset)
# Handle fills
if "fillna" in kwargs:
wma.fillna(kwargs["fillna"], inplace=True)
if "fill_method" in kwargs:
wma.fillna(method=kwargs["fill_method"], inplace=True)
return wma | PypiClean |
/NucleoATAC-0.3.4.tar.gz/NucleoATAC-0.3.4/pyatac/get_cov.py | import os
import multiprocessing as mp
import itertools
import numpy as np
import pysam
import traceback
import pyximport; pyximport.install()
from pyatac.tracks import CoverageTrack
from pyatac.chunk import ChunkList
from pyatac.utils import shell_command, read_chrom_sizes_from_bam
from pyatac.chunkmat2d import FragmentMat2D
def _covHelper(arg):
"""Computes coverage track for a particular set of bed regions"""
(chunk, args) = arg
try:
offset = args.window / 2
mat = FragmentMat2D(chunk.chrom,chunk.start - offset, chunk.end + offset, args.lower, args.upper, args.atac)
mat.makeFragmentMat(args.bam)
cov = CoverageTrack(chunk.chrom, chunk.start, chunk.end)
cov.calculateCoverage(mat, lower = args.lower, upper = args.upper, window_len = args.window)
cov.vals *= args.scale / float(args.window)
except Exception as e:
print('Caught exception when processing:\n'+ chunk.asBed()+"\n")
traceback.print_exc()
print()
raise e
return cov
def _writeCov(track_queue, out):
out_handle = open(out + '.cov.bedgraph','a')
try:
for track in iter(track_queue.get, 'STOP'):
track.write_track(out_handle)
track_queue.task_done()
    except Exception as e:
print('Caught exception when writing coverage track\n')
traceback.print_exc()
print()
raise e
out_handle.close()
return True
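# get_cov below expects an argparse-style namespace. A rough sketch of such a
# namespace (field names are taken from the attribute accesses in this module;
# the values shown are placeholders, not recommended settings):
#
#     from argparse import Namespace
#     args = Namespace(bam="sample.bam", bed=None, out=None, cores=4,
#                      window=121, lower=0, upper=251, atac=True, scale=1.0)
#     get_cov(args)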
def get_cov(args, bases = 50000, splitsize = 1000):
    """Compute coverage tracks for the requested regions and write them as a
    bgzip-compressed, tabix-indexed bedgraph (<out>.cov.bedgraph.gz).
    """
if not args.out:
if args.bed is None:
args.out = '.'.join(os.path.basename(args.bam).split('.')[0:-1])
else:
args.out = '.'.join(os.path.basename(args.bed).split('.')[0:-1])
if args.bed is None:
chrs = read_chrom_sizes_from_bam(args.bam)
chunks = ChunkList.convertChromSizes(chrs, splitsize = splitsize)
sets = chunks.split(items = bases/splitsize)
else:
chunks = ChunkList.read(args.bed)
chunks.merge()
sets = chunks.split(bases = bases)
maxQueueSize = max(2,int(2 * bases / np.mean([chunk.length() for chunk in chunks])))
pool1 = mp.Pool(processes = max(1,args.cores-1))
out_handle = open(args.out + '.cov.bedgraph','w')
out_handle.close()
write_queue = mp.JoinableQueue(maxsize = maxQueueSize)
write_process = mp.Process(target = _writeCov, args=(write_queue, args.out))
write_process.start()
for j in sets:
tmp = pool1.map(_covHelper, zip(j,itertools.repeat(args)))
for track in tmp:
write_queue.put(track)
pool1.close()
pool1.join()
write_queue.put('STOP')
write_process.join()
pysam.tabix_compress(args.out + '.cov.bedgraph', args.out + '.cov.bedgraph.gz', force = True)
shell_command('rm ' + args.out + '.cov.bedgraph')
pysam.tabix_index(args.out + '.cov.bedgraph.gz', preset = "bed", force = True) | PypiClean |
/OctoPrint-1.9.2.tar.gz/OctoPrint-1.9.2/src/octoprint/vendor/sockjs/tornado/session.py | from __future__ import absolute_import, division, print_function, unicode_literals
"""
sockjs.tornado.session
~~~~~~~~~~~~~~~~~~~~~~
SockJS session implementation.
"""
import asyncio
import functools
import logging
from octoprint.vendor.sockjs.tornado import periodic, proto, sessioncontainer
from octoprint.vendor.sockjs.tornado.util import bytes_to_str, get_current_ioloop
LOG = logging.getLogger("tornado.general")
def ensure_io_loop(method):
@functools.wraps(method)
def wrapper(self, *args, **kwargs):
if get_current_ioloop():
method(self, *args, **kwargs)
else:
def run():
method(self, *args, **kwargs)
self.server.io_loop.add_callback(run)
return wrapper
class ConnectionInfo(object):
"""Connection information object.
Will be passed to the ``on_open`` handler of your connection class.
Has few properties:
`ip`
Caller IP address
`cookies`
Collection of cookies
`arguments`
Collection of the query string arguments
`headers`
Collection of headers sent by the browser that established this
connection
`path`
Request uri path
"""
def __init__(self, ip, cookies, arguments, headers, path):
self.ip = ip
self.cookies = cookies
self.arguments = arguments
self.headers = headers
self.path = path
def get_argument(self, name):
"""Return single argument by name"""
val = self.arguments.get(name)
if val:
return val[0]
return None
def get_cookie(self, name):
"""Return single cookie by its name"""
return self.cookies.get(name)
def get_header(self, name):
"""Return single header by its name"""
return self.headers.get(name)
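# A minimal sketch of a connection class consuming ConnectionInfo in on_open
# (the class and attribute names below are illustrative, not part of this
# module; SockJSConnection is provided by sockjs.tornado.conn):
#
#     class EchoConnection(SockJSConnection):
#         def on_open(self, info):
#             self.client_ip = info.ip
#             token = info.get_argument('token')
#             agent = info.get_header('User-Agent')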
# Session states
CONNECTING = 0
OPEN = 1
CLOSING = 2
CLOSED = 3
class BaseSession(object):
"""Base session implementation class"""
def __init__(self, conn, server):
"""Base constructor.
`conn`
Connection class
`server`
SockJSRouter instance
"""
self.server = server
self.stats = server.stats
self.send_expects_json = False
self.handler = None
self.state = CONNECTING
self.conn_info = None
self.conn = conn(self)
self.close_reason = None
def set_handler(self, handler):
"""Set transport handler
``handler``
Handler, should derive from the `sockjs.tornado.transports.base.BaseTransportMixin`.
"""
if self.handler is not None:
raise Exception('Attempted to overwrite BaseSession handler')
self.handler = handler
self.transport_name = self.handler.name
if self.conn_info is None:
self.conn_info = handler.get_conn_info()
self.stats.on_sess_opened(self.transport_name)
return True
def verify_state(self):
"""Verify if session was not yet opened. If it is, open it and call connections `on_open`"""
if self.state == CONNECTING:
self.state = OPEN
self.conn.on_open(self.conn_info)
def remove_handler(self, handler):
"""Remove active handler from the session
`handler`
Handler to remove
"""
# Attempt to remove another handler
if self.handler != handler:
raise Exception('Attempted to remove invalid handler')
self.handler = None
def close(self, code=3000, message='Go away!'):
"""Close session or endpoint connection.
`code`
Closing code
`message`
Close message
"""
if self.state != CLOSED:
try:
self.conn.on_close()
except Exception:
LOG.debug("Failed to call on_close().", exc_info=True)
finally:
self.state = CLOSED
self.close_reason = (code, message)
self.conn = None
# Bump stats
self.stats.on_sess_closed(self.transport_name)
# If we have active handler, notify that session was closed
if self.handler is not None:
self.handler.session_closed()
def delayed_close(self):
"""Delayed close - won't close immediately, but on next ioloop tick."""
self.state = CLOSING
self.server.io_loop.add_callback(self.close)
def get_close_reason(self):
"""Return last close reason tuple.
For example:
if self.session.is_closed:
code, reason = self.session.get_close_reason()
"""
if self.close_reason:
return self.close_reason
return (3000, 'Go away!')
@property
def is_closed(self):
"""Check if session was closed."""
return self.state == CLOSED or self.state == CLOSING
def send_message(self, msg, stats=True, binary=False):
"""Send or queue outgoing message
`msg`
Message to send
`stats`
If set to True, will update statistics after operation completes
"""
raise NotImplementedError()
def send_jsonified(self, msg, stats=True):
"""Send or queue outgoing message which was json-encoded before. Used by the `broadcast`
method.
`msg`
JSON-encoded message to send
`stats`
If set to True, will update statistics after operation completes
"""
raise NotImplementedError()
def broadcast(self, clients, msg):
"""Optimized `broadcast` implementation. Depending on type of the session, will json-encode
        message once and will call either `send_message` or `send_jsonified`.
`clients`
Clients iterable
`msg`
Message to send
"""
self.server.broadcast(clients, msg)
class Session(BaseSession, sessioncontainer.SessionMixin):
"""SockJS session implementation.
"""
def __init__(self, conn, server, session_id, expiry=None):
"""Session constructor.
`conn`
Default connection class
`server`
`SockJSRouter` instance
`session_id`
Session id
`expiry`
Session expiry time
"""
# Initialize session
sessioncontainer.SessionMixin.__init__(self, session_id, expiry)
BaseSession.__init__(self, conn, server)
self.send_queue = ''
self.send_expects_json = True
# Heartbeat related stuff
self._heartbeat_timer = None
self._heartbeat_interval = self.server.settings['heartbeat_delay'] * 1000
self._immediate_flush = self.server.settings['immediate_flush']
self._pending_flush = False
self._verify_ip = self.server.settings['verify_ip']
# Session callbacks
def on_delete(self, forced):
"""Session expiration callback
`forced`
If session item explicitly deleted, forced will be set to True. If
item expired, will be set to False.
"""
        # Do not close the session if deletion was not forced and there is a running connection
if not forced and self.handler is not None and not self.is_closed:
self.promote()
else:
self.close()
# Add session
def set_handler(self, handler, start_heartbeat=True):
"""Set active handler for the session
`handler`
Associate active Tornado handler with the session
`start_heartbeat`
Should session start heartbeat immediately
"""
# Check if session already has associated handler
if self.handler is not None:
handler.send_pack(proto.disconnect(2010, "Another connection still open"))
return False
if self._verify_ip and self.conn_info is not None:
# If IP address doesn't match - refuse connection
if handler.request.remote_ip != self.conn_info.ip:
LOG.error('Attempted to attach to session %s (%s) from different IP (%s)' % (
self.session_id,
self.conn_info.ip,
handler.request.remote_ip
))
handler.send_pack(proto.disconnect(2010, "Attempted to connect to session from different IP"))
return False
if (self.state == CLOSING or self.state == CLOSED) and not self.send_queue:
handler.send_pack(proto.disconnect(*self.get_close_reason()))
return False
# Associate handler and promote session
super(Session, self).set_handler(handler)
self.promote()
if start_heartbeat:
self.start_heartbeat()
return True
@ensure_io_loop
def verify_state(self):
"""Verify if session was not yet opened. If it is, open it and call connections `on_open`"""
# If we're in CONNECTING state - send 'o' message to the client
if self.state == CONNECTING:
self.handler.send_pack(proto.CONNECT)
# Call parent implementation
super(Session, self).verify_state()
def remove_handler(self, handler):
"""Detach active handler from the session
`handler`
Handler to remove
"""
super(Session, self).remove_handler(handler)
self.promote()
self.stop_heartbeat()
def send_message(self, msg, stats=True, binary=False):
"""Send or queue outgoing message
`msg`
Message to send
`stats`
If set to True, will update statistics after operation completes
"""
self.send_jsonified(proto.json_encode(bytes_to_str(msg)), stats)
@ensure_io_loop
def send_jsonified(self, msg, stats=True):
"""Send JSON-encoded message
`msg`
JSON encoded string to send
`stats`
If set to True, will update statistics after operation completes
"""
msg = bytes_to_str(msg)
if self._immediate_flush:
if self.handler and self.handler.active and not self.send_queue:
# Send message right away
self.handler.send_pack('a[%s]' % msg)
else:
if self.send_queue:
self.send_queue += ','
self.send_queue += msg
self.flush()
else:
if self.send_queue:
self.send_queue += ','
self.send_queue += msg
if not self._pending_flush:
self.server.io_loop.add_callback(self.flush)
self._pending_flush = True
if stats:
self.stats.on_pack_sent(1)
@ensure_io_loop
def flush(self):
"""Flush message queue if there's an active connection running"""
self._pending_flush = False
if self.handler is None or not self.handler.active or not self.send_queue:
return
self.handler.send_pack('a[%s]' % self.send_queue)
self.send_queue = ''
@ensure_io_loop
def close(self, code=3000, message='Go away!'):
"""Close session.
`code`
Closing code
`message`
Closing message
"""
if self.state != CLOSED:
# Notify handler
if self.handler is not None:
self.handler.send_pack(proto.disconnect(code, message))
super(Session, self).close(code, message)
# Heartbeats
def start_heartbeat(self):
"""Reset hearbeat timer"""
self.stop_heartbeat()
self._heartbeat_timer = periodic.Callback(self._heartbeat,
self._heartbeat_interval,
self.server.io_loop)
self._heartbeat_timer.start()
def stop_heartbeat(self):
"""Stop active heartbeat"""
if self._heartbeat_timer is not None:
self._heartbeat_timer.stop()
self._heartbeat_timer = None
def delay_heartbeat(self):
"""Delay active heartbeat"""
if self._heartbeat_timer is not None:
self._heartbeat_timer.delay()
@ensure_io_loop
def _heartbeat(self):
"""Heartbeat callback"""
if self.handler is not None:
self.handler.send_pack(proto.HEARTBEAT)
else:
self.stop_heartbeat()
def on_messages(self, msg_list):
"""Handle incoming messages
`msg_list`
Message list to process
"""
self.stats.on_pack_recv(len(msg_list))
for msg in msg_list:
if self.state == OPEN:
self.conn.on_message(msg) | PypiClean |
/GenIce2-2.1.7.1.tar.gz/GenIce2-2.1.7.1/genice2/loaders/mdv.py | import pairlist as pl
import numpy as np
import re
import logging
desc = {"ref": {},
"brief": "MDView file (in Angdtrom).",
"usage": "No options available."
}
if __name__[-4:] == 'mdva':
desc["brief"] = "MDView file (in au)."
def load_iter(file, oname, hname=None):
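    """Iterate over the frames of an MDView trajectory file.
    Yields (oatoms, hatoms, cellmat) per frame: fractional coordinates of the
    atoms whose names match ``oname`` and ``hname`` (``hatoms`` is None when no
    matching atoms are collected) and the cell matrix in nm. Input coordinates
    are taken to be in Angstrom, or in atomic units for the ``mdva`` variant.
    """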
logger = logging.getLogger()
if __name__[-4:] == "mdva":
conv = 0.052917721067 # au in nm
else:
conv = 0.1 # AA in nm
natom = -100
lineno = 0
for line in iter(file.readline, ""):
lineno += 1
if lineno == 1:
# first line
cols = line.split()
assert cols[0] == '#' # yaga style
c = [float(x) for x in cols[1:4]]
cellmat = np.array([[c[0], 0., 0.],
[0., c[1], 0.],
[0., 0., c[2]]])
cellmat *= conv
celli = np.linalg.inv(cellmat)
elif line[0] == "-":
continue
elif natom < 0:
natom = int(line)
hatoms = []
oatoms = []
skipped = set()
elif natom > 0:
natom -= 1
cols = line.split()
atomname = cols[0]
# atomid = int(line[15:20])
pos = np.array([float(x) for x in cols[1:4]]) * conv
if re.fullmatch(oname, atomname):
oatoms.append(pos)
elif hname is not None and re.fullmatch(hname, atomname):
hatoms.append(pos)
else:
if atomname not in skipped:
logger.info("Skip {0}".format(atomname))
skipped.add(atomname)
# finalize the frame
if natom == 0:
# fractional coordinate
oatoms = np.array(oatoms) @ celli
if len(hatoms) > 0:
hatoms = np.array(hatoms) @ celli
else:
hatoms = None
yield oatoms, hatoms, cellmat
natom = -100
lineno = 0 | PypiClean |
/DataTig-0.5.0.tar.gz/DataTig-0.5.0/datatig/cli.py | import argparse
import datatig.process
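# Illustrative command-line usage (assuming a ``datatig`` console entry point
# wired to ``main``):
#   datatig build SOURCE --sqliteoutput data.sqlite
#   datatig check SOURCE
#   datatig versionedbuild SOURCE --sqliteoutput data.sqlite --allbranches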
def main() -> None:
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest="subparser_name")
build_parser = subparsers.add_parser("build")
build_parser.add_argument("source")
build_parser.add_argument(
"--staticsiteoutput", help="Location of Static Site Output"
)
build_parser.add_argument("--staticsiteurl", help="URL for Static Site Output")
build_parser.add_argument("--sqliteoutput", help="Location of SQLite file Output")
build_parser.add_argument(
"--frictionlessoutput", help="Location of Frictionless Data file Output"
)
check_parser = subparsers.add_parser("check")
check_parser.add_argument("source")
versionedbuild_parser = subparsers.add_parser("versionedbuild")
versionedbuild_parser.add_argument("source")
versionedbuild_parser.add_argument(
"--sqliteoutput", help="Location of SQLite file Output"
)
versionedbuild_parser.add_argument(
"--staticsiteoutput", help="Location of Static Site Output"
)
versionedbuild_parser.add_argument(
"--staticsiteurl", help="URL for Static Site Output"
)
versionedbuild_parser.add_argument(
"--refs", help="Refs to build, comma sep", default=""
)
versionedbuild_parser.add_argument(
"--allbranches", help="Build all branches", action="store_true"
)
versionedbuild_parser.add_argument(
"--defaultref", help="The Default ref.", default=""
)
args = parser.parse_args()
if args.subparser_name == "build":
staticsite_output = args.staticsiteoutput
sqlite_output = args.sqliteoutput
frictionless_output = args.frictionlessoutput
if not staticsite_output and not sqlite_output and not frictionless_output:
print("You must specify one of the build options when running build.")
exit(-1)
datatig.process.go(
args.source,
staticsite_output=staticsite_output,
staticsite_url=args.staticsiteurl,
sqlite_output=sqlite_output,
frictionless_output=frictionless_output,
check_errors=True,
check_record_errors=False,
verbose=True,
sys_exit=True,
)
elif args.subparser_name == "check":
datatig.process.go(
args.source,
check_errors=True,
check_record_errors=True,
verbose=True,
sys_exit=True,
)
elif args.subparser_name == "versionedbuild":
staticsite_output = args.staticsiteoutput
sqlite_output = args.sqliteoutput
if not staticsite_output and not sqlite_output:
print("You must specify one of the build options when running build.")
exit(-1)
datatig.process.versioned_build(
args.source,
staticsite_output=staticsite_output,
staticsite_url=args.staticsiteurl,
sqlite_output=sqlite_output,
refs_str=args.refs,
all_branches=args.allbranches,
default_ref=args.defaultref,
)
if __name__ == "__main__":
main() | PypiClean |
/EOxServer-1.2.12-py3-none-any.whl/eoxserver/services/ows/wps/parameters/data_types.py |
from datetime import datetime, date, time, timedelta
from django.utils.dateparse import parse_date, parse_datetime, parse_time, utc
from django.utils.six import PY2, PY3, string_types
from django.utils.encoding import smart_str
from eoxserver.core.util.timetools import parse_duration
try:
from datetime import timezone
# as this class will be deprecated in Django 3.1, offer a constructor
def FixedOffset(offset, name=None):
if isinstance(offset, timedelta):
pass
else:
offset = timedelta(minutes=offset)
return timezone(offset) if name is None else timezone(offset, name)
except ImportError:
from django.utils.timezone import FixedOffset
class BaseType(object):
""" Base literal data type class.
    This class defines the interface shared by all literal data types.
"""
name = None # to be replaced by a name
if PY2:
dtype = str
elif PY3:
dtype = bytes
zero = None # when applicable to be replaced by a proper zero value
comparable = True # indicate whether the type can be compared (<,>,==)
@classmethod
def parse(cls, raw_value):
""" Cast or parse input to its proper representation."""
return cls.dtype(raw_value)
@classmethod
def encode(cls, value):
""" Encode value to a Unicode string."""
return smart_str(value)
@classmethod
def get_diff_dtype(cls): # difference type - change if differs from the base
""" Get type of the difference of this type.
E.g., `timedelta` for a `datetime`.
"""
return cls
@classmethod
def as_number(cls, value): # pylint: disable=unused-argument
""" convert to a number (e.g., duration)"""
raise TypeError("Data type %s cannot be converted to a number!" % cls)
@classmethod
def sub(cls, value0, value1): # pylint: disable=unused-argument
""" subtract value0 - value1 """
raise TypeError("Data type %s cannot be subtracted!" % cls)
class Boolean(BaseType):
""" Boolean literal data type class. """
name = "boolean"
dtype = bool
@classmethod
def parse(cls, raw_value):
if isinstance(raw_value, string_types):
raw_value = smart_str(raw_value.lower())
if raw_value in ('1', 'true'):
return True
elif raw_value in ('0', 'false'):
return False
else:
raise ValueError("Cannot parse boolean value '%s'!" % raw_value)
else:
return bool(raw_value)
@classmethod
def encode(cls, value):
return u'true' if value else u'false'
@classmethod
def as_number(cls, value):
return int(value)
@classmethod
def sub(cls, value0, value1):
""" subtract value0 - value1 """
return value0 - value1
class Integer(BaseType):
""" Integer literal data type class. """
name = "integer"
dtype = int
zero = 0
@classmethod
def encode(cls, value):
""" Encode value to a Unicode string."""
return smart_str(int(value))
@classmethod
def as_number(cls, value):
return value
@classmethod
def sub(cls, value0, value1):
""" subtract value0 - value1 """
return value0 - value1
class Double(BaseType):
""" Double precision float literal data type class. """
name = "double"
dtype = float
zero = 0.0
@classmethod
def encode(cls, value):
return u"%.15g" % cls.dtype(value)
@classmethod
def as_number(cls, value):
return value
@classmethod
def sub(cls, value0, value1):
""" subtract value0 - value1 """
return value0 - value1
class String(BaseType):
""" Unicode character string literal data type class. """
name = "string"
if PY2:
dtype = unicode
elif PY3:
dtype = str
encoding = 'utf-8'
comparable = False # disabled although Python implements comparable strings
@classmethod
def encode(cls, value):
""" Encode value to a Unicode string."""
try:
return smart_str(value)
except UnicodeDecodeError:
return smart_str(value, cls.encoding)
@classmethod
def parse(cls, raw_value):
return cls.dtype(raw_value)
@classmethod
def get_diff_dtype(cls): # string has no difference
return None
class Duration(BaseType):
""" Duration (`datetime.timedelta`) literal data type class. """
name = "duration"
dtype = timedelta
zero = timedelta(0)
@classmethod
def parse(cls, raw_value):
if isinstance(raw_value, cls.dtype):
return raw_value
return parse_duration(raw_value)
@classmethod
def encode(cls, value):
# NOTE: USE OF MONTH AND YEAR IS AMBIGUOUS! WE DO NOT ENCODE THEM!
if not isinstance(value, cls.dtype):
raise ValueError("Invalid value type '%s'!" % type(value))
items = []
if value.days < 0:
items.append('-')
value = -value
items.append('P')
if value.days != 0:
items.append('%dD' % value.days)
elif value.seconds == 0 and value.microseconds == 0:
items.append('T0S') # zero interval
if value.seconds != 0 or value.microseconds != 0:
minutes, seconds = divmod(value.seconds, 60)
hours, minutes = divmod(minutes, 60)
items.append('T')
if hours != 0:
items.append('%dH' % hours)
if minutes != 0:
items.append('%dM' % minutes)
if value.microseconds != 0:
items.append("%.6fS" % (seconds + 1e-6*value.microseconds))
elif seconds != 0:
items.append('%dS' % seconds)
return smart_str("".join(items))
@classmethod
def as_number(cls, value):
return 86400.0*value.days + 1.0*value.seconds + 1e-6*value.microseconds
@classmethod
def sub(cls, value0, value1):
""" subtract value0 - value1 """
return value0 - value1
class Date(BaseType):
""" Date (`datetime.date`) literal data type class. """
name = "date"
dtype = date
@classmethod
def get_diff_dtype(cls):
return Duration
@classmethod
def parse(cls, raw_value):
if isinstance(raw_value, cls.dtype):
return raw_value
value = parse_date(raw_value)
if value:
return value
raise ValueError("Could not parse ISO date from '%s'." % raw_value)
@classmethod
def encode(cls, value):
if isinstance(value, cls.dtype):
return smart_str(value.isoformat())
raise ValueError("Invalid value type '%s'!" % type(value))
@classmethod
def sub(cls, value0, value1):
""" subtract value0 - value1 """
return value0 - value1
class Time(BaseType):
""" Time (`datetime.time`) literal data type class. """
name = "time"
dtype = time
# TODO: implement proper Time time-zone handling
@classmethod
def get_diff_dtype(cls):
return Duration
@classmethod
def parse(cls, raw_value):
if isinstance(raw_value, cls.dtype):
return raw_value
value = parse_time(raw_value)
if value is not None:
return value
raise ValueError("Could not parse ISO time from '%s'." % raw_value)
@classmethod
def encode(cls, value):
if isinstance(value, cls.dtype):
return smart_str(value.isoformat())
raise ValueError("Invalid value type '%s'!" % type(value))
@classmethod
def sub(cls, value0, value1):
""" subtract value0 - value1 """
aux_date = datetime.now().date()
dt0 = datetime.combine(aux_date, value0)
dt1 = datetime.combine(aux_date, value1)
return dt0 - dt1
class DateTime(BaseType):
""" Date-time (`datetime.datetime`) literal data type class. """
name = "dateTime"
dtype = datetime
# tzinfo helpers
UTC = utc # Zulu-time TZ instance
TZOffset = FixedOffset # fixed TZ offset class, set minutes to instantiate
@classmethod
def get_diff_dtype(cls):
return Duration
@classmethod
def parse(cls, raw_value):
if isinstance(raw_value, cls.dtype):
return raw_value
value = parse_datetime(raw_value)
if value:
return value
raise ValueError("Could not parse ISO date-time from '%s'." % raw_value)
@classmethod
def encode(cls, value):
if isinstance(value, cls.dtype):
return smart_str(cls._isoformat(value))
raise ValueError("Invalid value type '%s'!" % type(value))
@classmethod
def sub(cls, value0, value1):
""" subtract value0 - value1 """
return value0 - value1
@staticmethod
def _isoformat(value):
""" Covert date-time object to ISO 8601 date-time string. """
if value.tzinfo and not value.utcoffset():
return value.replace(tzinfo=None).isoformat("T") + "Z"
return value.isoformat("T")
class DateTimeTZAware(DateTime):
""" Time-zone aware date-time (`datetime.datetime`) literal data type class.
This data-type is a variant of the `DateTime` which assures that
the parsed date-time is time-zone aware and optionally
also converted to a common target time-zone.
    The default time-zone applied to unaware time input is passed through
the constructor. By default the UTC time-zone is used.
By default the target time-zone is set to None which means that
the original time-zone is preserved.
Unlike the `DateTime` this class must be instantiated and it cannot be used
directly as a data-type.
Constructor parameters:
default_tz default time-zone
target_tz optional target time-zone
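    Illustrative example (the result depends on the configured time-zones)::
        dt = DateTimeTZAware(target_tz=DateTime.UTC)
        value = dt.parse("2016-07-14T12:00:00+02:00")
        # aware inputs are converted to the target time-zone (UTC here);
        # naive inputs first receive the default time-zone.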
"""
def __init__(self, default_tz=DateTime.UTC, target_tz=None):
self.default_tz = default_tz
self.target_tz = target_tz
def set_time_zone(self, value):
""" Make a date-time value time-zone aware by setting the default
time-zone and convert the time-zone if the target time-zone is given.
"""
if value.tzinfo is None:
value = value.replace(tzinfo=self.default_tz)
if self.target_tz:
value = value.astimezone(self.target_tz)
return value
def parse(self, raw_value):
return self.set_time_zone(super(DateTimeTZAware, self).parse(raw_value))
def encode(self, value):
return super(DateTimeTZAware, self).encode(self.set_time_zone(value))
# mapping of plain Python types to data type classes
if PY3:
DTYPES = {
bytes: String,
str: String,
bool: Boolean,
int: Integer,
float: Double,
date: Date,
datetime: DateTime,
time: Time,
timedelta: Duration,
}
elif PY2:
DTYPES = {
str: String,
unicode: String,
bool: Boolean,
int: Integer,
long: Integer,
float: Double,
date: Date,
datetime: DateTime,
time: Time,
timedelta: Duration,
} | PypiClean |
/DLTA-AI-1.1.tar.gz/DLTA-AI-1.1/DLTA_AI_app/labelme/shape.py | import copy
import math
from qtpy import QtCore
from qtpy import QtGui
import labelme.utils
# TODO(unknown):
# - [opt] Store paths instead of creating new ones at each paint.
DEFAULT_LINE_COLOR = QtGui.QColor(0, 255, 0, 128) # bf hovering
DEFAULT_FILL_COLOR = QtGui.QColor(0, 255, 0, 128) # hovering
DEFAULT_SELECT_LINE_COLOR = QtGui.QColor(255, 255, 255) # selected
DEFAULT_SELECT_FILL_COLOR = QtGui.QColor(0, 255, 0, 155) # selected
DEFAULT_VERTEX_FILL_COLOR = QtGui.QColor(0, 255, 0, 255) # hovering
DEFAULT_HVERTEX_FILL_COLOR = QtGui.QColor(255, 255, 255, 255) # hovering
class Shape(object):
# Render handles as squares
P_SQUARE = 0
# Render handles as circles
P_ROUND = 1
# Flag for the handles we would move if dragging
MOVE_VERTEX = 0
    # Flag for all other handles on the current shape
NEAR_VERTEX = 1
# The following class variables influence the drawing of all shape objects.
line_color = DEFAULT_LINE_COLOR
fill_color = DEFAULT_FILL_COLOR
select_line_color = DEFAULT_SELECT_LINE_COLOR
select_fill_color = DEFAULT_SELECT_FILL_COLOR
vertex_fill_color = DEFAULT_VERTEX_FILL_COLOR
hvertex_fill_color = DEFAULT_HVERTEX_FILL_COLOR
point_type = P_ROUND
point_size = 8
scale = 1.0
def __init__(
self,
label=None,
line_color=None,
shape_type=None,
flags=None,
group_id=None,
content=None
):
self.label = label
self.group_id = group_id
self.points = []
self.bbox = []
self.fill = False
self.selected = False
self.shape_type = shape_type
self.flags = flags
self.content = content
self.other_data = {}
self._highlightIndex = None
self._highlightMode = self.NEAR_VERTEX
self._highlightSettings = {
self.NEAR_VERTEX: (4, self.P_ROUND),
self.MOVE_VERTEX: (1.5, self.P_SQUARE),
}
self._closed = False
if line_color is not None:
# Override the class line_color attribute
# with an object attribute. Currently this
# is used for drawing the pending line a different color.
self.line_color = line_color
self.shape_type = shape_type
@property
def shape_type(self):
return self._shape_type
@shape_type.setter
def shape_type(self, value):
if value is None:
value = "polygon"
if value not in [
"polygon",
"rectangle",
"point",
"line",
"circle",
"linestrip",
]:
raise ValueError("Unexpected shape_type: {}".format(value))
self._shape_type = value
def close(self):
self._closed = True
def addPoint(self, point):
if self.points and point == self.points[0]:
self.close()
else:
self.points.append(point)
def canAddPoint(self):
return self.shape_type in ["polygon", "linestrip"]
def popPoint(self):
if self.points:
return self.points.pop()
return None
def insertPoint(self, i, point):
self.points.insert(i, point)
def removePoint(self, i):
self.points.pop(i)
def isClosed(self):
return self._closed
def setOpen(self):
self._closed = False
def getRectFromLine(self, pt1, pt2):
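        """Return the QRectF spanned by the two corner points ``pt1`` and ``pt2``."""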
x1, y1 = pt1.x(), pt1.y()
x2, y2 = pt2.x(), pt2.y()
return QtCore.QRectF(x1, y1, x2 - x1, y2 - y1)
def paint(self, painter):
if self.points:
color = (
self.select_line_color if self.selected else self.line_color
)
pen = QtGui.QPen(color)
# Try using integer sizes for smoother drawing(?)
pen.setWidth(max(1, int(round(2.0 / self.scale))))
painter.setPen(pen)
line_path = QtGui.QPainterPath()
vrtx_path = QtGui.QPainterPath()
if self.shape_type == "rectangle":
assert len(self.points) in [1, 2]
if len(self.points) == 2:
rectangle = self.getRectFromLine(*self.points)
line_path.addRect(rectangle)
for i in range(len(self.points)):
self.drawVertex(vrtx_path, i)
elif self.shape_type == "circle":
assert len(self.points) in [1, 2]
if len(self.points) == 2:
rectangle = self.getCircleRectFromLine(self.points)
line_path.addEllipse(rectangle)
for i in range(len(self.points)):
self.drawVertex(vrtx_path, i)
elif self.shape_type == "linestrip":
line_path.moveTo(self.points[0])
for i, p in enumerate(self.points):
line_path.lineTo(p)
self.drawVertex(vrtx_path, i)
else:
line_path.moveTo(self.points[0])
# Uncommenting the following line will draw 2 paths
# for the 1st vertex, and make it non-filled, which
# may be desirable.
# self.drawVertex(vrtx_path, 0)
for i, p in enumerate(self.points):
line_path.lineTo(p)
self.drawVertex(vrtx_path, i)
if self.isClosed():
line_path.lineTo(self.points[0])
painter.drawPath(line_path)
painter.drawPath(vrtx_path)
painter.fillPath(vrtx_path, self._vertex_fill_color)
if self.fill:
color = (
self.select_fill_color
if self.selected
else self.fill_color
)
painter.fillPath(line_path, color)
def drawVertex(self, path, i):
d = self.point_size / self.scale
shape = self.point_type
point = self.points[i]
if i == self._highlightIndex:
size, shape = self._highlightSettings[self._highlightMode]
d *= size
if self._highlightIndex is not None:
self._vertex_fill_color = self.hvertex_fill_color
else:
self._vertex_fill_color = self.vertex_fill_color
if shape == self.P_SQUARE:
path.addRect(point.x() - d / 2, point.y() - d / 2, d, d)
elif shape == self.P_ROUND:
path.addEllipse(point, d / 2.0, d / 2.0)
else:
assert False, "unsupported vertex shape"
def nearestVertex(self, point, epsilon):
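        """Return the index of the vertex closest to ``point`` within ``epsilon``, or None."""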
min_distance = float("inf")
min_i = None
for i, p in enumerate(self.points):
dist = labelme.utils.distance(p - point)
if dist <= epsilon and dist < min_distance:
min_distance = dist
min_i = i
return min_i
def nearestEdge(self, point, epsilon):
min_distance = float("inf")
post_i = None
for i in range(len(self.points)):
line = [self.points[i - 1], self.points[i]]
dist = labelme.utils.distancetoline(point, line)
if dist <= epsilon and dist < min_distance:
min_distance = dist
post_i = i
return post_i
def containsPoint(self, point):
return self.makePath().contains(point)
def getCircleRectFromLine(self, line):
"""Computes parameters to draw with `QPainterPath::addEllipse`"""
if len(line) != 2:
return None
(c, point) = line
r = line[0] - line[1]
d = math.sqrt(math.pow(r.x(), 2) + math.pow(r.y(), 2))
rectangle = QtCore.QRectF(c.x() - d, c.y() - d, 2 * d, 2 * d)
return rectangle
def makePath(self):
if self.shape_type == "rectangle":
path = QtGui.QPainterPath()
if len(self.points) == 2:
rectangle = self.getRectFromLine(*self.points)
path.addRect(rectangle)
elif self.shape_type == "circle":
path = QtGui.QPainterPath()
if len(self.points) == 2:
rectangle = self.getCircleRectFromLine(self.points)
path.addEllipse(rectangle)
else:
path = QtGui.QPainterPath(self.points[0])
for p in self.points[1:]:
path.lineTo(p)
return path
def boundingRect(self):
return self.makePath().boundingRect()
def moveBy(self, offset):
self.points = [p + offset for p in self.points]
def moveVertexBy(self, i, offset):
self.points[i] = self.points[i] + offset
def highlightVertex(self, i, action):
"""Highlight a vertex appropriately based on the current action
Args:
i (int): The vertex index
action (int): The action
(see Shape.NEAR_VERTEX and Shape.MOVE_VERTEX)
"""
self._highlightIndex = i
self._highlightMode = action
def highlightClear(self):
"""Clear the highlighted point"""
self._highlightIndex = None
def copy(self):
return copy.deepcopy(self)
def __len__(self):
return len(self.points)
def __getitem__(self, key):
return self.points[key]
def __setitem__(self, key, value):
self.points[key] = value | PypiClean |
/FJUtils-0.0.16-py3-none-any.whl/fjutils/optimizers.py | import optax
from typing import Optional, NamedTuple, Any
import chex
from jax import numpy as jnp
import jax
class OptaxScheduledWeightDecayState(NamedTuple):
count: jax.Array
def optax_add_scheduled_weight_decay(schedule_fn, mask=None):
"""
    :param schedule_fn: callable mapping the current step count to a weight-decay coefficient
    :param mask: optional optax-style mask selecting the parameters that receive weight decay
    :return: initialized optax GradientTransformation
"""
def init_fn(params):
del params
return OptaxScheduledWeightDecayState(count=jnp.zeros([], jnp.int32))
def update_fn(updates, state, params):
if params is None:
raise ValueError('Params cannot be None for weight decay!')
weight_decay = schedule_fn(state.count)
updates = jax.tree_util.tree_map(
lambda g, p: g + weight_decay * p, updates, params
)
return updates, OptaxScheduledWeightDecayState(
count=optax.safe_int32_increment(state.count)
)
if mask is not None:
return optax.masked(optax.GradientTransformation(init_fn, update_fn), mask)
return optax.GradientTransformation(init_fn, update_fn)
def get_adamw_with_cosine_scheduler(
steps: int,
learning_rate: float = 5e-5,
b1: float = 0.9,
b2: float = 0.999,
eps: float = 1e-8,
eps_root: float = 0.0,
weight_decay: float = 1e-1,
gradient_accumulation_steps: int = 1,
mu_dtype: Optional[chex.ArrayDType] = None,
):
"""
:param gradient_accumulation_steps:
:param steps:
:param learning_rate:
:param b1:
:param b2:
:param eps:
:param eps_root:
:param weight_decay:
:param mu_dtype:
:return: Optimizer and Scheduler
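    Illustrative usage (``params`` stands for an existing JAX parameter pytree)::
        tx, scheduler = get_adamw_with_cosine_scheduler(steps=1000, learning_rate=3e-4)
        opt_state = tx.init(params)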
"""
scheduler = optax.cosine_decay_schedule(
init_value=learning_rate,
decay_steps=steps
)
tx = optax.chain(
optax.scale_by_adam(
b1=b1,
b2=b2,
eps=eps,
eps_root=eps_root,
mu_dtype=mu_dtype
),
optax.add_decayed_weights(
weight_decay=weight_decay
),
optax.scale_by_schedule(scheduler),
optax.scale(-1)
)
if gradient_accumulation_steps > 1:
tx = optax.MultiSteps(
tx, gradient_accumulation_steps
)
return tx, scheduler
def get_adamw_with_linear_scheduler(
steps: int,
learning_rate_start: float = 5e-5,
learning_rate_end: float = 1e-5,
b1: float = 0.9,
b2: float = 0.999,
eps: float = 1e-8,
eps_root: float = 0.0,
weight_decay: float = 1e-1,
gradient_accumulation_steps: int = 1,
mu_dtype: Optional[chex.ArrayDType] = None,
):
"""
:param gradient_accumulation_steps:
:param steps:
:param learning_rate_start:
:param learning_rate_end:
:param b1:
:param b2:
:param eps:
:param eps_root:
:param weight_decay:
:param mu_dtype:
:return: Optimizer and Scheduler
"""
scheduler = optax.linear_schedule(
init_value=learning_rate_start,
end_value=learning_rate_end,
transition_steps=steps
)
tx = optax.chain(
optax.scale_by_adam(
b1=b1,
b2=b2,
eps=eps,
eps_root=eps_root,
mu_dtype=mu_dtype
),
optax.add_decayed_weights(
weight_decay=weight_decay
),
optax.scale_by_schedule(scheduler),
optax.scale(-1)
)
if gradient_accumulation_steps > 1:
tx = optax.MultiSteps(
tx, gradient_accumulation_steps
)
return tx, scheduler
def get_adafactor_with_linear_scheduler(
steps: int,
learning_rate_start: float = 5e-5,
learning_rate_end: float = 1e-5,
weight_decay=1e-1,
min_dim_size_to_factor: int = 128,
decay_rate: float = 0.8,
decay_offset: int = 0,
multiply_by_parameter_scale: float = True,
clipping_threshold: Optional[float] = 1.0,
momentum: Optional[float] = None,
dtype_momentum: Any = jnp.float32,
weight_decay_rate: Optional[float] = None,
eps: float = 1e-30,
factored: bool = True,
gradient_accumulation_steps: int = 1,
weight_decay_mask=None,
):
"""
:param gradient_accumulation_steps:
:param steps:
:param learning_rate_start:
:param learning_rate_end:
:param weight_decay:
:param min_dim_size_to_factor:
:param decay_rate:
:param decay_offset:
:param multiply_by_parameter_scale:
:param clipping_threshold:
:param momentum:
:param dtype_momentum:
:param weight_decay_rate:
:param eps:
:param factored:
:param weight_decay_mask:
:return: Optimizer and Scheduler
"""
scheduler = optax.linear_schedule(
init_value=learning_rate_start,
end_value=learning_rate_end,
transition_steps=steps
)
tx = optax.chain(
optax.adafactor(
learning_rate=scheduler,
min_dim_size_to_factor=min_dim_size_to_factor,
decay_rate=decay_rate,
decay_offset=decay_offset,
multiply_by_parameter_scale=multiply_by_parameter_scale,
clipping_threshold=clipping_threshold,
eps=eps,
momentum=momentum,
weight_decay_rate=weight_decay_rate,
dtype_momentum=dtype_momentum,
factored=factored
),
optax_add_scheduled_weight_decay(
lambda step: -scheduler(step) * weight_decay,
weight_decay_mask
)
)
if gradient_accumulation_steps > 1:
tx = optax.MultiSteps(
tx, gradient_accumulation_steps
)
return tx, scheduler
def get_adafactor_with_cosine_scheduler(
steps: int,
learning_rate=5e-5,
weight_decay=1e-1,
min_dim_size_to_factor: int = 128,
decay_rate: float = 0.8,
decay_offset: int = 0,
multiply_by_parameter_scale: float = True,
clipping_threshold: Optional[float] = 1.0,
momentum: Optional[float] = None,
dtype_momentum: Any = jnp.float32,
weight_decay_rate: Optional[float] = None,
eps: float = 1e-30,
factored: bool = True,
weight_decay_mask=None,
gradient_accumulation_steps: int = 1
):
"""
:param gradient_accumulation_steps:
:param steps:
:param learning_rate:
:param weight_decay:
:param min_dim_size_to_factor:
:param decay_rate:
:param decay_offset:
:param multiply_by_parameter_scale:
:param clipping_threshold:
:param momentum:
:param dtype_momentum:
:param weight_decay_rate:
:param eps:
:param factored:
:param weight_decay_mask:
:param gradient_accumulation_steps
:return: Optimizer and Scheduler
"""
scheduler = optax.cosine_decay_schedule(
init_value=learning_rate,
decay_steps=steps
)
tx = optax.chain(
optax.adafactor(
learning_rate=scheduler,
min_dim_size_to_factor=min_dim_size_to_factor,
decay_rate=decay_rate,
decay_offset=decay_offset,
multiply_by_parameter_scale=multiply_by_parameter_scale,
clipping_threshold=clipping_threshold,
eps=eps,
momentum=momentum,
weight_decay_rate=weight_decay_rate,
dtype_momentum=dtype_momentum,
factored=factored
),
optax_add_scheduled_weight_decay(
lambda step: -scheduler(step) * weight_decay,
weight_decay_mask
)
)
if gradient_accumulation_steps > 1:
tx = optax.MultiSteps(
tx, gradient_accumulation_steps
)
return tx, scheduler
def get_lion_with_cosine_scheduler(
steps: int,
learning_rate=5e-5,
alpha: float = 0.0,
exponent: float = 1.0,
b1: float = 0.9,
b2: float = 0.99,
gradient_accumulation_steps: int = 1,
mu_dtype: Optional[chex.ArrayDType] = None,
):
"""
Args:
        learning_rate: The initial (peak) learning rate for the cosine decay.
        steps: Positive integer - the number of steps over which to apply
            the decay.
alpha: Float. The minimum value of the multiplier used to adjust the
learning rate.
exponent: Float. The default decay is 0.5 * (1 + cos(pi * t/T)), where t is
the current timestep and T is the `decay_steps`. The exponent modifies
this to be (0.5 * (1 + cos(pi * t/T))) ** exponent. Defaults to 1.0.
b1: Rate for combining the momentum and the current grad.
b2: Decay rate for the exponentially weighted average of grads.
mu_dtype: Optional `dtype` to be used for the momentum; if
            `None` then the `dtype` is inferred from `params` and `updates`.
        gradient_accumulation_steps: number of steps over which gradients are accumulated.
Return:
Optimizer , Scheduler
"""
try:
scheduler = optax.cosine_decay_schedule(
init_value=learning_rate,
decay_steps=steps,
alpha=alpha,
exponent=exponent
)
    except TypeError:
        # Older optax releases do not accept the `exponent` keyword; retry without it.
scheduler = optax.cosine_decay_schedule(
init_value=learning_rate,
decay_steps=steps,
alpha=alpha,
# exponent=exponent
)
tx = optax.chain(
optax.scale_by_lion(
b1=b1,
b2=b2,
mu_dtype=mu_dtype
),
optax.scale_by_schedule(scheduler),
optax.scale(-1)
)
if gradient_accumulation_steps > 1:
tx = optax.MultiSteps(
tx, gradient_accumulation_steps
)
return tx, scheduler
def get_lion_with_linear_scheduler(
steps: int,
learning_rate_start: float = 5e-5,
learning_rate_end: float = 1e-5,
b1: float = 0.9,
b2: float = 0.99,
gradient_accumulation_steps: int = 1,
mu_dtype: Optional[chex.ArrayDType] = None,
):
"""
Args:
steps: total train steps (max_steps)
        learning_rate_start: initial learning rate of the linear schedule
        learning_rate_end: final learning rate of the linear schedule
b1: Rate for combining the momentum and the current grad.
b2: Decay rate for the exponentially weighted average of grads.
mu_dtype: Optional `dtype` to be used for the momentum; if
            `None` then the `dtype` is inferred from `params` and `updates`.
        gradient_accumulation_steps: number of steps over which gradients are accumulated.
Return:
Optimizer , Scheduler
"""
scheduler = optax.linear_schedule(
init_value=learning_rate_start,
end_value=learning_rate_end,
transition_steps=steps
)
tx = optax.chain(
optax.scale_by_lion(
b1=b1,
b2=b2,
mu_dtype=mu_dtype
),
optax.scale_by_schedule(scheduler),
optax.scale(-1)
)
if gradient_accumulation_steps > 1:
tx = optax.MultiSteps(
tx, gradient_accumulation_steps
)
return tx, scheduler
def get_adamw_with_warm_up_cosine_scheduler(
steps: int,
learning_rate: float = 5e-5,
b1: float = 0.9,
b2: float = 0.999,
eps: float = 1e-8,
eps_root: float = 0.0,
weight_decay: float = 1e-1,
exponent: float = 1.0,
gradient_accumulation_steps: int = 1,
mu_dtype: Optional[chex.ArrayDType] = None
):
"""
:param steps:
:param learning_rate:
:param b1:
:param b2:
:param eps:
:param eps_root:
:param weight_decay:
:param exponent:
:param gradient_accumulation_steps:
:param mu_dtype:
:return:
"""
scheduler = optax.warmup_cosine_decay_schedule(
init_value=0.5e-7,
peak_value=learning_rate,
warmup_steps=steps,
decay_steps=steps + 1,
end_value=learning_rate,
exponent=exponent
)
tx = optax.chain(
optax.scale_by_adam(
b1=b1,
b2=b2,
eps=eps,
eps_root=eps_root,
mu_dtype=mu_dtype
),
optax.add_decayed_weights(
weight_decay=weight_decay
),
optax.scale_by_schedule(scheduler),
optax.scale(-1)
)
if gradient_accumulation_steps > 1:
tx = optax.MultiSteps(
tx, gradient_accumulation_steps
)
return tx, scheduler
def get_adafactor_with_warm_up_cosine_scheduler(
steps: int,
learning_rate=5e-5,
weight_decay=1e-1,
min_dim_size_to_factor: int = 128,
decay_rate: float = 0.8,
decay_offset: int = 0,
multiply_by_parameter_scale: float = True,
clipping_threshold: Optional[float] = 1.0,
momentum: Optional[float] = None,
dtype_momentum: Any = jnp.float32,
weight_decay_rate: Optional[float] = None,
eps: float = 1e-30,
factored: bool = True,
exponent: float = 1.0,
weight_decay_mask=None,
gradient_accumulation_steps: int = 1
):
"""
:param steps:
:param learning_rate:
:param weight_decay:
:param min_dim_size_to_factor:
:param decay_rate:
:param decay_offset:
:param multiply_by_parameter_scale:
:param clipping_threshold:
:param momentum:
:param dtype_momentum:
:param weight_decay_rate:
:param eps:
:param factored:
:param exponent:
:param weight_decay_mask:
:param gradient_accumulation_steps:
:return:
"""
scheduler = optax.warmup_cosine_decay_schedule(
init_value=0.5e-7,
peak_value=learning_rate,
warmup_steps=steps,
decay_steps=steps + 1,
end_value=learning_rate,
exponent=exponent
)
tx = optax.chain(
optax.adafactor(
learning_rate=scheduler,
min_dim_size_to_factor=min_dim_size_to_factor,
decay_rate=decay_rate,
decay_offset=decay_offset,
multiply_by_parameter_scale=multiply_by_parameter_scale,
clipping_threshold=clipping_threshold,
eps=eps,
momentum=momentum,
weight_decay_rate=weight_decay_rate,
dtype_momentum=dtype_momentum,
factored=factored
),
optax_add_scheduled_weight_decay(
lambda step: -scheduler(step) * weight_decay,
weight_decay_mask
)
)
if gradient_accumulation_steps > 1:
tx = optax.MultiSteps(
tx, gradient_accumulation_steps
)
return tx, scheduler
def get_lion_with_warm_up_cosine_scheduler(
steps: int,
learning_rate=5e-5,
exponent: float = 1.0,
b1: float = 0.9,
b2: float = 0.99,
gradient_accumulation_steps: int = 1,
mu_dtype: Optional[chex.ArrayDType] = None,
):
"""
:param steps:
:param learning_rate:
:param exponent:
:param b1:
:param b2:
:param gradient_accumulation_steps:
:param mu_dtype:
:return:
"""
scheduler = optax.warmup_cosine_decay_schedule(
init_value=0.5e-7,
peak_value=learning_rate,
warmup_steps=steps,
decay_steps=steps + 1,
end_value=learning_rate,
exponent=exponent,
)
tx = optax.chain(
optax.scale_by_lion(
b1=b1,
b2=b2,
mu_dtype=mu_dtype
),
optax.scale_by_schedule(scheduler),
optax.scale(-1)
)
if gradient_accumulation_steps > 1:
tx = optax.MultiSteps(
tx, gradient_accumulation_steps
)
return tx, scheduler | PypiClean |
/LitleSdkPython3-9.3.1b0.tar.gz/LitleSdkPython3-9.3.1b0/litleSdkPythonTestv2/functional/TestBatch.py |
import os, sys
lib_path = os.path.abspath('../all')
sys.path.append(lib_path)
from SetupTest import *
import unittest
import tempfile
import shutil
import copy
class TestBatch(unittest.TestCase):
def setUp(self):
self.merchantId = '0180'
def testSendToLitleSFTP_WithPreviouslyCreatedFile(self):
requestFileName = "litleSdk-testBatchFile-testSendToLitleSFTP_WithPreviouslyCreatedFile.xml"
request = litleBatchFileRequest(requestFileName)
requestFile = request.requestFile.name
self.assertTrue(os.path.exists(requestFile))
configFromFile = request.config
self.assertEqual('prelive.litle.com', configFromFile.batchHost)
self.assertEqual('15000', configFromFile.batchPort)
requestDir = configFromFile.batchRequestFolder
responseDir = configFromFile.batchResponseFolder
self.prepareTestRequest(request)
request.prepareForDelivery()
self.assertTrue(os.path.exists(requestFile))
self.assertTrue(os.path.getsize(requestFile) > 0)
request2 = litleBatchFileRequest(requestFileName)
response = request2.sendRequestSFTP(True)
self.assertPythonApi(request2, response)
self.assertGeneratedFiles(requestDir, responseDir, requestFileName, request2)
def testSendOnlyToLitleSFTP_WithPreviouslyCreatedFile(self):
requestFileName = "litleSdk-testBatchFile-testSendOnlyToLitleSFTP_WithPreviouslyCreatedFile.xml"
request = litleBatchFileRequest(requestFileName)
requestFile = request.requestFile.name
self.assertTrue(os.path.exists(requestFile))
configFromFile = request.config
self.assertEqual('prelive.litle.com', configFromFile.batchHost)
self.assertEqual('15000', configFromFile.batchPort)
requestDir = configFromFile.batchRequestFolder
responseDir = configFromFile.batchResponseFolder
self.prepareTestRequest(request)
request.prepareForDelivery()
self.assertTrue(os.path.exists(requestFile))
self.assertTrue(os.path.getsize(requestFile) > 0)
tempfile.mkdtemp()
newRequestDir = tempfile.gettempdir() + '/' + 'request'
if not os.path.exists(newRequestDir):
os.makedirs(newRequestDir)
newRequestFileName = 'litle.xml'
shutil.copyfile(requestFile, newRequestDir + '/' + newRequestFileName)
configForRequest2 = copy.deepcopy(configFromFile)
configForRequest2.batchRequestFolder = newRequestDir
request2 = litleBatchFileRequest(newRequestFileName, configForRequest2)
request2.sendRequestOnlyToSFTP(True)
request3 = litleBatchFileRequest(newRequestFileName, configForRequest2)
response = request3.retrieveOnlyFromSFTP()
self.assertPythonApi(request3, response)
self.assertGeneratedFiles(newRequestDir, responseDir, newRequestFileName, request3)
def testSendToLitleSFTP_WithFileConfig(self):
requestFileName = "litleSdk-testBatchFile-testSendToLitleSFTP_WithFileConfig.xml"
request = litleBatchFileRequest(requestFileName)
requestFile = request.requestFile.name
self.assertTrue(os.path.exists(requestFile))
configFromFile = request.config
self.assertEqual('prelive.litle.com', configFromFile.batchHost)
self.assertEqual('15000', configFromFile.batchPort)
self.assertEqual('True', configFromFile.printXml)
requestDir = configFromFile.batchRequestFolder
responseDir = configFromFile.batchResponseFolder
self.prepareTestRequest(request)
response = request.sendRequestSFTP()
self.assertPythonApi(request, response)
self.assertGeneratedFiles(requestDir, responseDir, requestFileName, request)
def testSendToLitleSFTP_WithConfigOverrides(self):
requestDir = tempfile.gettempdir() + '/' + 'request'
responseDir = tempfile.gettempdir() + '/' + 'response'
configOverrides = Configuration()
configOverrides.batchHost = 'prelive.litle.com'
configOverrides.sftpTimeout = '720000'
configOverrides.batchRequestFolder = requestDir
configOverrides.batchResponseFolder = responseDir
configOverrides.printXml = True
requestFileName = "litleSdk-testBatchFile-testSendToLitleSFTP_WithConfigOverrides.xml"
request = litleBatchFileRequest(requestFileName, configOverrides)
self.assertTrue(os.path.exists(request.requestFile.name))
self.assertTrue(request.config.printXml)
self.prepareTestRequest(request)
response = request.sendRequestSFTP()
self.assertPythonApi(request, response)
self.assertGeneratedFiles(requestDir, responseDir, requestFileName, request)
def assertPythonApi(self, request, response):
self.assertNotEqual(None, response)
self.assertNotEqual(None, response.litleResponse.litleSessionId)
self.assertEqual('0', response.litleResponse.response)
self.assertEqual('Valid Format', response.litleResponse.message)
self.assertEqual('9.3', response.litleResponse.version)
batchResponse = response.getNextBatchResponse()
self.assertNotEqual(None, response)
self.assertNotEqual(None, batchResponse.batchResponse.litleBatchId)
self.assertEqual(self.merchantId, batchResponse.batchResponse.merchantId)
saleResponse = batchResponse.getNextTransaction()
self.assertEqual('000', saleResponse.response)
self.assertEqual('Approved', saleResponse.message)
self.assertNotEqual(None, saleResponse.litleTxnId)
self.assertEqual('orderId11', saleResponse.orderId)
self.assertEqual('reportGroup11', saleResponse.reportGroup)
def prepareTestRequest(self, request):
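        # Build a minimal batch request containing a single Visa sale transaction.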
batchRequest = request.createBatch()
sale = litleXmlFields.sale()
sale.reportGroup = 'reportGroup11'
sale.orderId = 'orderId11'
sale.amount = 1099
sale.orderSource = 'ecommerce'
card = litleXmlFields.cardType()
card.type = 'VI'
card.number = "4457010000000009"
card.expDate = "0114"
sale.card = card
batchRequest.addTransaction(sale)
def assertGeneratedFiles(self, requestDir, responseDir, requestFileName, request):
requestPath = requestDir + '/' + requestFileName
responsePath = responseDir + '/' + requestFileName
fRequest = os.path.abspath(request.requestFile.name)
fResponse = os.path.abspath(request.responseFile.name)
self.assertEqual(requestPath, fRequest)
self.assertEqual(responsePath, fResponse)
self.assertTrue(os.path.exists(fRequest))
self.assertTrue(os.path.exists(fResponse))
self.assertTrue(os.path.getsize(fRequest) > 0)
self.assertTrue(os.path.getsize(fResponse) > 0)
responseFromFile = litleBatchFileResponse(fResponse)
self.assertPythonApi(request, responseFromFile)
def suite():
suite = unittest.TestSuite()
suite = unittest.TestLoader().loadTestsFromTestCase(TestBatch)
return suite
if __name__ == '__main__':
unittest.main() | PypiClean |
/CADET-Process-0.7.3.tar.gz/CADET-Process-0.7.3/CADETProcess/simulator/cadetAdapter.py | from collections import defaultdict
import os
import platform
from pathlib import Path
import shutil
import subprocess
from subprocess import TimeoutExpired
import time
import tempfile
from addict import Dict
import numpy as np
from cadet import Cadet as CadetAPI
from CADETProcess import CADETProcessError
from CADETProcess import settings
from CADETProcess.dataStructure import (
Bool, Switch, UnsignedFloat, UnsignedInteger,
)
from .simulator import SimulatorBase
from CADETProcess import SimulationResults
from CADETProcess.solution import (
SolutionIO, SolutionBulk, SolutionParticle, SolutionSolid, SolutionVolume
)
from CADETProcess.processModel import NoBinding, BindingBaseClass
from CADETProcess.processModel import NoReaction, ReactionBaseClass
from CADETProcess.processModel import NoDiscretization, DGMixin
from CADETProcess.processModel import (
UnitBaseClass, Inlet, Cstr, TubularReactor, LumpedRateModelWithoutPores
)
from CADETProcess.processModel import Process
__all__ = [
'Cadet',
'ModelSolverParametersGroup',
'UnitParametersGroup',
'AdsorptionParametersGroup',
'ReactionParametersGroup',
'SolverParametersGroup',
'SolverTimeIntegratorParametersGroup',
'ReturnParametersGroup',
'SensitivityParametersGroup',
]
class Cadet(SimulatorBase):
"""CADET class for running a simulation for given process objects.
Attributes
----------
install_path: str
Path to the installation of CADET
time_out : UnsignedFloat
Maximum duration for simulations
model_solver_parameters : ModelSolverParametersGroup
Container for solver parameters
unit_discretization_parameters : UnitDiscretizationParametersGroup
Container for unit discretization parameters
discretization_weno_parameters : DiscretizationWenoParametersGroup
Container for weno discretization parameters in units
adsorption_consistency_solver_parameters : ConsistencySolverParametersGroup
Container for consistency solver parameters
solver_parameters : SolverParametersGroup
Container for general solver settings
time_integrator_parameters : SolverTimeIntegratorParametersGroup
Container for time integrator parameters
return_parameters : ReturnParametersGroup
Container for return information of the system
    .. todo::
        Implement a method for loading CADET files that were not generated
        with CADETProcess and create a Process from them.
See Also
--------
ReturnParametersGroup
ModelSolverParametersGroup
SolverParametersGroup
SolverTimeIntegratorParametersGroup
CadetAPI
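    Examples
    --------
    Minimal sketch (assumes ``cadet-cli`` is installed and ``process`` is a
    configured ``Process`` instance)::
        simulator = Cadet()
        results = simulator.run(process)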
"""
timeout = UnsignedFloat()
use_c_api = Bool(default=False)
_force_constant_flow_rate = False
def __init__(self, install_path=None, temp_dir=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.install_path = install_path
self.model_solver_parameters = ModelSolverParametersGroup()
self.solver_parameters = SolverParametersGroup()
self.time_integrator_parameters = SolverTimeIntegratorParametersGroup()
self.return_parameters = ReturnParametersGroup()
self.sensitivity_parameters = SensitivityParametersGroup()
if temp_dir is None:
temp_dir = settings.temp_dir / 'simulation_files'
self.temp_dir = temp_dir
@property
def temp_dir(self):
if not self._temp_dir.exists():
self._temp_dir.mkdir(exist_ok=True, parents=True)
return self._temp_dir
@temp_dir.setter
def temp_dir(self, temp_dir):
self._temp_dir = temp_dir
@property
def install_path(self):
"""str: Path to the installation of CADET.
Parameters
----------
install_path : str or None
Path to the installation of CADET.
If None, the system installation will be used.
Raises
------
FileNotFoundError
If CADET can not be found.
See Also
--------
check_cadet
"""
return self._install_path
@install_path.setter
def install_path(self, install_path):
if install_path is None:
executable = 'cadet-cli'
if platform.system() == 'Windows':
if self.use_c_api:
executable += '.dll'
else:
executable += '.exe'
else:
if self.use_c_api:
executable += '.so'
try:
install_path = shutil.which(executable)
except TypeError:
raise FileNotFoundError(
"CADET could not be found. Please set an install path"
)
install_path = Path(install_path).expanduser()
if install_path.exists():
self._install_path = install_path
CadetAPI.cadet_path = install_path
self.logger.info(f"CADET was found here: {install_path}")
else:
raise FileNotFoundError(
"CADET could not be found. Please check the path"
)
cadet_lib_path = install_path.parent.parent / "lib"
try:
if cadet_lib_path.as_posix() not in os.environ['LD_LIBRARY_PATH']:
os.environ['LD_LIBRARY_PATH'] = \
cadet_lib_path.as_posix() \
+ os.pathsep \
+ os.environ['LD_LIBRARY_PATH']
except KeyError:
os.environ['LD_LIBRARY_PATH'] = cadet_lib_path.as_posix()
def check_cadet(self):
"""Check CADET installation can run basic LWE example."""
executable = 'createLWE'
if platform.system() == 'Windows':
executable += '.exe'
lwe_path = self.install_path.parent / executable
ret = subprocess.run(
[lwe_path.as_posix()],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=self.temp_dir
)
if ret.returncode != 0:
if ret.stdout:
print('Output', ret.stdout.decode('utf-8'))
if ret.stderr:
print('Errors', ret.stderr.decode('utf-8'))
raise CADETProcessError(
"Failure: Creation of test simulation ran into problems"
)
lwe_hdf5_path = Path(self.temp_dir) / 'LWE.h5'
sim = CadetAPI()
sim.filename = lwe_hdf5_path.as_posix()
data = sim.run()
os.remove(sim.filename)
if data.returncode == 0:
print("Test simulation completed successfully")
else:
print(data)
raise CADETProcessError(
"Simulation failed"
)
def get_tempfile_name(self):
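        """Return a unique temporary .h5 file path inside the simulator's temp directory."""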
f = next(tempfile._get_candidate_names())
return self.temp_dir / f'{f}.h5'
def run(self, process, cadet=None, file_path=None):
"""Interface to the solver run function.
The configuration is extracted from the process object and then saved
as a temporary .h5 file. After termination, the same file is processed
and the results are returned.
Cadet Return information:
        - 0: pass (everything all right)
- 1: Standard Error
- 2: IO Error
- 3: Solver Error
Parameters
----------
process : Process
process to be simulated
Returns
-------
results : SimulationResults
Simulation results including process and solver configuration.
Raises
------
TypeError
If process is not instance of Process
See Also
--------
get_process_config
get_simulation_results
"""
if not isinstance(process, Process):
raise TypeError('Expected Process')
if cadet is None:
cadet = CadetAPI()
cadet.root = self.get_process_config(process)
if cadet.is_file:
if file_path is None:
cadet.filename = self.get_tempfile_name()
else:
cadet.filename = file_path
cadet.save()
try:
start = time.time()
return_information = cadet.run_load(timeout=self.timeout)
elapsed = time.time() - start
except TimeoutExpired:
raise CADETProcessError('Simulator timed out') from None
finally:
if file_path is None:
os.remove(cadet.filename)
if return_information.returncode != 0:
self.logger.error(
f'Simulation of {process.name} '
f'with parameters {process.config} failed.'
)
raise CADETProcessError(
f'CADET Error: Simulation failed with {return_information.stderr}'
) from None
try:
results = self.get_simulation_results(
process, cadet, elapsed, return_information
)
except TypeError:
raise CADETProcessError(
'Unexpected error reading SimulationResults.'
)
return results
def save_to_h5(self, process, file_path):
cadet = CadetAPI()
cadet.root = self.get_process_config(process)
cadet.filename = file_path
cadet.save()
def run_h5(self, file_path):
cadet = CadetAPI()
cadet.filename = file_path
cadet.load()
cadet.run_load(timeout=self.timeout)
return cadet
def load_from_h5(self, file_path):
cadet = CadetAPI()
cadet.filename = file_path
cadet.load()
return cadet
def get_process_config(self, process):
"""Create the CADET config.
Returns
-------
config : Dict
/
Notes
-----
Sensitivities not implemented yet.
See Also
--------
get_input_model
get_input_solver
get_input_return
get_input_sensitivity
"""
process.lock = True
config = Dict()
config.input.model = self.get_input_model(process)
config.input.solver = self.get_input_solver(process)
config.input['return'] = self.get_input_return(process)
config.input.sensitivity = self.get_input_sensitivity(process)
process.lock = False
return config
def load_simulation_results(self, process, file_path):
cadet = self.load_from_h5(file_path)
results = self.get_simulation_results(process, cadet)
return results
def get_simulation_results(
self,
process,
cadet,
time_elapsed=None,
return_information=None):
"""Read simulation results from CADET configuration.
Parameters
----------
process : Process
Process that was simulated.
cadet : CadetAPI
Cadet object with simulation results.
time_elapsed : float
Time of simulation.
return_information: str
CADET-cli return information.
Returns
-------
results : SimulationResults
Simulation results including process and solver configuration.
        .. todo::
            Implement a method to read .h5 files that have no associated Process.
"""
if time_elapsed is None:
time_elapsed = cadet.root.meta.time_sim
time = self.get_solution_time(process)
if return_information is None:
exit_flag = None
exit_message = None
else:
exit_flag = return_information.returncode
exit_message = return_information.stderr.decode()
try:
solution = Dict()
for unit in process.flow_sheet.units:
solution[unit.name] = defaultdict(list)
unit_index = self.get_unit_index(process, unit)
unit_solution = cadet.root.output.solution[unit_index]
unit_coordinates = \
cadet.root.output.coordinates[unit_index].copy()
particle_coordinates = \
unit_coordinates.pop('particle_coordinates_000', None)
flow_in = process.flow_rate_timelines[unit.name].total_in
flow_out = process.flow_rate_timelines[unit.name].total_out
for cycle in range(self.n_cycles):
start = cycle * len(time)
end = (cycle + 1) * len(time)
if 'solution_inlet' in unit_solution.keys():
sol_inlet = unit_solution.solution_inlet[start:end, :]
solution[unit.name]['inlet'].append(
SolutionIO(
unit.name,
unit.component_system, time, sol_inlet,
flow_in
)
)
if 'solution_outlet' in unit_solution.keys():
sol_outlet = unit_solution.solution_outlet[start:end, :]
solution[unit.name]['outlet'].append(
SolutionIO(
unit.name,
unit.component_system, time, sol_outlet,
flow_out
)
)
if 'solution_bulk' in unit_solution.keys():
sol_bulk = unit_solution.solution_bulk[start:end, :]
solution[unit.name]['bulk'].append(
SolutionBulk(
unit.name,
unit.component_system, time, sol_bulk,
**unit_coordinates
)
)
if 'solution_particle' in unit_solution.keys():
sol_particle = unit_solution.solution_particle[start:end, :]
solution[unit.name]['particle'].append(
SolutionParticle(
unit.name,
unit.component_system, time, sol_particle,
**unit_coordinates,
particle_coordinates=particle_coordinates
)
)
if 'solution_solid' in unit_solution.keys():
sol_solid = unit_solution.solution_solid[start:end, :]
solution[unit.name]['solid'].append(
SolutionSolid(
unit.name,
unit.component_system,
unit.binding_model.bound_states,
time, sol_solid,
**unit_coordinates,
particle_coordinates=particle_coordinates
)
)
if 'solution_volume' in unit_solution.keys():
sol_volume = unit_solution.solution_volume[start:end, :]
solution[unit.name]['volume'].append(
SolutionVolume(
unit.name,
unit.component_system,
time,
sol_volume
)
)
solution = Dict(solution)
sensitivity = Dict()
for i, sens in enumerate(process.parameter_sensitivities):
sens_index = f'param_{i:03d}'
for unit in process.flow_sheet.units:
sensitivity[sens.name][unit.name] = defaultdict(list)
unit_index = self.get_unit_index(process, unit)
unit_sensitivity = cadet.root.output.sensitivity[sens_index][unit_index]
unit_coordinates = \
cadet.root.output.coordinates[unit_index].copy()
particle_coordinates = \
unit_coordinates.pop('particle_coordinates_000', None)
flow_in = process.flow_rate_timelines[unit.name].total_in
flow_out = process.flow_rate_timelines[unit.name].total_out
for cycle in range(self.n_cycles):
start = cycle * len(time)
end = (cycle + 1) * len(time)
if 'sens_inlet' in unit_sensitivity.keys():
sens_inlet = unit_sensitivity.sens_inlet[start:end, :]
sensitivity[sens.name][unit.name]['inlet'].append(
SolutionIO(
unit.name,
unit.component_system, time, sens_inlet,
flow_in
)
)
if 'sens_outlet' in unit_sensitivity.keys():
sens_outlet = unit_sensitivity.sens_outlet[start:end, :]
sensitivity[sens.name][unit.name]['outlet'].append(
SolutionIO(
unit.name,
unit.component_system, time, sens_outlet,
flow_out
)
)
if 'sens_bulk' in unit_sensitivity.keys():
sens_bulk = unit_sensitivity.sens_bulk[start:end, :]
sensitivity[sens.name][unit.name]['bulk'].append(
SolutionBulk(
unit.name,
unit.component_system, time, sens_bulk,
**unit_coordinates
)
)
if 'sens_particle' in unit_sensitivity.keys():
sens_particle = unit_sensitivity.sens_particle[start:end, :]
sensitivity[sens.name][unit.name]['particle'].append(
SolutionParticle(
unit.name,
unit.component_system, time, sens_particle,
**unit_coordinates,
particle_coordinates=particle_coordinates
)
)
if 'sens_solid' in unit_sensitivity.keys():
sens_solid = unit_sensitivity.sens_solid[start:end, :]
sensitivity[sens.name][unit.name]['solid'].append(
SolutionSolid(
unit.name,
unit.component_system,
unit.binding_model.bound_states,
time, sens_solid,
**unit_coordinates,
particle_coordinates=particle_coordinates
)
)
if 'sens_volume' in unit_sensitivity.keys():
sens_volume = unit_sensitivity.sens_volume[start:end, :]
sensitivity[sens.name][unit.name]['volume'].append(
SolutionVolume(
unit.name,
unit.component_system,
time,
sens_volume
)
)
sensitivity = Dict(sensitivity)
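            # Final solver state: the last state vector and its time derivative reported by CADET.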
system_state = {
'state': cadet.root.output.last_state_y,
'state_derivative': cadet.root.output.last_state_ydot
}
chromatograms = [
solution[chrom.name].outlet[-1]
for chrom in process.flow_sheet.product_outlets
]
except KeyError:
raise CADETProcessError('Results don\'t match Process')
results = SimulationResults(
solver_name=str(self),
solver_parameters=dict(),
exit_flag=exit_flag,
exit_message=exit_message,
time_elapsed=time_elapsed,
process=process,
solution_cycles=solution,
sensitivity_cycles=sensitivity,
system_state=system_state,
chromatograms=chromatograms
)
return results
def get_input_model(self, process):
"""Config branch /input/model/
Notes
-----
!!! External functions not implemented yet
See Also
--------
model_connections
model_solver
model_units
input_model_parameters
"""
input_model = Dict()
input_model.connections = self.get_model_connections(process)
# input_model.external = self.model_external # !!! not working yet
input_model.solver = self.model_solver_parameters.to_dict()
input_model.update(self.get_model_units(process))
if process.system_state is not None:
input_model['INIT_STATE_Y'] = process.system_state
if process.system_state_derivative is not None:
input_model['INIT_STATE_YDOT'] = process.system_state_derivative
return input_model
def get_model_connections(self, process):
"""Config branch /input/model/connections"""
model_connections = Dict()
if self._force_constant_flow_rate:
model_connections['CONNECTIONS_INCLUDE_DYNAMIC_FLOW'] = 0
else:
model_connections['CONNECTIONS_INCLUDE_DYNAMIC_FLOW'] = 1
index = 0
section_states = process.flow_rate_section_states
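        # One CADET switch entry is generated per flow-rate section, repeated for every cycle.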
for cycle in range(0, self.n_cycles):
for flow_rates_state in section_states.values():
switch_index = f'switch_{index:03d}'
model_connections[switch_index].section = index
connections = self.cadet_connections(
flow_rates_state, process.flow_sheet
)
model_connections[switch_index].connections = connections
index += 1
model_connections.nswitches = index
return model_connections
def cadet_connections(self, flow_rates, flow_sheet):
"""list: Connections matrix for flow_rates state.
Parameters
----------
flow_rates : dict
UnitOperations with outgoing flow rates.
flow_sheet : FlowSheet
Object which hosts units (for getting unit index).
Returns
-------
ls : list
            Flattened connections matrix for the given flow_rates state.
"""
table = Dict()
enum = 0
for origin, unit_flow_rates in flow_rates.items():
origin = flow_sheet[origin]
origin_index = flow_sheet.get_unit_index(origin)
for dest, flow_rate in unit_flow_rates.destinations.items():
destination = flow_sheet[dest]
destination_index = flow_sheet.get_unit_index(destination)
if np.any(flow_rate):
table[enum] = []
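                    # Connection entry format: [origin unit, destination unit,
                    # component start, component end, flow rate coefficient(s)];
                    # -1 for the component indices connects all components.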
table[enum].append(int(origin_index))
table[enum].append(int(destination_index))
table[enum].append(-1)
table[enum].append(-1)
Q = flow_rate.tolist()
if self._force_constant_flow_rate:
table[enum] += [Q[0]]
else:
table[enum] += Q
enum += 1
ls = []
for connection in table.values():
ls += connection
return ls
def get_unit_index(self, process, unit):
"""Helper function for getting unit index in CADET format unit_xxx.
Parameters
----------
process : Process
process to be simulated
unit : UnitOperation
Indexed object
Returns
-------
unit_index : str
Return the unit index in CADET format unit_XXX
"""
index = process.flow_sheet.get_unit_index(unit)
return f'unit_{index:03d}'
def get_model_units(self, process):
"""Config branches for all units /input/model/unit_000 ... unit_xxx.
See Also
--------
get_unit_config
get_unit_index
"""
model_units = Dict()
model_units.nunits = len(process.flow_sheet.units)
for unit in process.flow_sheet.units:
unit_index = self.get_unit_index(process, unit)
model_units[unit_index] = self.get_unit_config(unit)
self.set_section_dependent_parameters(model_units, process)
return model_units
def get_unit_config(self, unit):
"""Config branch /input/model/unit_xxx for individual unit.
The unit operation parameters are converted to CADET format
Notes
-----
In CADET, the parameter unit_config['discretization'].NBOUND should be
moved to binding config or unit config
See Also
--------
get_adsorption_config
"""
unit_parameters = UnitParametersGroup(unit)
unit_config = Dict(unit_parameters.to_dict())
if not isinstance(unit.binding_model, NoBinding):
if unit.binding_model.n_binding_sites > 1:
n_bound = \
[unit.binding_model.n_binding_sites] * unit.binding_model.n_comp
else:
n_bound = unit.binding_model.bound_states
unit_config['adsorption'] = \
self.get_adsorption_config(unit.binding_model)
unit_config['adsorption_model'] = \
unit_config['adsorption']['ADSORPTION_MODEL']
else:
n_bound = unit.n_comp*[0]
if not isinstance(unit.discretization, NoDiscretization):
unit_config['discretization'] = unit.discretization.parameters
if isinstance(unit.discretization, DGMixin):
unit_config['UNIT_TYPE'] += '_DG'
if isinstance(unit, Cstr) \
and not isinstance(unit.binding_model, NoBinding):
unit_config['nbound'] = n_bound
else:
unit_config['discretization']['nbound'] = n_bound
if not isinstance(unit.bulk_reaction_model, NoReaction):
parameters = self.get_reaction_config(unit.bulk_reaction_model)
if isinstance(unit, TubularReactor):
unit_config['reaction_model'] = parameters['REACTION_MODEL']
for key, value in parameters.items():
key = key.replace('bulk', 'liquid')
unit_config['reaction'][key] = value
else:
unit_config['reaction_model'] = parameters['REACTION_MODEL']
unit_config['reaction_bulk'] = parameters
if not isinstance(unit.particle_reaction_model, NoReaction):
parameters = self.get_reaction_config(unit.particle_reaction_model)
if isinstance(unit, LumpedRateModelWithoutPores):
unit_config['reaction_model'] = parameters['REACTION_MODEL']
unit_config['reaction'] = parameters
else:
unit_config['reaction_model_particle'] = parameters['REACTION_MODEL']
unit_config['reaction_particle'].update(parameters)
if isinstance(unit, Inlet):
unit_config['sec_000']['const_coeff'] = unit.c[:, 0]
unit_config['sec_000']['lin_coeff'] = unit.c[:, 1]
unit_config['sec_000']['quad_coeff'] = unit.c[:, 2]
unit_config['sec_000']['cube_coeff'] = unit.c[:, 3]
return unit_config
def set_section_dependent_parameters(self, model_units, process):
"""Add time dependent model parameters to units."""
section_states = process.section_states.values()
section_index = 0
for cycle in range(0, self.n_cycles):
for param_states in section_states:
for param, state in param_states.items():
param = param.split('.')
unit_name = param[1]
param_name = param[-1]
try:
unit = process.flow_sheet[unit_name]
except KeyError:
if unit_name == 'output_states':
continue
else:
raise CADETProcessError(
'Unexpected section dependent parameter'
)
if param_name == 'flow_rate':
continue
unit_index = process.flow_sheet.get_unit_index(unit)
if isinstance(unit, Inlet) and param_name == 'c':
self.add_inlet_section(
model_units, section_index, unit_index, state
)
else:
unit_model = unit.model
self.add_parameter_section(
model_units, section_index, unit_index,
unit_model, param_name, state
)
section_index += 1
def add_inlet_section(self, model_units, sec_index, unit_index, coeffs):
unit_index = f'unit_{unit_index:03d}'
section_index = f'sec_{sec_index:03d}'
model_units[unit_index][section_index]['const_coeff'] = coeffs[:, 0]
model_units[unit_index][section_index]['lin_coeff'] = coeffs[:, 1]
model_units[unit_index][section_index]['quad_coeff'] = coeffs[:, 2]
model_units[unit_index][section_index]['cube_coeff'] = coeffs[:, 3]
def add_parameter_section(
self, model_units, sec_index, unit_index, unit_model,
parameter, state):
"""Add section value to parameter branch."""
unit_index = f'unit_{unit_index:03d}'
parameter_name = \
inv_unit_parameters_map[unit_model]['parameters'][parameter]
if sec_index == 0:
model_units[unit_index][parameter_name] = []
model_units[unit_index][parameter_name] += list(state.ravel())
def get_adsorption_config(self, binding):
"""Config branch /input/model/unit_xxx/adsorption for individual unit.
Binding model parameters are extracted and converted to CADET format.
Parameters
----------
binding : BindingBaseClass
Binding model
See Also
--------
get_unit_config
"""
adsorption_config = AdsorptionParametersGroup(binding).to_dict()
return adsorption_config
def get_reaction_config(self, reaction):
"""Config branch /input/model/unit_xxx/reaction for individual unit.
Reaction model parameters are extracted and converted to CADET format.
Parameters
----------
reaction : ReactionBaseClass
Reaction model
See Also
--------
get_unit_config
"""
reaction_config = ReactionParametersGroup(reaction).to_dict()
return reaction_config
def get_input_solver(self, process):
"""Config branch /input/solver/
See Also
--------
solver_sections
solver_time_integrator
"""
input_solver = Dict()
input_solver.update(self.solver_parameters.to_dict())
input_solver.user_solution_times = \
self.get_solution_time_complete(process)
input_solver.sections = self.get_solver_sections(process)
input_solver.time_integrator = \
self.time_integrator_parameters.to_dict()
return input_solver
def get_solver_sections(self, process):
"""Config branch /input/solver/sections"""
solver_sections = Dict()
solver_sections.nsec = self.n_cycles * process.n_sections
solver_sections.section_times = \
self.get_section_times_complete(process)
solver_sections.section_continuity = [0] * (solver_sections.nsec - 1)
return solver_sections
def get_input_return(self, process):
"""Config branch /input/return"""
return_parameters = self.return_parameters.to_dict()
unit_return_parameters = self.get_unit_return_parameters(process)
return {**return_parameters, **unit_return_parameters}
def get_unit_return_parameters(self, process):
"""Config branches for all units /input/return/unit_000 ... unit_xxx"""
unit_return_parameters = Dict()
for unit in process.flow_sheet.units:
unit_index = self.get_unit_index(process, unit)
unit_return_parameters[unit_index] = \
unit.solution_recorder.parameters
return unit_return_parameters
def get_input_sensitivity(self, process):
"""Config branch /input/sensitivity"""
sensitivity_parameters = self.sensitivity_parameters.to_dict()
parameter_sensitivities = self.get_parameter_sensitivities(process)
return {**sensitivity_parameters, **parameter_sensitivities}
def get_parameter_sensitivities(self, process):
"""Config branches for all parameter sensitivities /input/sensitivity/param_000 ... param_xxx"""
parameter_sensitivities = Dict()
parameter_sensitivities.nsens = process.n_sensitivities
for i, sens in enumerate(process.parameter_sensitivities):
sens_index = f'param_{i:03d}'
parameter_sensitivities[sens_index] = \
self.get_sensitivity_config(process, sens)
return parameter_sensitivities
def get_sensitivity_config(self, process, sens):
config = Dict()
unit_indices = []
parameters = []
components = []
for param, unit, associated_model, comp, coeff in zip(
sens.parameters, sens.units, sens.associated_models, sens.components,
sens.polynomial_coefficients):
unit_index = process.flow_sheet.get_unit_index(unit)
unit_indices.append(unit_index)
if associated_model is None:
model = unit.model
if model == 'Inlet' and param == 'c':
                    # Map the polynomial coefficient index to the CADET coefficient name.
                    if coeff == 0:
                        coeff = 'CONST_COEFF'
                    elif coeff == 1:
                        coeff = 'LIN_COEFF'
                    elif coeff == 2:
                        coeff = 'QUAD_COEFF'
                    elif coeff == 3:
                        coeff = 'CUBE_COEFF'
parameter = coeff
else:
parameter = inv_unit_parameters_map[model]['parameters'][param]
else:
model = associated_model.model
if isinstance(associated_model, BindingBaseClass):
parameter = inv_adsorption_parameters_map[model]['parameters'][param]
if isinstance(associated_model, ReactionBaseClass):
parameter = inv_reaction_parameters_map[model]['parameters'][param]
parameters.append(parameter)
component_system = unit.component_system
comp = -1 if comp is None else component_system.indices[comp]
components.append(comp)
config.sens_unit = unit_indices
config.sens_name = parameters
config.sens_comp = components
config.sens_partype = -1 # !!! Check when multiple particle types enabled.
if not all([index is None for index in sens.bound_state_indices]):
config.sens_reaction = [
-1 if index is None else index for index in sens.bound_state_indices
]
else:
config.sens_reaction = -1
if not all([index is None for index in sens.bound_state_indices]):
config.sens_boundphase = [
-1 if index is None else index for index in sens.bound_state_indices
]
else:
config.sens_boundphase = -1
if not all([index is None for index in sens.section_indices]):
config.sens_section = [
-1 if index is None else index for index in sens.section_indices
]
else:
config.sens_section = -1
if not all([index is None for index in sens.abstols]):
config.sens_abstol = sens.abstols
config.factors = sens.factors
return config
def __str__(self):
return 'CADET'
from CADETProcess.dataStructure import ParametersGroup, ParameterWrapper
class ModelSolverParametersGroup(ParametersGroup):
"""Converter for model solver parameters from CADETProcess to CADET.
Attributes
----------
gs_type : {1, 0}, optional
Valid modes:
        - 0: Classical Gram-Schmidt orthogonalization.
- 1: Modified Gram-Schmidt.
The default is 1.
max_krylov : int, optional
Size of the Krylov subspace in the iterative linear GMRES solver.
The default is 0.
max_restarts : int, optional
        Maximum number of restarts in the GMRES algorithm. If memory is not a
        concern, prefer a larger Krylov subspace over more restarts.
The default is 10.
schur_safety : float, optional
Schur safety factor.
        Influences the tradeoff between linear iterations and nonlinear error control.
The default is 1e-8.
linear_solution_mode : int
Valid modes:
        - 0: Automatically choose the mode based on a heuristic.
        - 1: Solve the system of models in parallel.
        - 2: Solve the system of models sequentially (only possible for systems without cyclic connections).
The default is 0.
See Also
--------
ParametersGroup
"""
gs_type = Switch(default=1, valid=[0, 1])
max_krylov = UnsignedInteger(default=0)
max_restarts = UnsignedInteger(default=10)
schur_safety = UnsignedFloat(default=1e-8)
linear_solution_mode = UnsignedInteger(default=0, ub=2)
_parameters = [
'gs_type',
'max_krylov',
'max_restarts',
'schur_safety',
'linear_solution_mode',
]
unit_parameters_map = {
'GeneralRateModel': {
'name': 'GENERAL_RATE_MODEL',
'parameters': {
'NCOMP': 'n_comp',
'INIT_C': 'c',
'INIT_Q': 'q',
'INIT_CP': 'cp',
'COL_DISPERSION': 'axial_dispersion',
'COL_LENGTH': 'length',
'COL_POROSITY': 'bed_porosity',
'FILM_DIFFUSION': 'film_diffusion',
'PAR_POROSITY': 'particle_porosity',
'PAR_RADIUS': 'particle_radius',
'PORE_ACCESSIBILITY': 'pore_accessibility',
'PAR_DIFFUSION': 'pore_diffusion',
'PAR_SURFDIFFUSION': 'surface_diffusion',
'CROSS_SECTION_AREA': 'cross_section_area',
'VELOCITY': 'flow_direction',
},
'fixed': {
'PAR_SURFDIFFUSION_MULTIPLEX': 0,
},
},
'LumpedRateModelWithPores': {
'name': 'LUMPED_RATE_MODEL_WITH_PORES',
'parameters': {
'NCOMP': 'n_comp',
'INIT_C': 'c',
'INIT_CP': 'cp',
'INIT_Q': 'q',
'COL_DISPERSION': 'axial_dispersion',
'COL_LENGTH': 'length',
'COL_POROSITY': 'bed_porosity',
'FILM_DIFFUSION': 'film_diffusion',
'PAR_POROSITY': 'particle_porosity',
'PAR_RADIUS': 'particle_radius',
'PORE_ACCESSIBILITY': 'pore_accessibility',
'CROSS_SECTION_AREA': 'cross_section_area',
'VELOCITY': 'flow_direction',
},
},
'LumpedRateModelWithoutPores': {
'name': 'LUMPED_RATE_MODEL_WITHOUT_PORES',
'parameters': {
'NCOMP': 'n_comp',
'INIT_C': 'c',
'INIT_Q': 'q',
'COL_DISPERSION': 'axial_dispersion',
'COL_LENGTH': 'length',
'TOTAL_POROSITY': 'total_porosity',
'CROSS_SECTION_AREA': 'cross_section_area',
'VELOCITY': 'flow_direction',
},
},
'TubularReactor': {
'name': 'LUMPED_RATE_MODEL_WITHOUT_PORES',
'parameters': {
'NCOMP': 'n_comp',
'INIT_C': 'c',
'COL_DISPERSION': 'axial_dispersion',
'COL_LENGTH': 'length',
'CROSS_SECTION_AREA': 'cross_section_area',
'VELOCITY': 'flow_direction',
},
'fixed': {
'TOTAL_POROSITY': 1,
},
},
'Cstr': {
'name': 'CSTR',
'parameters': {
'NCOMP': 'n_comp',
'INIT_VOLUME': 'V',
'INIT_C': 'c',
'INIT_Q': 'q',
'POROSITY': 'porosity',
'FLOWRATE_FILTER': 'flow_rate_filter',
},
},
'Inlet': {
'name': 'INLET',
'parameters': {
'NCOMP': 'n_comp',
},
'fixed': {
'INLET_TYPE': 'PIECEWISE_CUBIC_POLY',
},
},
'Outlet': {
'name': 'OUTLET',
'parameters': {
'NCOMP': 'n_comp',
},
},
'MixerSplitter': {
'name': 'CSTR',
'parameters': {
'NCOMP': 'n_comp',
},
'fixed': {
'INIT_VOLUME': 1e-9,
'INIT_C': [0]
},
},
}
inv_unit_parameters_map = {
unit: {
'name': values['name'],
'parameters': {
v: k for k, v in values['parameters'].items()
}
} for unit, values in unit_parameters_map.items()
}
class UnitParametersGroup(ParameterWrapper):
"""Converter for UnitOperation parameters from CADETProcess to CADET.
See Also
--------
ParameterWrapper
AdsorptionParametersGroup
ReactionParametersGroup
"""
_baseClass = UnitBaseClass
_unit_parameters = unit_parameters_map
_model_parameters = _unit_parameters
_model_type = 'UNIT_TYPE'
adsorption_parameters_map = {
'NoBinding': {
'name': 'NONE',
'parameters': {},
},
'Linear': {
'name': 'LINEAR',
'parameters': {
'IS_KINETIC': 'is_kinetic',
'LIN_KA': 'adsorption_rate',
'LIN_KD': 'desorption_rate'
},
},
'Langmuir': {
'name': 'MULTI_COMPONENT_LANGMUIR',
'parameters': {
'IS_KINETIC': 'is_kinetic',
'MCL_KA': 'adsorption_rate',
'MCL_KD': 'desorption_rate',
'MCL_QMAX': 'capacity'
},
},
'LangmuirLDF': {
'name': 'MULTI_COMPONENT_LANGMUIR_LDF',
'parameters': {
'IS_KINETIC': 'is_kinetic',
'MCLLDF_KEQ': 'equilibrium_constant',
'MCLLDF_KKIN': 'driving_force_coefficient',
'MCLLDF_QMAX': 'capacity'
},
},
'BiLangmuir': {
'name': 'MULTI_COMPONENT_BILANGMUIR',
'parameters': {
'IS_KINETIC': 'is_kinetic',
'MCBL_KA': 'adsorption_rate',
'MCBL_KD': 'desorption_rate',
'MCBL_QMAX': 'capacity'
},
},
'BiLangmuirLDF': {
'name': 'MULTI_COMPONENT_BILANGMUIR_LDF',
'parameters': {
'IS_KINETIC': 'is_kinetic',
'MCBLLDF_KEQ': 'equilibrium_constant',
'MCBLLDF_KKIN': 'driving_force_coefficient',
'MCBLLDF_QMAX': 'capacity'
},
},
'FreundlichLDF': {
'name': 'FREUNDLICH_LDF',
'parameters': {
'IS_KINETIC': 'is_kinetic',
'FLDF_KKIN': 'driving_force_coefficient',
'FLDF_KF': 'freundlich_coefficient',
'FLDF_N': 'exponent'
},
},
'StericMassAction': {
'name': 'STERIC_MASS_ACTION',
'parameters': {
'IS_KINETIC': 'is_kinetic',
'SMA_KA': 'adsorption_rate_transformed',
'SMA_KD': 'desorption_rate_transformed',
'SMA_LAMBDA': 'capacity',
'SMA_NU': 'characteristic_charge',
'SMA_SIGMA': 'steric_factor',
'SMA_REFC0': 'reference_liquid_phase_conc',
'SMA_REFQ': 'reference_solid_phase_conc'
},
},
'AntiLangmuir': {
'name': 'MULTI_COMPONENT_ANTILANGMUIR',
'parameters': {
'IS_KINETIC': 'is_kinetic',
'MCAL_KA': 'adsorption_rate',
'MCAL_KD': 'desorption_rate',
'MCAL_QMAX': 'capacity',
'MCAL_ANTILANGMUIR': 'antilangmuir'
},
},
'MobilePhaseModulator': {
'name': 'MOBILE_PHASE_MODULATOR',
'parameters': {
'IS_KINETIC': 'is_kinetic',
'MPM_KA': 'adsorption_rate',
'MPM_KD': 'desorption_rate',
'MPM_QMAX': 'capacity',
'MPM_BETA': 'ion_exchange_characteristic',
'MPM_GAMMA': 'hydrophobicity'
},
},
'ExtendedMobilePhaseModulator': {
'name': 'EXTENDED_MOBILE_PHASE_MODULATOR',
'parameters': {
'IS_KINETIC': 'is_kinetic',
'EMPM_KA': 'adsorption_rate',
'EMPM_KD': 'desorption_rate',
'EMPM_QMAX': 'capacity',
'EMPM_BETA': 'ion_exchange_characteristic',
'EMPM_GAMMA': 'hydrophobicity',
'EMPM_COMP_MODE': 'component_mode',
},
},
'GeneralizedIonExchange': {
'name': 'GENERALIZED_ION_EXCHANGE',
'parameters': {
'IS_KINETIC': 'is_kinetic',
'GIEX_KA': 'adsorption_rate',
'GIEX_KA_LIN': 'adsorption_rate_linear',
'GIEX_KA_QUAD': 'adsorption_rate_quadratic',
'GIEX_KA_CUBE': 'adsorption_rate_cubic',
'GIEX_KA_SALT': 'adsorption_rate_salt',
'GIEX_KA_PROT': 'adsorption_rate_protein',
'GIEX_KD': 'desorption_rate',
'GIEX_KD_LIN': 'desorption_rate_linear',
'GIEX_KD_QUAD': 'desorption_rate_quadratic',
'GIEX_KD_CUBE': 'desorption_rate_cubic',
'GIEX_KD_SALT': 'desorption_rate_salt',
'GIEX_KD_PROT': 'desorption_rate_protein',
'GIEX_NU_BREAKS': 'characteristic_charge_breaks',
'GIEX_NU': 'characteristic_charge',
'GIEX_NU_LIN': 'characteristic_charge_linear',
'GIEX_NU_QUAD': 'characteristic_charge_quadratic',
'GIEX_NU_CUBE': 'characteristic_charge_cubic',
'GIEX_SIGMA': 'steric_factor',
'GIEX_LAMBDA': 'capacity',
'GIEX_REFC0': 'reference_liquid_phase_conc',
'GIEX_REFQ': 'reference_solid_phase_conc',
},
}
}
inv_adsorption_parameters_map = {
model: {
'name': values['name'],
'parameters': {
v: k for k, v in values['parameters'].items()
}
} for model, values in adsorption_parameters_map.items()
}
class AdsorptionParametersGroup(ParameterWrapper):
"""Converter for Binding model parameters from CADETProcess to CADET.
See Also
--------
ParameterWrapper
ReactionParametersGroup
UnitParametersGroup
"""
_baseClass = BindingBaseClass
_adsorption_parameters = adsorption_parameters_map
_model_parameters = _adsorption_parameters
_model_type = 'ADSORPTION_MODEL'
reaction_parameters_map = {
'NoReaction': {
'name': 'NONE',
'parameters': {},
},
'MassActionLaw': {
'name': 'MASS_ACTION_LAW',
'parameters': {
'mal_stoichiometry_bulk': 'stoich',
'mal_exponents_bulk_fwd': 'exponents_fwd',
'mal_exponents_bulk_bwd': 'exponents_bwd',
'mal_kfwd_bulk': 'k_fwd',
'mal_kbwd_bulk': 'k_bwd',
}
},
'MassActionLawParticle': {
'name': 'MASS_ACTION_LAW',
'parameters': {
'mal_stoichiometry_liquid': 'stoich_liquid',
'mal_exponents_liquid_fwd': 'exponents_fwd_liquid',
'mal_exponents_liquid_bwd': 'exponents_bwd_liquid',
'mal_kfwd_liquid': 'k_fwd_liquid',
'mal_kbwd_liquid': 'k_bwd_liquid',
'mal_stoichiometry_solid': 'stoich_solid',
'mal_exponents_solid_fwd': 'exponents_fwd_solid',
'mal_exponents_solid_bwd': 'exponents_bwd_solid',
'mal_kfwd_solid': 'k_fwd_solid',
'mal_kbwd_solid': 'k_bwd_solid',
'mal_exponents_liquid_fwd_modsolid':
'exponents_fwd_liquid_modsolid',
'mal_exponents_liquid_bwd_modsolid':
'exponents_bwd_liquid_modsolid',
'mal_exponents_solid_fwd_modliquid':
'exponents_fwd_solid_modliquid',
'mal_exponents_solid_bwd_modliquid':
'exponents_bwd_solid_modliquid',
}
}
}
inv_reaction_parameters_map = {
model: {
'name': values['name'],
'parameters': {
v: k for k, v in values['parameters'].items()
}
    } for model, values in reaction_parameters_map.items()
}
class ReactionParametersGroup(ParameterWrapper):
"""Converter for Reaction model parameters from CADETProcess to CADET.
See Also
--------
ParameterWrapper
AdsorptionParametersGroup
UnitParametersGroup
"""
_baseClass = ReactionBaseClass
_reaction_parameters = reaction_parameters_map
_model_parameters = _reaction_parameters
_model_type = 'REACTION_MODEL'
class SolverParametersGroup(ParametersGroup):
"""Class for defining the solver parameters for CADET.
Attributes
----------
nthreads : int
Number of used threads.
consistent_init_mode : int, optional
Consistent initialization mode.
Valid values are:
- 0: None
- 1: Full
- 2: Once, full
- 3: Lean
- 4: Once, lean
- 5: Full once, then lean
- 6: None once, then full
- 7: None once, then lean
The default is 1.
consistent_init_mode_sens : int, optional
Consistent initialization mode for parameter sensitivities.
Valid values are:
- 0: None
- 1: Full
- 2: Once, full
- 3: Lean
- 4: Once, lean
- 5: Full once, then lean
- 6: None once, then full
- 7: None once, then lean
The default is 1.
See Also
--------
ParametersGroup
"""
nthreads = UnsignedInteger(default=1)
consistent_init_mode = UnsignedInteger(default=1, ub=7)
consistent_init_mode_sens = UnsignedInteger(default=1, ub=7)
_parameters = [
'nthreads', 'consistent_init_mode', 'consistent_init_mode_sens'
]
class SolverTimeIntegratorParametersGroup(ParametersGroup):
"""Converter for time integartor parameters from CADETProcess to CADET.
Attributes
----------
abstol: float, optional
Absolute tolerance in the solution of the original system.
The default is 1e-8.
algtol: float, optional
Tolerance in the solution of the nonlinear consistency equations.
The default is 1e-12.
reltol: float, optional
Relative tolerance in the solution of the original system.
The default is 1e-6.
reltol_sens: float, optional
Relative tolerance in the solution of the sensitivity systems.
The default is 1e-12.
init_step_size: float, optional
Initial time integrator step size.
The default is 1e-6.
    max_steps: int, optional
        Maximum number of timesteps taken by IDAS.
        The default is 1000000.
    max_step_size: float, optional
        Maximum size of timesteps taken by IDAS.
        The default is 0.0 (unlimited).
    errortest_sens: bool, optional
        If True, use (forward) sensitivities in the local error test.
        The default is False.
    max_newton_iter: int, optional
        Maximum number of Newton iterations per time step.
        The default is 1000000.
    max_errtest_fail: int, optional
        Maximum number of local error test failures per time step.
        The default is 1000000.
    max_convtest_fail: int, optional
        Maximum number of Newton convergence test failures.
        The default is 1000000.
    max_newton_iter_sens: int, optional
        Maximum number of Newton iterations per forward sensitivity time step.
        The default is 1000000.
See Also
--------
ParametersGroup
"""
abstol = UnsignedFloat(default=1e-8)
algtol = UnsignedFloat(default=1e-12)
reltol = UnsignedFloat(default=1e-6)
reltol_sens = UnsignedFloat(default=1e-12)
init_step_size = UnsignedFloat(default=1e-6)
max_steps = UnsignedInteger(default=1000000)
max_step_size = UnsignedFloat(default=0.0)
errortest_sens = Bool(default=False)
max_newton_iter = UnsignedInteger(default=1000000)
max_errtest_fail = UnsignedInteger(default=1000000)
max_convtest_fail = UnsignedInteger(default=1000000)
max_newton_iter_sens = UnsignedInteger(default=1000000)
_parameters = [
'abstol', 'algtol', 'reltol', 'reltol_sens', 'init_step_size',
'max_steps', 'max_step_size', 'errortest_sens', 'max_newton_iter',
'max_errtest_fail', 'max_convtest_fail', 'max_newton_iter_sens'
]
class ReturnParametersGroup(ParametersGroup):
"""Converter for system solution writer config from CADETProcess to CADET.
See Also
--------
ParametersGroup
"""
write_solution_times = Bool(default=True)
write_solution_last = Bool(default=True)
write_sens_last = Bool(default=True)
split_components_data = Bool(default=False)
split_ports_data = Bool(default=False)
_parameters = [
'write_solution_times', 'write_solution_last', 'write_sens_last',
'split_components_data', 'split_ports_data'
]
class SensitivityParametersGroup(ParametersGroup):
"""Class for defining the sensitivity parameters.
See Also
--------
ParametersGroup
"""
sens_method = Switch(default='ad1', valid=['ad1'])
_parameters = ['sens_method'] | PypiClean |
/Flask-Scaffold-0.5.1.tar.gz/Flask-Scaffold-0.5.1/app/templates/static/node_modules/angular-grid/src/ts/toolPanel/groupSelectionPanel.ts |
module awk.grid {
var _ = Utils;
var svgFactory = SvgFactory.getInstance();
export class GroupSelectionPanel {
gridOptionsWrapper: any;
columnController: ColumnController;
inMemoryRowController: any;
cColumnList: any;
layout: any;
constructor(columnController: ColumnController, inMemoryRowController: any, gridOptionsWrapper: any) {
this.gridOptionsWrapper = gridOptionsWrapper;
this.setupComponents();
this.columnController = columnController;
this.inMemoryRowController = inMemoryRowController;
this.columnController.addChangeListener(this.columnsChanged.bind(this));
}
private columnsChanged() {
this.cColumnList.setModel(this.columnController.getPivotedColumns());
}
public addDragSource(dragSource: any) {
this.cColumnList.addDragSource(dragSource);
}
private columnCellRenderer(params: any) {
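        // Render a list entry for a pivoted column: a remove icon followed by the column's display name.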
var column = params.value;
var colDisplayName = this.columnController.getDisplayNameForCol(column);
var eResult = document.createElement('span');
var eRemove = _.createIcon('columnRemoveFromGroup',
this.gridOptionsWrapper, column, svgFactory.createArrowUpSvg);
_.addCssClass(eRemove, 'ag-visible-icons');
eResult.appendChild(eRemove);
var that = this;
eRemove.addEventListener('click', function () {
that.columnController.removePivotColumn(column);
});
var eValue = document.createElement('span');
eValue.innerHTML = colDisplayName;
eResult.appendChild(eValue);
return eResult;
}
private setupComponents() {
var localeTextFunc = this.gridOptionsWrapper.getLocaleTextFunc();
var columnsLocalText = localeTextFunc('pivotedColumns', 'Pivoted Columns');
var pivotedColumnsEmptyMessage = localeTextFunc('pivotedColumnsEmptyMessage', 'Drag columns from above to pivot');
this.cColumnList = new AgList();
this.cColumnList.setCellRenderer(this.columnCellRenderer.bind(this));
this.cColumnList.addBeforeDropListener(this.onBeforeDrop.bind(this));
this.cColumnList.addItemMovedListener(this.onItemMoved.bind(this));
this.cColumnList.setEmptyMessage(pivotedColumnsEmptyMessage);
this.cColumnList.addStyles({height: '100%', overflow: 'auto'});
this.cColumnList.setReadOnly(true);
var eNorthPanel = document.createElement('div');
eNorthPanel.style.paddingTop = '10px';
eNorthPanel.innerHTML = '<div style="text-align: center;">' + columnsLocalText + '</div>';
this.layout = new BorderLayout({
center: this.cColumnList.getGui(),
north: eNorthPanel
});
}
private onBeforeDrop(newItem: any) {
this.columnController.addPivotColumn(newItem);
}
private onItemMoved(fromIndex: number, toIndex: number) {
this.columnController.movePivotColumn(fromIndex, toIndex);
}
}
} | PypiClean |
/CleanAdminDjango-1.5.3.1.tar.gz/CleanAdminDjango-1.5.3.1/django/contrib/sitemaps/views.py | import warnings
from django.contrib.sites.models import get_current_site
from django.core import urlresolvers
from django.core.paginator import EmptyPage, PageNotAnInteger
from django.http import Http404
from django.template.response import TemplateResponse
from django.utils import six
def index(request, sitemaps,
template_name='sitemap_index.xml', content_type='application/xml',
sitemap_url_name='django.contrib.sitemaps.views.sitemap',
mimetype=None):
if mimetype:
warnings.warn("The mimetype keyword argument is deprecated, use "
"content_type instead", PendingDeprecationWarning, stacklevel=2)
content_type = mimetype
req_protocol = 'https' if request.is_secure() else 'http'
req_site = get_current_site(request)
sites = []
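    # Build one absolute sitemap URL per section, plus one URL per additional paginator page.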
for section, site in sitemaps.items():
if callable(site):
site = site()
protocol = req_protocol if site.protocol is None else site.protocol
sitemap_url = urlresolvers.reverse(
sitemap_url_name, kwargs={'section': section})
absolute_url = '%s://%s%s' % (protocol, req_site.domain, sitemap_url)
sites.append(absolute_url)
for page in range(2, site.paginator.num_pages + 1):
sites.append('%s?p=%s' % (absolute_url, page))
return TemplateResponse(request, template_name, {'sitemaps': sites},
content_type=content_type)
def sitemap(request, sitemaps, section=None,
template_name='sitemap.xml', content_type='application/xml',
mimetype=None):
if mimetype:
warnings.warn("The mimetype keyword argument is deprecated, use "
"content_type instead", PendingDeprecationWarning, stacklevel=2)
content_type = mimetype
req_protocol = 'https' if request.is_secure() else 'http'
req_site = get_current_site(request)
if section is not None:
if section not in sitemaps:
raise Http404("No sitemap available for section: %r" % section)
maps = [sitemaps[section]]
else:
maps = list(six.itervalues(sitemaps))
page = request.GET.get("p", 1)
urls = []
for site in maps:
try:
if callable(site):
site = site()
urls.extend(site.get_urls(page=page, site=req_site,
protocol=req_protocol))
except EmptyPage:
raise Http404("Page %s empty" % page)
except PageNotAnInteger:
raise Http404("No page '%s'" % page)
return TemplateResponse(request, template_name, {'urlset': urls},
content_type=content_type) | PypiClean |
/Agora-Planner-0.3.9.tar.gz/Agora-Planner-0.3.9/agora/planner/plan/agp.py | import logging
import re
from collections import namedtuple
from urlparse import urlparse
import networkx as nx
from rdflib import ConjunctiveGraph, URIRef, BNode, RDF, Literal
__author__ = 'Fernando Serena'
log = logging.getLogger('agora.planner.plan')
def extend_uri(uri, prefixes):
if ':' in uri:
prefix_parts = uri.split(':')
if len(prefix_parts) == 2 and prefix_parts[0] in prefixes:
return prefixes[prefix_parts[0]] + prefix_parts[1]
return uri
def is_variable(arg):
return arg.startswith('?')
def is_uri(uri, prefixes):
if uri.startswith('<') and uri.endswith('>'):
uri = uri.lstrip('<').rstrip('>')
parse = urlparse(uri, allow_fragments=True)
return bool(len(parse.scheme))
if ':' in uri:
prefix_parts = uri.split(':')
return len(prefix_parts) == 2 and prefix_parts[0] in prefixes
return False
class TP(namedtuple('TP', "s p o")):
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len, prefixes=None):
def transform_elm(elm):
if is_variable(elm):
return elm
elif is_uri(elm, prefixes):
elm = extend_uri(elm, prefixes)
return URIRef(elm.lstrip('<').rstrip('>'))
elif elm == 'a':
return RDF.type
else:
return Literal(elm)
if prefixes is None:
prefixes = []
res = filter(lambda x: x, map(transform_elm, iterable))
if len(res) == 3:
if not (isinstance(res[0], Literal) or isinstance(res[1], Literal)):
return new(cls, res)
raise TypeError('Bad TP arguments: {}'.format(iterable))
def __repr__(self):
def elm_to_string(elm):
if isinstance(elm, URIRef):
if elm == RDF.type:
return 'a'
return '<%s>' % elm
return str(elm)
strings = map(elm_to_string, [self.s, self.p, self.o])
return '{} {} {}'.format(*strings)
@staticmethod
def from_string(st, prefixes):
if st.endswith('"'):
parts = [st[st.find('"'):]]
st = st.replace(parts[0], '').rstrip()
parts = st.split(" ") + parts
else:
parts = st.split(' ')
return TP._make(parts, prefixes=prefixes)
class AgoraGP(object):
def __init__(self, prefixes):
self._tps = []
self.__prefixes = prefixes
@property
def triple_patterns(self):
return self._tps
@property
def prefixes(self):
return self.__prefixes
@property
def graph(self):
g = ConjunctiveGraph()
for prefix in self.__prefixes:
g.bind(prefix, self.__prefixes[prefix])
variables = {}
def nodify(elm):
if is_variable(elm):
if not (elm in variables):
elm_node = BNode(elm)
variables[elm] = elm_node
return variables[elm]
else:
if elm == 'a':
return RDF.type
elif elm.startswith('"'):
return Literal(elm.lstrip('"').rstrip('"'))
else:
try:
return float(elm)
except ValueError:
return URIRef(elm)
nxg = nx.Graph()
for (s, p, o) in self._tps:
nxg.add_nodes_from([s, o])
nxg.add_edge(s, o)
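        # Each connected component of the pattern graph becomes its own named context
        # in the resulting ConjunctiveGraph.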
contexts = dict([(str(index), c) for (index, c) in enumerate(nx.connected_components(nxg))])
for (s, p, o) in self._tps:
s_node = nodify(s)
o_node = nodify(o)
p_node = nodify(p)
context = None
for uid in contexts:
if s in contexts[uid]:
context = str(uid)
g.get_context(context).add((s_node, p_node, o_node))
return g
@staticmethod
def from_string(st, prefixes):
gp = None
if st.startswith('{') and st.endswith('}'):
st = st.replace('{', '').replace('}', '').strip()
tps = re.split('\. ', st)
tps = map(lambda x: x.strip().strip('.'), filter(lambda y: y != '', tps))
gp = AgoraGP(prefixes)
for tp in tps:
gp.triple_patterns.append(TP.from_string(tp, gp.prefixes))
return gp
def __repr__(self):
tp_strings = map(lambda x: str(x), self._tps)
return '{ %s}' % reduce(lambda x, y: (x + '%s . ' % str(y)), tp_strings, '') | PypiClean |
/Automated_cartography-0.0.2-py3-none-any.whl/robot/shortshow.py | '''测试程序'''
import get_ip
'''Test image display and action-group playback.'''
import cv2
import time
import LSC_Client
import threading
ip = get_ip.postaddress()
stream = "http://"+ip+":8080/?action=stream?dummy=param.mjpg"
cap = cv2.VideoCapture(stream)
lsc = LSC_Client.LSC_Client()
def Move():
lsc.MoveServo(6, 1500, 1000)
lsc.MoveServo(7, 1500, 1000)
time.sleep(1.1)
lsc.RunActionGroup(0, 1)
lsc.WaitForFinish(20000)
lsc.RunActionGroup(1, 5)
lsc.WaitForFinish(40000)
lsc.RunActionGroup(2, 5)
lsc.WaitForFinish(40000)
# -------------------------------------------------------
lsc.RunActionGroup(3, 1)
lsc.WaitForFinish(20000)
lsc.RunActionGroup(4, 1)
lsc.WaitForFinish(30000)
    # lsc.RunActionGroup(5, 1)  # forward roll tips the robot over
# lsc.WaitForFinish(40000)
    # lsc.RunActionGroup(6, 1)  # backward roll also tips it over
# lsc.WaitForFinish(40000)
lsc.RunActionGroup(7, 1)
lsc.WaitForFinish(60000)
    # lsc.RunActionGroup(8, 1)  # sit-ups also tip it over
# lsc.WaitForFinish(60000)
lsc.RunActionGroup(9, 1)
lsc.WaitForFinish(60000)
lsc.RunActionGroup(10, 1)
lsc.WaitForFinish(60000)
lsc.RunActionGroup(11, 1)
lsc.WaitForFinish(60000)
lsc.RunActionGroup(12, 1)
lsc.WaitForFinish(60000)
lsc.RunActionGroup(13, 1)
lsc.WaitForFinish(60000)
lsc.RunActionGroup(13, 1)
lsc.WaitForFinish(60000)
    # lsc.RunActionGroup(14, 1)  # squatting tips it over
# lsc.WaitForFinish(60000)
lsc.RunActionGroup(15, 1)
lsc.WaitForFinish(60000)
    # lsc.RunActionGroup(16, 1)  # hip-hop dance
# lsc.WaitForFinish(60000)
    # lsc.RunActionGroup(17, 1)  # Gangnam Style dance
# lsc.WaitForFinish(60000)
    lsc.RunActionGroup(18, 1)  # "Little Apple" dance
lsc.WaitForFinish(60000)
    # lsc.RunActionGroup(19, 1)  # La Song dance
# lsc.WaitForFinish(60000)
    # lsc.RunActionGroup(20, 1)  # "Bei'er Shuang" dance
# Start the motion sequence as a daemon (child) thread
th1 = threading.Thread(target=Move)
th1.setDaemon(True)
th1.start()
while True:  # image display loop
if cap.isOpened():
ret, orgFrame = cap.read()
if ret:
cv2.imshow("orgFrame", orgFrame)
cv2.waitKey(1)
else:
time.sleep(0.01)
else:
time.sleep(0.01) | PypiClean |
/OSF_EIMTC-0.1.46.tar.gz/OSF_EIMTC-0.1.46/src/EIMTC/models/_hybrid.py | from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Layer, BatchNormalization, MaxPool1D, Conv1D, ReLU, MaxPooling1D, Flatten, Bidirectional, GRU, LeakyReLU, Lambda, Dense, MaxPool2D, Conv2D, LSTM, Concatenate, Dropout
from tensorflow.keras.constraints import max_norm
from tensorflow import expand_dims
from tensorflow.keras import activations
import tensorflow as tf
from . import CustomDistiller
class Hybrid(CustomDistiller):
def __init__(self, payload_size=784, header_fields_packets=32, n_classes=[],merging_method='feat_concat') -> None:
super(Hybrid, self).__init__(
modalities=[
wang_payload_modality(payload_size),
lopez_protocol_header_fields_extended_modality(header_fields_packets),
stnn_inspired_image_extended_modality(),
graphdapp_modality()
],
adapter_size=128,
n_classes=n_classes,
merging_method=merging_method
)
def wang_payload_modality(payload_size=784):
input_layer_payload_modality = Input(shape=(payload_size,1), name='input_payload')
return Model(
name='Wang payload modality - nbytes',
inputs=input_layer_payload_modality,
outputs=stack([
input_layer_payload_modality,
Conv1D(16, 25, name='Conv1D_payload_1'),
ReLU(),
MaxPooling1D(3, name='MaxPooling1D_payload_1'),
Conv1D(32, 35, name='Conv1D_payload_2'),
ReLU(),
MaxPooling1D(3, name='MaxPooling1D_payload_2'),
Flatten(),
])
)
def lopez_protocol_header_fields_modality(packet_count=32):
input_layer_protocol_fields_modality = Input(shape=(packet_count,4), name='input_protocol_fields')
return Model(
name='Lopez protocol header fields modality',
inputs=input_layer_protocol_fields_modality,
outputs=stack([
input_layer_protocol_fields_modality,
Bidirectional(GRU(64, return_sequences=True, kernel_constraint=max_norm(3))),
ReLU(),
Flatten(),
])
)
def lopez_protocol_header_fields_extended_modality(packet_count=32):
input_layer_protocol_fields_modality = Input(shape=(packet_count,6), name='input_protocol_fields')
return Model(
name='Lopez protocol header fields extended modality',
inputs=input_layer_protocol_fields_modality,
outputs=stack([
input_layer_protocol_fields_modality,
Bidirectional(GRU(64, return_sequences=True, kernel_constraint=max_norm(3))),
ReLU(),
Flatten(),
])
)
def stnn_inspired_image_modality():
input_layer_stnn_modality = Input(shape=(5,14), name='input_stnn')
return Model(
name='STNN-inspired image modality',
inputs=input_layer_stnn_modality,
outputs=stack([
input_layer_stnn_modality,
Bidirectional(LSTM(65,return_sequences=True)),
Lambda(lambda x: expand_dims(x, axis=3)),
Conv2D(32,3,padding='same'),
LeakyReLU(),
Conv2D(32,3,padding='same'),
LeakyReLU(),
MaxPool2D(2),
Conv2D(64,3,padding='same'),
LeakyReLU(),
Conv2D(128,3,padding='same'),
LeakyReLU(),
MaxPool2D(2),
Flatten(),
Dense(512),
])
)
def stnn_inspired_image_extended_modality():
input_size = 177-42
input_layer_stnn_modality = Input(shape=(input_size,1), name='input_stnn')
return Model(
name='STNN-inspired image extended modality',
inputs=input_layer_stnn_modality,
outputs=stack([
input_layer_stnn_modality,
Bidirectional(LSTM(64,return_sequences=True)), #128
Conv1D(32,3,padding='same'),
LeakyReLU(),
#Conv1D(32,3,padding='same'),
#LeakyReLU(),
#MaxPool1D(2),
Conv1D(64,3,padding='same'),
LeakyReLU(),
#Conv1D(128,3,padding='same'),
#LeakyReLU(),
MaxPool1D(2),
Flatten(),
Dense(512),
])
)
def graphdapp_modality():
input_layer_graphdapp_adj_matrix_modality = Input(shape=(32,32),name='input_graphdapp_adj_matrix')
input_layer_graphdapp_features_modality = Input(shape=(32,1),name='input_graphdapp_features')
inputs_layer = [input_layer_graphdapp_adj_matrix_modality, input_layer_graphdapp_features_modality]
mlp1 = MLPLayer(64)
mlp2 = MLPLayer(64)
mlp3 = MLPLayer(64)
readout = Readout()
concat = Concatenate()
x1 = mlp1(inputs_layer)
x2 = mlp2(x1)
x3 = mlp3(x2)
x4 = readout([x1, x2, x3])
x4 = concat(x4)
return Model(
name='GraphDApp modality',
inputs=inputs_layer,
outputs= x4
)
class Readout(Layer):
def __init__(self):
super(Readout, self).__init__()
def call(self, inputs):
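        # Sum-pool each layer's node features (the F in the [A, F] pairs) into a
        # graph-level vector; one vector is returned per GNN layer.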
FsBatch = []
for i in range(len(inputs)):
Fbatch = inputs[i][1]
FsBatch.append(tf.reduce_sum(Fbatch, axis=-2))
return FsBatch
class MLPLayer(Layer):
def __init__(
self,
output_size=5,
activation='relu',
use_bias=True,
neighbour_agg_method='sum',
dropout_rate=0.025):
super(MLPLayer, self).__init__()
self.output_size = output_size
self.dense = Dense(self.output_size, use_bias=use_bias)
self.activation = activations.get(activation)
self.batch_norm = BatchNormalization()
self.dropout = Dropout(dropout_rate)
def build(self, input_shape):
self.eps = self.add_weight(
name='epsilon',
shape=(1,),
initializer="random_normal",
dtype='float32',
trainable=True,
)
def call(self, inputs, training=False):
'''
[A, F]
'''
A = inputs[0]
F = inputs[1]
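        # GIN-style update: scale the node's own features by (1 + eps) and add the
        # neighbour features aggregated through the adjacency product A @ F.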
outputs = tf.multiply(1.0+self.eps, F) + tf.matmul(A,F)
outputs = self.dense(outputs)
outputs = self.activation(outputs)
outputs = self.batch_norm(outputs, training=training)
outputs = self.dropout(outputs)
return [A, outputs]
def stack(layers):
'''
Using the Functional-API of Tensorflow to build a sequential
network (stacked layers) from list of layers.
'''
layer_stack = None
for layer in layers:
if layer_stack is None:
layer_stack = layer
else:
layer_stack = layer(layer_stack)
return layer_stack | PypiClean |
/MergePythonSDK.ticketing-2.2.2-py3-none-any.whl/MergePythonSDK/crm/model/crm_contact_endpoint_request.py | import re # noqa: F401
import sys # noqa: F401
from typing import (
Optional,
Union,
List,
Dict,
)
from MergePythonSDK.shared.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
OpenApiModel,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from MergePythonSDK.shared.exceptions import ApiAttributeError
from MergePythonSDK.shared.model_utils import import_model_by_name
def lazy_import():
from MergePythonSDK.crm.model.contact_request import ContactRequest
globals()['ContactRequest'] = ContactRequest
class CRMContactEndpointRequest(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
defined_types = {
'model': (ContactRequest,), # noqa: E501
}
return defined_types
@cached_property
def discriminator():
return None
attribute_map = {
'model': 'model', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, model, *args, **kwargs): # noqa: E501
"""CRMContactEndpointRequest - a model defined in OpenAPI
Args:
model (ContactRequest):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.model = model
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, model, *args, **kwargs): # noqa: E501
"""CRMContactEndpointRequest - a model defined in OpenAPI
Args:
model (ContactRequest):
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.model: Union["ContactRequest"] = model | PypiClean |
/Misago-0.36.1.tar.gz/Misago-0.36.1/misago/threads/moderation/posts.py | from django.db import transaction
from django.utils import timezone
from django.utils.translation import gettext as _
from .exceptions import ModerationError
__all__ = [
"approve_post",
"protect_post",
"unprotect_post",
"unhide_post",
"hide_post",
"delete_post",
]
def approve_post(user, post):
if not post.is_unapproved:
return False
post.is_unapproved = False
post.save(update_fields=["is_unapproved"])
return True
def protect_post(user, post):
if post.is_protected:
return False
post.is_protected = True
post.save(update_fields=["is_protected"])
if post.is_best_answer:
post.thread.best_answer_is_protected = True
post.thread.save(update_fields=["best_answer_is_protected"])
return True
def unprotect_post(user, post):
if not post.is_protected:
return False
post.is_protected = False
post.save(update_fields=["is_protected"])
if post.is_best_answer:
post.thread.best_answer_is_protected = False
post.thread.save(update_fields=["best_answer_is_protected"])
return True
def unhide_post(user, post):
if post.is_first_post:
raise ModerationError(
_("You can't make original post visible without revealing thread.")
)
if not post.is_hidden:
return False
post.is_hidden = False
post.save(update_fields=["is_hidden"])
return True
def hide_post(user, post):
if post.is_first_post:
raise ModerationError(_("You can't hide original post without hiding thread."))
if post.is_hidden:
return False
post.is_hidden = True
post.hidden_by = user
post.hidden_by_name = user.username
post.hidden_by_slug = user.slug
post.hidden_on = timezone.now()
post.save(
update_fields=[
"is_hidden",
"hidden_by",
"hidden_by_name",
"hidden_by_slug",
"hidden_on",
]
)
return True
@transaction.atomic
def delete_post(user, post):
if post.is_first_post:
raise ModerationError(
_("You can't delete original post without deleting thread.")
)
post.delete()
return True | PypiClean |
/Corrfunc-2.5.1.tar.gz/Corrfunc-2.5.1/docs/source/modules/which_corrfunc.rst | .. _which_corrfunc:
***********************************
Which correlation function to use?
***********************************
Corrfunc offers a variety of correlation functions to cover a broad range of science applications. The basic distinction is whether the input particles come directly
from a simulation or from an observational survey (or, equivalently, a simulation that has been processed to look like a survey). For simulation data, referred to throughout
as `theory`, the particle positions are assumed to be Cartesian, co-moving XYZ. For survey data, referred to throughout as `mocks`, the particle positions are assumed to be
`Right Ascension` (0 -- 360 deg), `Declination` (-90 -- 90 deg) and `CZ` (speed of light multiplied by the redshift). Depending on the exact
type of data **and** the desired correlation function, the following table should help you figure out which code you should use.
+-------------------+---------------+-----------------+-----------------------------------------+-------------------------------+---------------------------------------+
| Input Data | Periodic | Particle domain | Desired correlation function | Returns | Python code |
+===================+===============+=================+=========================================+===============================+=======================================+
| X, Y, Z | True | Cube (box) | wp(:math:`r_p`) | 2-D Projected Correlation |:py:mod:`Corrfunc.theory.wp` |
| | | +-----------------------------------------+-------------------------------+---------------------------------------+
| | | | :math:`\xi(r)` | 3-D Real-space Correlation |:py:mod:`Corrfunc.theory.xi` |
+-------------------+---------------+-----------------+-----------------------------------------+-------------------------------+---------------------------------------+
| X, Y, Z | True or False | Arbitrary | :math:`\xi(r)` | Pair-counts in 3-D real-space |:py:mod:`Corrfunc.theory.DD` |
| | | +-----------------------------------------+-------------------------------+---------------------------------------+
| | | | :math:`\xi(r_p, \pi)` | Pair-counts in 2-D |:py:mod:`Corrfunc.theory.DDrppi` |
| | | +-----------------------------------------+-------------------------------+---------------------------------------+
| | | | :math:`\xi(s, \mu)` | Pair-counts in 2-D |:py:mod:`Corrfunc.theory.DDsmu` |
+-------------------+---------------+-----------------+-----------------------------------------+-------------------------------+---------------------------------------+
| ra, dec, cz | False | Arbitrary | :math:`\xi(r_p, \pi)` | Pair-counts in 2-D |:py:mod:`Corrfunc.mocks.DDrppi_mocks` |
| | | +-----------------------------------------+-------------------------------+---------------------------------------+
| | | | :math:`\xi(s, \mu)` | Pair-counts in 2-D |:py:mod:`Corrfunc.mocks.DDsmu_mocks` |
+-------------------+---------------+-----------------+-----------------------------------------+-------------------------------+---------------------------------------+
| ra, dec | False | Arbitrary | :math:`\omega(\theta)` | Pair-counts in angular space |:py:mod:`Corrfunc.mocks.DDtheta_mocks` |
+-------------------+---------------+-----------------+-----------------------------------------+-------------------------------+---------------------------------------+
In all cases where only pair-counts are returned (e.g., all of the `mocks` routines), you will need to compute at least
an additional `RR` term. Please see :py:mod:`Corrfunc.utils.convert_3d_counts_to_cf` to
convert 3-D pair-counts (or angular pair counts) into a correlation
function. For 2-D pair-counts, please use :py:mod:`Corrfunc.utils.convert_rp_pi_counts_to_wp`
to convert into a projected correlation function. If you want to compute
the :math:`\xi(r_p, \pi)` from the 2-D pair-counts, then simply call
:py:mod:`Corrfunc.utils.convert_3d_counts_to_cf` with the arrays.
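As a minimal, illustrative sketch of that workflow (the catalogue arrays, bin
edges, and thread count below are placeholders chosen for this example, not
values prescribed by Corrfunc)::

    import numpy as np
    from Corrfunc.theory import DD
    from Corrfunc.utils import convert_3d_counts_to_cf

    # Hypothetical data and random catalogues in Cartesian coordinates
    N, Nrand = 10_000, 50_000
    X, Y, Z = (np.random.uniform(0.0, 420.0, N) for _ in range(3))
    RX, RY, RZ = (np.random.uniform(0.0, 420.0, Nrand) for _ in range(3))
    rbins = np.linspace(0.1, 20.0, 15)
    nthreads = 2

    # Raw pair counts: DD, DR and RR (autocorr=1 for an auto pair count)
    DD_counts = DD(1, nthreads, rbins, X, Y, Z, periodic=False)
    DR_counts = DD(0, nthreads, rbins, X, Y, Z,
                   X2=RX, Y2=RY, Z2=RZ, periodic=False)
    RR_counts = DD(1, nthreads, rbins, RX, RY, RZ, periodic=False)

    # Landy-Szalay estimate of xi(r) from the raw pair counts
    cf = convert_3d_counts_to_cf(N, N, Nrand, Nrand,
                                 DD_counts, DR_counts, DR_counts, RR_counts)

The same pattern applies to the `mocks` pair counters; only the pair-counting call changes.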
Also, see :ref:`commandline-interface` for a detailed list of the clustering statistics and the various available API interfaces.
| PypiClean |
/DeepCellTL-0.12.5.tar.gz/DeepCellTL-0.12.5/deepcell/model_zoo/tracking.py | """Assortment of CNN (and GNN) architectures for tracking single cells"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import ast
import math
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import TimeDistributed, Conv2D, Conv3D, LSTM
from tensorflow.keras.layers import Input, Concatenate, InputLayer
from tensorflow.keras.layers import Add, Subtract, Dense, Reshape
from tensorflow.keras.layers import MaxPool3D
from tensorflow.keras.layers import Activation, Softmax
from tensorflow.keras.layers import LayerNormalization, BatchNormalization, Lambda
from tensorflow.keras.regularizers import l2
from spektral.layers import GCSConv, GCNConv, GATConv
from deepcell.layers import ImageNormalization2D
from deepcell.layers import Comparison, DeltaReshape, Unmerge, TemporalMerge
def siamese_model(input_shape=None,
features=None,
neighborhood_scale_size=10,
reg=1e-5,
init='he_normal',
filter_size=61):
"""Creates a tracking model based on Siamese Neural Networks(SNNs).
Args:
input_shape (tuple): If no input tensor, create one with this shape.
        features (list): List of feature names to use; each must be one of the
            branches handled below (e.g. 'appearance', 'distance',
            'neighborhood', 'regionprop')
        neighborhood_scale_size (int): half-width of the neighborhood window;
            the neighborhood input is ``2 * neighborhood_scale_size + 1``
            pixels on each side
        reg (float): L2 regularization value
        init (str): Method for initializing weights
filter_size (int): the receptive field of the neural network
Returns:
tensorflow.keras.Model: 2D FeatureNet
"""
def compute_input_shape(feature):
if feature == 'appearance':
return input_shape
elif feature == 'distance':
return (None, 2)
elif feature == 'neighborhood':
return (None, 2 * neighborhood_scale_size + 1,
2 * neighborhood_scale_size + 1,
input_shape[-1])
elif feature == 'regionprop':
return (None, 3)
else:
raise ValueError('siamese_model.compute_input_shape: '
'Unknown feature `{}`'.format(feature))
def compute_reshape(feature):
if feature == 'appearance':
return (64,)
elif feature == 'distance':
return (2,)
elif feature == 'neighborhood':
return (64,)
elif feature == 'regionprop':
return (3,)
else:
raise ValueError('siamese_model.compute_output_shape: '
'Unknown feature `{}`'.format(feature))
def compute_feature_extractor(feature, shape):
if feature == 'appearance':
# This should not stay: channels_first/last should be used to
# dictate size (1 works for either right now)
            N_layers = int(np.floor(np.log2(input_shape[1])))  # np.int was removed from NumPy
feature_extractor = Sequential()
feature_extractor.add(InputLayer(input_shape=shape))
# feature_extractor.add(ImageNormalization2D('std', filter_size=32))
for layer in range(N_layers):
feature_extractor.add(Conv3D(64, (1, 3, 3),
kernel_initializer=init,
padding='same',
kernel_regularizer=l2(reg)))
feature_extractor.add(BatchNormalization(axis=channel_axis))
feature_extractor.add(Activation('relu'))
feature_extractor.add(MaxPool3D(pool_size=(1, 2, 2)))
feature_extractor.add(Reshape((-1, 64)))
return feature_extractor
elif feature == 'distance':
return None
elif feature == 'neighborhood':
            N_layers_og = int(np.floor(np.log2(2 * neighborhood_scale_size + 1)))
feature_extractor_neighborhood = Sequential()
feature_extractor_neighborhood.add(
InputLayer(input_shape=shape)
)
for layer in range(N_layers_og):
feature_extractor_neighborhood.add(Conv3D(64, (1, 3, 3),
kernel_initializer=init,
padding='same',
kernel_regularizer=l2(reg)))
feature_extractor_neighborhood.add(BatchNormalization(axis=channel_axis))
feature_extractor_neighborhood.add(Activation('relu'))
feature_extractor_neighborhood.add(MaxPool3D(pool_size=(1, 2, 2)))
feature_extractor_neighborhood.add(Reshape((-1, 64)))
return feature_extractor_neighborhood
elif feature == 'regionprop':
return None
else:
raise ValueError('siamese_model.compute_feature_extractor: '
'Unknown feature `{}`'.format(feature))
if features is None:
raise ValueError('siamese_model: No features specified.')
if K.image_data_format() == 'channels_first':
channel_axis = 1
raise ValueError('siamese_model: Only channels_last is supported.')
else:
channel_axis = -1
input_shape = tuple([None] + list(input_shape))
features = sorted(features)
inputs = []
outputs = []
for feature in features:
in_shape = compute_input_shape(feature)
re_shape = compute_reshape(feature)
feature_extractor = compute_feature_extractor(feature, in_shape)
layer_1 = Input(shape=in_shape, name='{}_input1'.format(feature))
layer_2 = Input(shape=in_shape, name='{}_input2'.format(feature))
inputs.extend([layer_1, layer_2])
# apply feature_extractor if it exists
if feature_extractor is not None:
layer_1 = feature_extractor(layer_1)
layer_2 = feature_extractor(layer_2)
# LSTM on 'left' side of network since that side takes in stacks of features
layer_1 = LSTM(64)(layer_1)
layer_2 = Reshape(re_shape)(layer_2)
outputs.append([layer_1, layer_2])
dense_merged = []
for layer_1, layer_2 in outputs:
merge = Concatenate(axis=channel_axis)([layer_1, layer_2])
dense_merge = Dense(128)(merge)
bn_merge = BatchNormalization(axis=channel_axis)(dense_merge)
dense_relu = Activation('relu')(bn_merge)
dense_merged.append(dense_relu)
# Concatenate outputs from both instances
merged_outputs = Concatenate(axis=channel_axis)(dense_merged)
# Add dense layers
dense1 = Dense(128)(merged_outputs)
bn1 = BatchNormalization(axis=channel_axis)(dense1)
relu1 = Activation('relu')(bn1)
dense2 = Dense(128)(relu1)
bn2 = BatchNormalization(axis=channel_axis)(dense2)
relu2 = Activation('relu')(bn2)
dense3 = Dense(3, activation='softmax', name='classification', dtype=K.floatx())(relu2)
# Instantiate model
final_layer = dense3
model = Model(inputs=inputs, outputs=final_layer)
return model
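# A minimal usage sketch for the model above (illustrative shapes and feature
# names only; the names mirror the branches in compute_input_shape):
#
#   model = siamese_model(input_shape=(32, 32, 1),
#                         features=['appearance', 'distance',
#                                   'neighborhood', 'regionprop'])
#   model.compile(optimizer='adam', loss='categorical_crossentropy')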
class GNNTrackingModel(object):
"""Creates a tracking model based on Graph Neural Networks(GNNs).
Args:
max_cells (int): maximum number of tracks per movie in dataset
track_length (int): track length (parameter defined in dataset obj)
n_filters (int): Number of filters
encoder_dim (int): Dimension of encoder
embedding_dim (int): Dimension of embedding
n_layers (int): number of layers
graph_layer (str): Must be one of {'gcs', 'gcn', 'gat'}
Additional kwargs for the graph layers can be encoded in the following format
``<layer name>-kwarg:value-kwarg:value``
appearance_shape (tuple): shape of each object's appearance tensor
norm_layer (str): Must be one of {'layer', 'batch'}
"""
def __init__(self,
max_cells=39,
track_length=8,
n_filters=64,
encoder_dim=64,
embedding_dim=64,
n_layers=3,
graph_layer='gcs',
appearance_shape=(32, 32, 1),
norm_layer='batch'):
self.n_filters = n_filters
self.encoder_dim = encoder_dim
self.embedding_dim = embedding_dim
self.n_layers = n_layers
self.max_cells = max_cells
self.track_length = track_length
if len(appearance_shape) != 3:
            raise ValueError('appearance_shape should be a '
'tuple of length 3.')
log2 = math.log(appearance_shape[0], 2)
if appearance_shape[0] != appearance_shape[1] or int(log2) != log2:
raise ValueError('appearance_shape should have square dimensions '
'and each side should be a power of 2.')
graph_layer_name = str(graph_layer.split('-')[0]).lower()
if graph_layer_name not in {'gcn', 'gcs', 'gat'}:
raise ValueError('Invalid graph_layer: {}'.format(graph_layer_name))
self.graph_layer = graph_layer
norm_options = {'layer', 'batch'}
if norm_layer not in norm_options:
raise ValueError('Invalid normalization layer {}. Must be one of {}.'.format(
norm_layer, norm_options))
if norm_layer == 'layer':
self.norm_layer = LayerNormalization
self.norm_layer_prefix = 'ln'
elif norm_layer == 'batch':
self.norm_layer = BatchNormalization
self.norm_layer_prefix = 'bn'
# Use inputs to build expected shapes
base_shape = [self.track_length, self.max_cells]
self.appearance_shape = tuple(base_shape + list(appearance_shape))
self.morphology_shape = tuple(base_shape + [3])
self.centroid_shape = tuple(base_shape + [2])
self.adj_shape = tuple(base_shape + [self.max_cells])
# Create encoders and decoders
self.unmerge_embeddings_model = self.get_unmerge_embeddings_model()
self.unmerge_centroids_model = self.get_unmerge_centroids_model()
self.embedding_temporal_merge_model = self.get_embedding_temporal_merge_model()
self.delta_temporal_merge_model = self.get_delta_temporal_merge_model()
self.appearance_encoder = self.get_appearance_encoder()
self.morphology_encoder = self.get_morphology_encoder()
self.centroid_encoder = self.get_centroid_encoder()
self.delta_encoder, self.delta_across_frames_encoder = self.get_delta_encoders()
self.neighborhood_encoder = self.get_neighborhood_encoder()
self.tracking_decoder = self.get_tracking_decoder()
# Create branches
self.training_branch = self.get_training_branch()
self.inference_branch = self.get_inference_branch()
# Create model
self.training_model, self.inference_model = self.get_models()
def get_embedding_temporal_merge_model(self):
inputs = Input(shape=(None, None, self.embedding_dim),
name='embedding_temporal_merge_input')
x = TemporalMerge(self.embedding_dim, name='emb_tm')(inputs)
return Model(inputs=inputs, outputs=x, name='embedding_temporal_merge')
def get_delta_temporal_merge_model(self):
inputs = Input(shape=(None, None, self.encoder_dim),
name='centroid_temporal_merge_input')
x = inputs
x = TemporalMerge(self.encoder_dim, name='delta_tm')(inputs)
return Model(inputs=inputs, outputs=x, name='delta_temporal_merge')
def get_appearance_encoder(self):
app_shape = tuple([None] + list(self.appearance_shape)[2:])
inputs = Input(shape=app_shape, name='encoder_app_input')
x = inputs
x = TimeDistributed(ImageNormalization2D(norm_method='whole_image',
name='imgnrm_ae'))(x)
for i in range(int(math.log(app_shape[1], 2))):
x = Conv3D(self.n_filters,
(1, 3, 3),
strides=1,
padding='same',
use_bias=False, name='conv3d_ae{}'.format(i))(x)
x = self.norm_layer(axis=-1, name='{}_ae{}'.format(self.norm_layer_prefix, i))(x)
x = Activation('relu', name='relu_ae{}'.format(i))(x)
x = MaxPool3D(pool_size=(1, 2, 2))(x)
x = Lambda(lambda t: tf.squeeze(t, axis=(2, 3)))(x)
x = Dense(self.encoder_dim, name='dense_aeout')(x)
x = self.norm_layer(axis=-1, name='{}_aeout'.format(self.norm_layer_prefix))(x)
x = Activation('relu', name='appearance_embedding')(x)
return Model(inputs=inputs, outputs=x)
def get_morphology_encoder(self):
morph_shape = (None, self.morphology_shape[-1])
inputs = Input(shape=morph_shape, name='encoder_morph_input')
x = inputs
x = Dense(self.encoder_dim, name='dense_me')(x)
x = self.norm_layer(axis=-1, name='{}_me'.format(self.norm_layer_prefix))(x)
x = Activation('relu', name='morphology_embedding')(x)
return Model(inputs=inputs, outputs=x)
def get_centroid_encoder(self):
centroid_shape = (None, self.centroid_shape[-1])
inputs = Input(shape=centroid_shape, name='encoder_centroid_input')
x = inputs
x = Dense(self.encoder_dim, name='dense_ce')(x)
x = self.norm_layer(axis=-1, name='{}_ce'.format(self.norm_layer_prefix))(x)
x = Activation('relu', name='centroid_embedding')(x)
return Model(inputs=inputs, outputs=x)
def get_delta_encoders(self):
inputs = Input(shape=(None, None, self.centroid_shape[-1]),
name='encoder_delta_input')
inputs_across_frames = Input(shape=(None, None, None, self.centroid_shape[-1]),
name='encoder_delta_across_frames_input')
d = Dense(self.encoder_dim, name='dense_des')
a = Activation('relu', name='relu_des')
x_0 = d(inputs)
x_0 = self.norm_layer(axis=-1, name='{}_des0'.format(self.norm_layer_prefix))(x_0)
x_0 = a(x_0)
x_1 = d(inputs_across_frames)
x_1 = self.norm_layer(axis=-1, name='{}_des1'.format(self.norm_layer_prefix))(x_1)
x_1 = a(x_1)
delta_encoder = Model(inputs=inputs, outputs=x_0)
delta_across_frames_encoder = Model(inputs=inputs_across_frames, outputs=x_1)
return delta_encoder, delta_across_frames_encoder
def get_neighborhood_encoder(self):
app_input = self.appearance_encoder.input
morph_input = self.morphology_encoder.input
centroid_input = self.centroid_encoder.input
adj_input = Input(shape=(None, None), name='encoder_adj_input')
app_features = self.appearance_encoder.output
morph_features = self.morphology_encoder.output
centroid_features = self.centroid_encoder.output
adj = adj_input
# Concatenate features
node_features = Concatenate(axis=-1)([app_features, morph_features, centroid_features])
node_features = Dense(self.n_filters, name='dense_ne0')(node_features)
node_features = self.norm_layer(axis=-1, name='{}_ne0'.format(self.norm_layer_prefix)
)(node_features)
node_features = Activation('relu', name='relu_ne0')(node_features)
# Apply graph convolution
# Extract and define layer name
graph_layer_name = str(self.graph_layer.split('-')[0]).lower()
# Extract layer kwargs
split = self.graph_layer.split('-')
layer_kwargs = {}
if len(split) > 1:
for item in split[1:]:
k, v = item.split(':')
# Cast value to correct type
try:
layer_kwargs[k] = ast.literal_eval(v)
except ValueError:
layer_kwargs[k] = v
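        # Example (illustrative): graph_layer='gat-attn_heads:2' yields
        # graph_layer_name='gat' and layer_kwargs={'attn_heads': 2}; kwarg
        # names are passed straight through to the underlying spektral layer.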
for i in range(self.n_layers):
name = '{}{}'.format(graph_layer_name, i)
if graph_layer_name == 'gcn':
graph_layer = GCNConv(self.n_filters, activation=None, name=name, **layer_kwargs)
elif graph_layer_name == 'gcs':
graph_layer = GCSConv(self.n_filters, activation=None, name=name, **layer_kwargs)
elif graph_layer_name == 'gat':
graph_layer = GATConv(self.n_filters, activation=None, name=name, **layer_kwargs)
else:
raise ValueError('Unexpected graph_layer: {}'.format(graph_layer_name))
node_features = graph_layer([node_features, adj])
node_features = self.norm_layer(axis=-1,
name='{}_ne{}'.format(self.norm_layer_prefix, i + 1)
)(node_features)
node_features = Activation('relu', name='relu_ne{}'.format(i + 1))(node_features)
concat = Concatenate(axis=-1)([app_features, morph_features, node_features])
node_features = Dense(self.embedding_dim, name='dense_nef')(concat)
node_features = self.norm_layer(axis=-1, name='{}_nef'.format(self.norm_layer_prefix)
)(node_features)
node_features = Activation('relu', name='relu_nef')(node_features)
inputs = [app_input, morph_input, centroid_input, adj_input]
outputs = [node_features, centroid_input]
return Model(inputs=inputs, outputs=outputs, name='neighborhood_encoder')
def get_unmerge_embeddings_model(self):
inputs = Input(shape=(self.appearance_shape[1], self.embedding_dim),
name='unmerge_embeddings_input')
x = inputs
x = Unmerge(self.track_length,
self.appearance_shape[1],
self.embedding_dim,
name='unmerge_embeddings')(x)
return Model(inputs=inputs, outputs=x, name='unmerge_embeddings_model')
def get_unmerge_centroids_model(self):
inputs = Input(shape=(self.centroid_shape[1], self.centroid_shape[-1]),
name='unmerge_centroids_input')
x = inputs
x = Unmerge(self.track_length,
self.centroid_shape[1],
self.centroid_shape[2],
name='unmerge_centroids')(x)
return Model(inputs=inputs, outputs=x, name='unmerge_centroids_model')
def _get_deltas(self, x):
"""Convert raw positions to deltas"""
deltas = Lambda(lambda t: t[:, 1:] - t[:, 0:-1])(x)
deltas = Lambda(lambda t: tf.pad(t, tf.constant([[0, 0], [1, 0], [0, 0], [0, 0]])))(deltas)
return deltas
def _get_deltas_across_frames(self, centroids):
"""Find deltas across frames"""
centroid_current = Lambda(lambda t: t[:, 0:-1])(centroids)
centroid_future = Lambda(lambda t: t[:, 1:])(centroids)
centroid_current = Lambda(lambda t: tf.expand_dims(t, 3))(centroid_current)
centroid_future = Lambda(lambda t: tf.expand_dims(t, 2))(centroid_future)
deltas_across_frames = Subtract()([centroid_future, centroid_current])
return deltas_across_frames
def get_training_branch(self):
# Define inputs
app_input = Input(shape=self.appearance_shape, name='appearances')
morph_input = Input(shape=self.morphology_shape, name='morphologies')
centroid_input = Input(shape=self.centroid_shape, name='centroids')
adj_input = Input(shape=self.adj_shape, name='adj_matrices')
inputs = [app_input, morph_input, centroid_input, adj_input]
# Merge batch and temporal dimensions
new_app_shape = tuple([-1] + list(self.appearance_shape)[1:])
reshaped_app_input = Lambda(lambda t: tf.reshape(t, new_app_shape),
name='reshaped_appearances')(app_input)
new_morph_shape = tuple([-1] + list(self.morphology_shape)[1:])
reshaped_morph_input = Lambda(lambda t: tf.reshape(t, new_morph_shape),
name='reshaped_morphologies')(morph_input)
new_centroid_shape = tuple([-1] + list(self.centroid_shape)[1:])
reshaped_centroid_input = Lambda(lambda t: tf.reshape(t, new_centroid_shape),
name='reshaped_centroids')(centroid_input)
new_adj_shape = [-1, self.adj_shape[1], self.adj_shape[2]]
reshaped_adj_input = Lambda(lambda t: tf.reshape(t, new_adj_shape),
name='reshaped_adj_matrices')(adj_input)
reshaped_inputs = [
reshaped_app_input,
reshaped_morph_input,
reshaped_centroid_input,
reshaped_adj_input
]
x, centroids = self.neighborhood_encoder(reshaped_inputs)
# Reshape embeddings to add back temporal dimension
x = self.unmerge_embeddings_model(x)
centroids = self.unmerge_centroids_model(centroids)
# Get current and future embeddings
x_current = Lambda(lambda t: t[:, 0:-1])(x)
x_future = Lambda(lambda t: t[:, 1:])(x)
# Integrate temporal information for embeddings and compare
x_current = self.embedding_temporal_merge_model(x_current)
x = Comparison(name='training_embedding_comparison')([x_current, x_future])
# Convert centroids to deltas
deltas_current = self._get_deltas(centroids)
deltas_future = self._get_deltas_across_frames(centroids)
deltas_current = Activation(tf.math.abs, name='act_dc_tb')(deltas_current)
deltas_future = Activation(tf.math.abs, name='act_df_tb')(deltas_future)
deltas_current = self.delta_encoder(deltas_current)
deltas_future = self.delta_across_frames_encoder(deltas_future)
deltas_current = Lambda(lambda t: t[:, 0:-1])(deltas_current)
deltas_current = self.delta_temporal_merge_model(deltas_current)
deltas_current = Lambda(lambda t: tf.expand_dims(t, 3))(deltas_current)
multiples = [1, 1, 1, self.centroid_shape[1], 1]
deltas_current = Lambda(lambda t: tf.tile(t, multiples))(deltas_current)
deltas = Concatenate(axis=-1)([deltas_current, deltas_future])
outputs = [x, deltas]
# Create submodel
return Model(inputs=inputs, outputs=outputs, name='training_branch')
def get_inference_branch(self):
# batch size, tracks
current_embedding = Input(shape=(None, None, self.embedding_dim),
name='current_embeddings')
current_centroids = Input(shape=(None, None, self.centroid_shape[-1]),
name='current_centroids')
future_embedding = Input(shape=(1, None, self.embedding_dim),
name='future_embeddings')
future_centroids = Input(shape=(1, None, self.centroid_shape[-1]),
name='future_centroids')
inputs = [current_embedding, current_centroids,
future_embedding, future_centroids]
# Embeddings - Integrate temporal information
x_current = self.embedding_temporal_merge_model(current_embedding)
# Embeddings - Get final frame from current track
x_current = Lambda(lambda t: t[:, -1:])(x_current)
x = Comparison(name='inference_comparison')([x_current, future_embedding])
# Centroids - Get deltas
deltas_current = self._get_deltas(current_centroids)
deltas_current = Activation(tf.math.abs, name='act_dc_ib')(deltas_current)
deltas_current = self.delta_encoder(deltas_current)
deltas_current = self.delta_temporal_merge_model(deltas_current)
deltas_current = Lambda(lambda t: t[:, -1:])(deltas_current)
# Centroids - Get deltas across frames
centroid_current_end = Lambda(lambda t: t[:, -1:])(current_centroids)
centroid_current_end = Lambda(lambda t: tf.expand_dims(t, 3))(centroid_current_end)
centroid_future = Lambda(lambda t: tf.expand_dims(t, 2))(future_centroids)
deltas_future = Subtract()([centroid_future, centroid_current_end])
deltas_future = Activation(tf.math.abs, name='act_df_ib')(deltas_future)
deltas_future = self.delta_across_frames_encoder(deltas_future)
deltas_current = DeltaReshape(name='delta_reshape')([deltas_current, future_centroids])
deltas = Concatenate(axis=-1)([deltas_current, deltas_future])
outputs = [x, deltas]
return Model(inputs=inputs, outputs=outputs, name='inference_branch')
def get_tracking_decoder(self):
embedding_input = Input(shape=(None, None, None, 2 * self.embedding_dim))
deltas_input = Input(shape=(None, None, None, 2 * self.encoder_dim))
embedding = Concatenate(axis=-1)([embedding_input, deltas_input])
embedding = Dense(self.n_filters, name='dense_td0')(embedding)
embedding = self.norm_layer(axis=-1, name='{}_td0'.format(self.norm_layer_prefix)
)(embedding)
embedding = Activation('relu', name='relu_td0')(embedding)
# TODO: set to n_classes
embedding = Dense(3, name='dense_outembed')(embedding)
# Add classification head
output = Softmax(axis=-1, name='softmax_comparison')(embedding)
return Model(inputs=[embedding_input, deltas_input],
outputs=output,
name='tracking_decoder')
def get_models(self):
# Create inputs
training_inputs = self.training_branch.input
inference_inputs = self.inference_branch.input
# Apply decoder
training_output = self.tracking_decoder(self.training_branch.output)
inference_output = self.tracking_decoder(self.inference_branch.output)
# Name the training output layer
training_output = Lambda(lambda t: t, name='temporal_adj_matrices')(training_output)
training_model = Model(inputs=training_inputs, outputs=training_output)
inference_model = Model(inputs=inference_inputs, outputs=inference_output)
        return training_model, inference_model
| PypiClean |
/Aquila_Resolve-0.1.4-py3-none-any.whl/Aquila_Resolve/models/dp/preprocessing/text.py | from typing import List, Iterable, Dict, Tuple, Any
class LanguageTokenizer:
"""Simple tokenizer for language to index mapping."""
def __init__(self, languages: List[str]) -> None:
"""
Initializes a language tokenizer for a list of languages.
Args:
languages (List[str]): List of languages, e.g. ['de', 'en'].
"""
self.lang_index = {l: i for i, l in enumerate(languages)}
self.index_lang = {i: l for i, l in enumerate(languages)}
def __call__(self, lang: str) -> int:
"""
Maps the language to an index.
Args:
lang (str): Language to be mapped, e.g. 'de'.
Returns:
int: Index of language.
"""
if lang not in self.lang_index:
raise ValueError(
f"Language not supported: {lang}. "
f"Supported languages: {self.lang_index.keys()}"
)
return self.lang_index[lang]
def decode(self, index: int) -> str:
"""Inverts the index mapping of a language.
Args:
index (int): Index of language.
Returns:
str: Language for the given index.
"""
return self.index_lang[index]
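# Example (illustrative): LanguageTokenizer(['de', 'en']) maps 'de' -> 0 and
# 'en' -> 1, and decode(1) returns 'en'.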
class SequenceTokenizer:
"""Tokenizes text and optionally attaches language-specific start index (and non-specific end index)."""
def __init__(
self,
symbols: List[str],
languages: List[str],
char_repeats: int,
lowercase: bool = True,
append_start_end: bool = True,
pad_token="_",
end_token="<end>",
) -> None:
"""
Initializes a SequenceTokenizer object.
Args:
symbols (List[str]): Character (or phoneme) symbols.
languages (List[str]): List of languages.
char_repeats (int): Number of repeats for each character to allow the forward model to map to longer
phoneme sequences. Example: for char_repeats=2 the tokenizer maps hi -> hhii.
lowercase (bool): Whether to lowercase the input word.
append_start_end (bool): Whether to append special start and end tokens. Start and end tokens are
index mappings of the chosen language.
pad_token (str): Special pad token for index 0.
end_token (str): Special end of sequence token.
"""
self.languages = languages
self.lowercase = lowercase
self.char_repeats = char_repeats
self.append_start_end = append_start_end
self.pad_index = 0
self.token_to_idx = {pad_token: self.pad_index}
self.special_tokens = {pad_token, end_token}
for lang in languages:
lang_token = self._make_start_token(lang)
self.token_to_idx[lang_token] = len(self.token_to_idx)
self.special_tokens.add(lang_token)
self.token_to_idx[end_token] = len(self.token_to_idx)
self.end_index = self.token_to_idx[end_token]
for symbol in symbols:
self.token_to_idx[symbol] = len(self.token_to_idx)
self.idx_to_token = {i: s for s, i in self.token_to_idx.items()}
self.vocab_size = len(self.idx_to_token)
def __call__(self, sentence: Iterable[str], language: str) -> List[int]:
"""
Maps a sequence of symbols for a language to a sequence of indices.
Args:
sentence (Iterable[str]): Sentence (or word) as a sequence of symbols.
language (str): Language for the mapping that defines the start and end token indices.
Returns:
List[int]: Sequence of token indices.
"""
sentence = [item for item in sentence for i in range(self.char_repeats)]
if language not in self.languages:
raise ValueError(
f"Language not supported: {language}. Supported languages: {self.languages}"
)
if self.lowercase:
sentence = [s.lower() for s in sentence]
sequence = [self.token_to_idx[c] for c in sentence if c in self.token_to_idx]
if self.append_start_end:
sequence = [self._get_start_index(language)] + sequence + [self.end_index]
return sequence
def decode(
self, sequence: Iterable[int], remove_special_tokens: bool = False
) -> List[str]:
"""Maps a sequence of indices to a sequence of symbols.
Args:
sequence (Iterable[int]): Encoded sequence to be decoded.
            remove_special_tokens (bool): Whether to remove special tokens such as pad or start and end tokens. Defaults to False.
Returns:
List[str]: Decoded sequence of symbols.
"""
sequence = list(sequence)
if self.append_start_end:
sequence = (
sequence[:1] + sequence[1 : -1 : self.char_repeats] + sequence[-1:]
)
else:
sequence = sequence[:: self.char_repeats]
decoded = [
self.idx_to_token[int(t)] for t in sequence if int(t) in self.idx_to_token
]
if remove_special_tokens:
decoded = [d for d in decoded if d not in self.special_tokens]
return decoded
def _get_start_index(self, language: str) -> int:
lang_token = self._make_start_token(language)
return self.token_to_idx[lang_token]
def _make_start_token(self, language: str) -> str:
return "<" + language + ">"
class Preprocessor:
"""Preprocesses data for a phonemizer training session."""
def __init__(
self,
lang_tokenizer: LanguageTokenizer,
text_tokenizer: SequenceTokenizer,
phoneme_tokenizer: SequenceTokenizer,
) -> None:
"""
Initializes a preprocessor object.
Args:
lang_tokenizer (LanguageTokenizer): Tokenizer for input language.
text_tokenizer (SequenceTokenizer): Tokenizer for input text.
phoneme_tokenizer (SequenceTokenizer): Tokenizer for output phonemes.
"""
self.lang_tokenizer = lang_tokenizer
self.text_tokenizer = text_tokenizer
self.phoneme_tokenizer = phoneme_tokenizer
def __call__(
self, item: Tuple[str, Iterable[str], Iterable[str]]
) -> Tuple[int, List[int], List[int]]:
"""
Preprocesses a data point.
Args:
item (Tuple): Data point comprised of (language, input text, output phonemes).
Returns: Tuple: Preprocessed data point as (language tokens, input_text tokens, output phonemes tokens)
"""
lang, text, phonemes = item
lang_token = self.lang_tokenizer(lang)
text_tokens = self.text_tokenizer(text, lang)
phoneme_tokens = self.phoneme_tokenizer(phonemes, lang)
return lang_token, text_tokens, phoneme_tokens
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "Preprocessor":
"""Initializes a preprocessor from a config.
Args:
config (Dict[str, Any]): Dictionary containing preprocessing hyperparams.
Returns:
Preprocessor: Preprocessor object.
"""
text_symbols = config["preprocessing"]["text_symbols"]
phoneme_symbols = config["preprocessing"]["phoneme_symbols"]
lang_symbols = config["preprocessing"]["languages"]
char_repeats = config["preprocessing"]["char_repeats"]
lowercase = config["preprocessing"]["lowercase"]
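        # Illustrative config shape (assumed, matching the keys read above):
        # {'preprocessing': {'text_symbols': [...], 'phoneme_symbols': [...],
        #                    'languages': ['de', 'en'], 'char_repeats': 2,
        #                    'lowercase': True}}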
lang_tokenizer = LanguageTokenizer(lang_symbols)
text_tokenizer = SequenceTokenizer(
symbols=text_symbols,
languages=lang_symbols,
char_repeats=char_repeats,
lowercase=lowercase,
append_start_end=True,
)
phoneme_tokenizer = SequenceTokenizer(
phoneme_symbols,
languages=lang_symbols,
lowercase=False,
char_repeats=1,
append_start_end=True,
)
return Preprocessor(
lang_tokenizer=lang_tokenizer,
text_tokenizer=text_tokenizer,
phoneme_tokenizer=phoneme_tokenizer,
        )
| PypiClean |
/Axelrod-4.13.0.tar.gz/Axelrod-4.13.0/axelrod/strategies/axelrod_second.py | from typing import List
import numpy as np
from axelrod.action import Action
from axelrod.interaction_utils import compute_final_score
from axelrod.player import Player
from axelrod.strategies.finite_state_machines import FSMPlayer
C, D = Action.C, Action.D
class SecondByChampion(Player):
"""
Strategy submitted to Axelrod's second tournament by Danny Champion.
    This player cooperates on the first 10 moves and plays Tit for Tat for the
    next 15 moves. After 25 moves, the program cooperates unless all of the
    following are true: the other player defected on the previous move, the
    other player has cooperated less than 60% of the time, and a random number
    between 0 and 1 is greater than the other player's cooperation rate.
Names:
- Champion: [Axelrod1980b]_
"""
name = "Second by Champion"
classifier = {
"memory_depth": float("inf"),
"stochastic": True,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def strategy(self, opponent: Player) -> Action:
"""Actual strategy definition that determines player's action."""
current_round = len(self.history)
# Cooperate for the first 10 turns
if current_round == 0:
return C
if current_round < 10:
return C
# Mirror partner for the next phase
if current_round < 25:
return opponent.history[-1]
# Now cooperate unless all of the necessary conditions are true
defection_prop = opponent.defections / len(opponent.history)
if opponent.history[-1] == D:
r = self._random.random()
if defection_prop >= max(0.4, r):
return D
return C
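# A minimal usage sketch (assuming the public axelrod Match API; the pairing
# and turn count are arbitrary):
#
#   import axelrod as axl
#   match = axl.Match((axl.TitForTat(), axl.SecondByChampion()), turns=50)
#   match.play()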
class SecondByEatherley(Player):
"""
Strategy submitted to Axelrod's second tournament by Graham Eatherley.
A player that keeps track of how many times in the game the other player
defected. After the other player defects, it defects with a probability
equal to the ratio of the other's total defections to the total moves to
that point.
Names:
- Eatherley: [Axelrod1980b]_
"""
name = "Second by Eatherley"
classifier = {
"memory_depth": float("inf"),
"stochastic": True,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def strategy(self, opponent: Player) -> Action:
"""Actual strategy definition that determines player's action."""
# Cooperate on the first move
if not len(opponent.history):
return C
# Reciprocate cooperation
if opponent.history[-1] == C:
return C
# Respond to defections with probability equal to opponent's total
# proportion of defections
defection_prop = opponent.defections / len(opponent.history)
return self._random.random_choice(1 - defection_prop)
class SecondByTester(Player):
"""
Submitted to Axelrod's second tournament by David Gladstein.
This strategy is a TFT variant that attempts to exploit certain strategies. It
    defects on the first move. If the opponent ever defects, TESTER 'apologizes' by
cooperating and then plays TFT for the rest of the game. Otherwise TESTER
alternates cooperation and defection.
This strategy came 46th in Axelrod's second tournament.
Names:
- Tester: [Axelrod1980b]_
"""
name = "Second by Tester"
classifier = {
"memory_depth": float("inf"),
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self) -> None:
super().__init__()
self.is_TFT = False
def strategy(self, opponent: Player) -> Action:
"""Actual strategy definition that determines player's action."""
# Defect on the first move
if not opponent.history:
return D
# Am I TFT?
if self.is_TFT:
return D if opponent.history[-1:] == [D] else C
else:
# Did opponent defect?
if opponent.history[-1] == D:
self.is_TFT = True
return C
if len(self.history) in [1, 2]:
return C
# Alternate C and D
return self.history[-1].flip()
class SecondByGladstein(Player):
"""
Submitted to Axelrod's second tournament by David Gladstein.
This strategy is also known as Tester and is based on the reverse
engineering of the Fortran strategies from Axelrod's second tournament.
This strategy is a TFT variant that defects on the first round in order to
test the opponent's response. If the opponent ever defects, the strategy
'apologizes' by cooperating and then plays TFT for the rest of the game.
Otherwise, it defects as much as possible subject to the constraint that
the ratio of its defections to moves remains under 0.5, not counting the
first defection.
Names:
- Gladstein: [Axelrod1980b]_
- Tester: [Axelrod1980b]_
"""
name = "Second by Gladstein"
classifier = {
"memory_depth": float("inf"),
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self) -> None:
super().__init__()
# This strategy assumes the opponent is a patsy
self.patsy = True
def strategy(self, opponent: Player) -> Action:
"""Actual strategy definition that determines player's action."""
# Defect on the first move
if not self.history:
return D
# Is the opponent a patsy?
if self.patsy:
# If the opponent defects, apologize and play TFT.
if opponent.history[-1] == D:
self.patsy = False
return C
            # Defect once the cooperation ratio exceeds 0.5, otherwise cooperate
cooperation_ratio = self.cooperations / len(self.history)
if cooperation_ratio > 0.5:
return D
return C
else:
# Play TFT
return opponent.history[-1]
class SecondByTranquilizer(Player):
"""
Submitted to Axelrod's second tournament by Craig Feathers
Description given in Axelrod's "More Effective Choice in the
Prisoner's Dilemma" paper: The rule normally cooperates but
is ready to defect if the other player defects too often.
Thus the rule tends to cooperate for the first dozen or two moves
if the other player is cooperating, but then it throws in a
defection. If the other player continues to cooperate, then defections
become more frequent. But as long as Tranquilizer is maintaining an
average payoff of at least 2.25 points per move, it will never defect
twice in succession and it will not defect more than
one-quarter of the time.
This implementation is based on the reverse engineering of the
Fortran strategy K67R from Axelrod's second tournament.
    Reverse engineered by: Owen Campbell, Will Guo and Mansour Hakem.
The strategy starts by cooperating and has 3 states.
At the start of the strategy it updates its states:
- It counts the number of consecutive defections by the opponent.
- If it was in state 2 it moves to state 0 and calculates the
following quantities two_turns_after_good_defection_ratio and
two_turns_after_good_defection_ratio_count.
Formula for:
two_turns_after_good_defection_ratio:
self.two_turns_after_good_defection_ratio = (
((self.two_turns_after_good_defection_ratio
* self.two_turns_after_good_defection_ratio_count)
+ (3 - (3 * self.dict[opponent.history[-1]]))
+ (2 * self.dict[self.history[-1]])
- ((self.dict[opponent.history[-1]]
* self.dict[self.history[-1]])))
/ (self.two_turns_after_good_defection_ratio_count + 1)
)
two_turns_after_good_defection_ratio_count =
    two_turns_after_good_defection_ratio_count + 1
- If it was in state 1 it moves to state 2 and calculates the
following quantities one_turn_after_good_defection_ratio and
one_turn_after_good_defection_ratio_count.
Formula for:
one_turn_after_good_defection_ratio:
self.one_turn_after_good_defection_ratio = (
((self.one_turn_after_good_defection_ratio
* self.one_turn_after_good_defection_ratio_count)
+ (3 - (3 * self.dict[opponent.history[-1]]))
+ (2 * self.dict[self.history[-1]])
- (self.dict[opponent.history[-1]]
* self.dict[self.history[-1]]))
/ (self.one_turn_after_good_defection_ratio_count + 1)
)
one_turn_after_good_defection_ratio_count:
one_turn_after_good_defection_ratio_count =
        one_turn_after_good_defection_ratio_count + 1
If after this it is in state 1 or 2 then it cooperates.
If it is in state 0 it will potentially perform 1 of the 2
following stochastic tests:
1. If average score per turn is greater than 2.25 then it calculates a
value of probability:
probability = (
(.95 - (((self.one_turn_after_good_defection_ratio)
+ (self.two_turns_after_good_defection_ratio) - 5) / 15))
+ (1 / (((len(self.history))+1) ** 2))
- (self.dict[opponent.history[-1]] / 4)
)
    and will cooperate if a randomly sampled number is less than that value of
probability. If it does not cooperate then the strategy moves to state 1
and defects.
2. If average score per turn is greater than 1.75 but less than 2.25
then it calculates a value of probability:
probability = (
(.25 + ((opponent.cooperations + 1) / ((len(self.history)) + 1)))
- (self.opponent_consecutive_defections * .25)
+ ((current_score[0]
- current_score[1]) / 100)
+ (4 / ((len(self.history)) + 1))
)
    and will cooperate if a randomly sampled number is less than that value of
probability. If not, it defects.
If none of the above holds the player simply plays tit for tat.
    Tranquilizer came in 27th place in Axelrod's second tournament.
Names:
- Tranquilizer: [Axelrod1980]_
"""
name = "Second by Tranquilizer"
classifier = {
"memory_depth": float("inf"),
"stochastic": True,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self):
super().__init__()
self.num_turns_after_good_defection = 0 # equal to FD variable
self.opponent_consecutive_defections = 0 # equal to S variable
self.one_turn_after_good_defection_ratio = 5 # equal to AD variable
self.two_turns_after_good_defection_ratio = 0 # equal to NO variable
self.one_turn_after_good_defection_ratio_count = (
1 # equal to AK variable
)
self.two_turns_after_good_defection_ratio_count = (
1 # equal to NK variable
)
        # All of the above variables correspond to those in the original Fortran code
self.dict = {C: 0, D: 1}
def update_state(self, opponent):
"""
Calculates the ratio values for the one_turn_after_good_defection_ratio,
two_turns_after_good_defection_ratio and the probability values,
and sets the value of num_turns_after_good_defection.
"""
if opponent.history[-1] == D:
self.opponent_consecutive_defections += 1
else:
self.opponent_consecutive_defections = 0
if self.num_turns_after_good_defection == 2:
self.num_turns_after_good_defection = 0
self.two_turns_after_good_defection_ratio = (
(
self.two_turns_after_good_defection_ratio
* self.two_turns_after_good_defection_ratio_count
)
+ (3 - (3 * self.dict[opponent.history[-1]]))
+ (2 * self.dict[self.history[-1]])
- (
(
self.dict[opponent.history[-1]]
* self.dict[self.history[-1]]
)
)
) / (self.two_turns_after_good_defection_ratio_count + 1)
self.two_turns_after_good_defection_ratio_count += 1
elif self.num_turns_after_good_defection == 1:
self.num_turns_after_good_defection = 2
self.one_turn_after_good_defection_ratio = (
(
self.one_turn_after_good_defection_ratio
* self.one_turn_after_good_defection_ratio_count
)
+ (3 - (3 * self.dict[opponent.history[-1]]))
+ (2 * self.dict[self.history[-1]])
- (
self.dict[opponent.history[-1]]
* self.dict[self.history[-1]]
)
) / (self.one_turn_after_good_defection_ratio_count + 1)
self.one_turn_after_good_defection_ratio_count += 1
def strategy(self, opponent: Player) -> Action:
"""Actual strategy definition that determines player's action."""
if not self.history:
return C
self.update_state(opponent)
if self.num_turns_after_good_defection in [1, 2]:
return C
current_score = compute_final_score(zip(self.history, opponent.history))
if (current_score[0] / ((len(self.history)) + 1)) >= 2.25:
probability = (
(
0.95
- (
(
(self.one_turn_after_good_defection_ratio)
+ (self.two_turns_after_good_defection_ratio)
- 5
)
/ 15
)
)
+ (1 / (((len(self.history)) + 1) ** 2))
- (self.dict[opponent.history[-1]] / 4)
)
if self._random.random() <= probability:
return C
self.num_turns_after_good_defection = 1
return D
if (current_score[0] / ((len(self.history)) + 1)) >= 1.75:
probability = (
(
0.25
+ ((opponent.cooperations + 1) / ((len(self.history)) + 1))
)
- (self.opponent_consecutive_defections * 0.25)
+ ((current_score[0] - current_score[1]) / 100)
+ (4 / ((len(self.history)) + 1))
)
if self._random.random() <= probability:
return C
return D
return opponent.history[-1]
class SecondByGrofman(Player):
"""
Submitted to Axelrod's second tournament by Bernard Grofman.
This strategy has 3 phases:
1. First it cooperates on the first two rounds
2. For rounds 3-7 inclusive, it plays the same as the opponent's last move
3. Thereafter, it applies the following logic, looking at its memory of the
last 8\* rounds (ignoring the most recent round).
- If its own previous move was C and the opponent has defected less than
3 times in the last 8\* rounds, cooperate
- If its own previous move was C and the opponent has defected 3 or
more times in the last 8\* rounds, defect
- If its own previous move was D and the opponent has defected only once
or not at all in the last 8\* rounds, cooperate
- If its own previous move was D and the opponent has defected more than
once in the last 8\* rounds, defect
The code looks at the first 7 of the last 8 rounds, ignoring the most
recent round.
Names:
- Grofman's strategy: [Axelrod1980b]_
- K86R: [Axelrod1980b]_
"""
name = "Second by Grofman"
classifier = {
"memory_depth": 8,
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def strategy(self, opponent: Player) -> Action:
"""Actual strategy definition that determines player's action."""
# Cooperate on the first two moves
if len(self.history) < 2:
return C
# For rounds 3-7, play the opponent's last move
elif 2 <= len(self.history) <= 6:
return opponent.history[-1]
else:
            # Note: the Fortran code ignores the opponent's behavior in the
            # last round and instead looks at the first 7 of the last 8
            # rounds.
opponent_defections_last_8_rounds = opponent.history[-8:-1].count(D)
if self.history[-1] == C and opponent_defections_last_8_rounds <= 2:
return C
if self.history[-1] == D and opponent_defections_last_8_rounds <= 1:
return C
return D
class SecondByKluepfel(Player):
"""
Strategy submitted to Axelrod's second tournament by Charles Kluepfel
(K32R).
    This player keeps track of the opponent's responses to its own behavior:
- `cd_count` counts: Opponent cooperates as response to player defecting.
- `dd_count` counts: Opponent defects as response to player defecting.
- `cc_count` counts: Opponent cooperates as response to player cooperating.
- `dc_count` counts: Opponent defects as response to player cooperating.
After 26 turns, the player then tries to detect a random player. The
player decides that the opponent is random if
cd_counts >= (cd_counts+dd_counts)/2 - 0.75*sqrt(cd_counts+dd_counts) AND
cc_counts >= (dc_counts+cc_counts)/2 - 0.75*sqrt(dc_counts+cc_counts).
If the player decides that they are playing against a random player, then
they will always defect.
Otherwise respond to recent history using the following set of rules:
- If opponent's last three choices are the same, then respond in kind.
- If opponent's last two choices are the same, then respond in kind with
probability 90%.
- Otherwise if opponent's last action was to cooperate, then cooperate
with probability 70%.
- Otherwise if opponent's last action was to defect, then defect
with probability 60%.
Names:
- Kluepfel: [Axelrod1980b]_
"""
name = "Second by Kluepfel"
classifier = {
"memory_depth": float("inf"),
"stochastic": True,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self):
super().__init__()
self.cd_counts, self.dd_counts, self.dc_counts, self.cc_counts = (
0,
0,
0,
0,
)
def strategy(self, opponent: Player) -> Action:
"""Actual strategy definition that determines player's action."""
# First update the response matrix.
if len(self.history) >= 2:
if self.history[-2] == D:
if opponent.history[-1] == C:
self.cd_counts += 1
else:
self.dd_counts += 1
else:
if opponent.history[-1] == C:
self.cc_counts += 1
else:
self.dc_counts += 1
# Check for randomness
if len(self.history) > 26:
if self.cd_counts >= (
self.cd_counts + self.dd_counts
) / 2 - 0.75 * np.sqrt(
self.cd_counts + self.dd_counts
) and self.dc_counts >= (
self.dc_counts + self.cc_counts
) / 2 - 0.75 * np.sqrt(
self.dc_counts + self.cc_counts
):
return D
# Otherwise respond to recent history
one_move_ago, two_moves_ago, three_moves_ago = C, C, C
if len(opponent.history) >= 1:
one_move_ago = opponent.history[-1]
if len(opponent.history) >= 2:
two_moves_ago = opponent.history[-2]
if len(opponent.history) >= 3:
three_moves_ago = opponent.history[-3]
if one_move_ago == two_moves_ago and two_moves_ago == three_moves_ago:
return one_move_ago
r = self._random.random() # Everything following is stochastic
if one_move_ago == two_moves_ago:
if r < 0.9:
return one_move_ago
else:
return one_move_ago.flip()
if one_move_ago == C:
if r < 0.7:
return one_move_ago
else:
return one_move_ago.flip()
if one_move_ago == D:
if r < 0.6:
return one_move_ago
else:
return one_move_ago.flip()
class SecondByBorufsen(Player):
"""
Strategy submitted to Axelrod's second tournament by Otto Borufsen
(K32R), and came in third in that tournament.
    This player keeps track of the opponent's responses to its own behavior:
- `cd_count` counts: Opponent cooperates as response to player defecting.
- `cc_count` counts: Opponent cooperates as response to player cooperating.
The player has a defect mode and a normal mode. In defect mode, the
player will always defect. In normal mode, the player obeys the following
ranked rules:
1. If in the last three turns, both the player/opponent defected, then
cooperate for a single turn.
2. If in the last three turns, the player/opponent acted differently from
each other and they're alternating, then change next defect to
cooperate. (Doesn't block third rule.)
3. Otherwise, do tit-for-tat.
Start in normal mode, but every 25 turns starting with the 27th turn,
re-evaluate the mode. Enter defect mode if any of the following
conditions hold:
- Detected random: Opponent cooperated 7-18 times since last mode
evaluation (or start) AND less than 70% of opponent cooperation was in
response to player's cooperation, i.e.
cc_count / (cc_count+cd_count) < 0.7
- Detect defective: Opponent cooperated fewer than 3 times since last mode
evaluation.
When switching to defect mode, defect immediately. The first two rules for
    normal mode require that the last three turns were in normal mode. When starting
    normal mode from defect mode, defect on the first move.
Names:
- Borufsen: [Axelrod1980b]_
"""
name = "Second by Borufsen"
classifier = {
"memory_depth": float("inf"),
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self):
super().__init__()
self.cd_counts, self.cc_counts = 0, 0
self.mutual_defect_streak = 0
self.echo_streak = 0
self.flip_next_defect = False
self.mode = "Normal"
def try_return(self, to_return):
"""
We put the logic here to check for the `flip_next_defect` bit here,
and proceed like normal otherwise.
"""
if to_return == C:
return C
# Otherwise look for flip bit.
if self.flip_next_defect:
self.flip_next_defect = False
return C
return D
def strategy(self, opponent: Player) -> Action:
"""Actual strategy definition that determines player's action."""
turn = len(self.history) + 1
if turn == 1:
return C
# Update the response history.
if turn >= 3:
if opponent.history[-1] == C:
if self.history[-2] == C:
self.cc_counts += 1
else:
self.cd_counts += 1
# Check if it's time for a mode change.
if turn > 2 and turn % 25 == 2:
coming_from_defect = False
if self.mode == "Defect":
coming_from_defect = True
self.mode = "Normal"
coops = self.cd_counts + self.cc_counts
# Check for a defective strategy
if coops < 3:
self.mode = "Defect"
# Check for a random strategy
if (8 <= coops <= 17) and self.cc_counts / coops < 0.7:
self.mode = "Defect"
self.cd_counts, self.cc_counts = 0, 0
# If defect mode, clear flags
if self.mode == "Defect":
self.mutual_defect_streak = 0
self.echo_streak = 0
self.flip_next_defect = False
# Check this special case
if self.mode == "Normal" and coming_from_defect:
return D
# Proceed
if self.mode == "Defect":
return D
else:
assert self.mode == "Normal"
# Look for mutual defects
if self.history[-1] == D and opponent.history[-1] == D:
self.mutual_defect_streak += 1
else:
self.mutual_defect_streak = 0
if self.mutual_defect_streak >= 3:
self.mutual_defect_streak = 0
self.echo_streak = 0 # Reset both streaks.
return self.try_return(C)
# Look for echoes
# Fortran code defaults two turns back to C if only second turn
my_two_back, opp_two_back = C, C
if turn >= 3:
my_two_back = self.history[-2]
opp_two_back = opponent.history[-2]
if (
self.history[-1] != opponent.history[-1]
and self.history[-1] == opp_two_back
and opponent.history[-1] == my_two_back
):
self.echo_streak += 1
else:
self.echo_streak = 0
if self.echo_streak >= 3:
self.mutual_defect_streak = 0 # Reset both streaks.
self.echo_streak = 0
self.flip_next_defect = True
# Tit-for-tat
return self.try_return(opponent.history[-1])
class SecondByCave(Player):
"""
Strategy submitted to Axelrod's second tournament by Rob Cave (K49R), and
came in fourth in that tournament.
First look for overly-defective or apparently random opponents, and defect
if found. That is any opponent meeting one of:
- turn > 39 and percent defects > 0.39
- turn > 29 and percent defects > 0.65
- turn > 19 and percent defects > 0.79
Otherwise, respond to cooperation with cooperation. And respond to defections
with either a defection (if opponent has defected at least 18 times) or with
a random (50/50) choice. [Cooperate on first.]
Names:
- Cave: [Axelrod1980b]_
"""
name = "Second by Cave"
classifier = {
"memory_depth": float("inf"),
"stochastic": True,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def strategy(self, opponent: Player) -> Action:
"""Actual strategy definition that determines player's action."""
turn = len(self.history) + 1
if turn == 1:
return C
number_defects = opponent.defections
perc_defects = number_defects / turn
# Defect if the opponent has defected often or appears random.
if turn > 39 and perc_defects > 0.39:
return D
if turn > 29 and perc_defects > 0.65:
return D
if turn > 19 and perc_defects > 0.79:
return D
if opponent.history[-1] == D:
if number_defects > 17:
return D
else:
return self._random.random_choice(0.5)
else:
return C
class SecondByWmAdams(Player):
"""
Strategy submitted to Axelrod's second tournament by William Adams (K44R),
and came in fifth in that tournament.
Count the number of opponent defections after their first move, call
`c_defect`. Defect if c_defect equals 4, 7, or 9. If c_defect > 9,
then defect immediately after opponent defects with probability =
(0.5)^(c_defect-1). Otherwise cooperate.
Names:
- WmAdams: [Axelrod1980b]_
"""
name = "Second by WmAdams"
classifier = {
"memory_depth": float("inf"),
"stochastic": True,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def strategy(self, opponent: Player) -> Action:
"""Actual strategy definition that determines player's action."""
if len(self.history) <= 1:
return C
number_defects = opponent.defections
if opponent.history[0] == D:
number_defects -= 1
if number_defects in [4, 7, 9]:
return D
if number_defects > 9 and opponent.history[-1] == D:
return self._random.random_choice((0.5) ** (number_defects - 9))
return C
class SecondByGraaskampKatzen(Player):
"""
Strategy submitted to Axelrod's second tournament by Jim Graaskamp and Ken
Katzen (K60R), and came in sixth in that tournament.
Play Tit-for-Tat at first, and track own score. At select checkpoints,
check for a high score. Switch to Default Mode if:
- On move 11, score < 23
- On move 21, score < 53
- On move 31, score < 83
- On move 41, score < 113
- On move 51, score < 143
- On move 101, score < 293
Once in Defect Mode, defect forever.
Names:
- GraaskampKatzen: [Axelrod1980b]_
"""
name = "Second by GraaskampKatzen"
classifier = {
"memory_depth": float("inf"),
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self):
super().__init__()
self.own_score = 0
self.mode = "Normal"
def update_score(self, opponent: Player):
game = self.match_attributes["game"]
last_round = (self.history[-1], opponent.history[-1])
self.own_score += game.score(last_round)[0]
def strategy(self, opponent: Player) -> Action:
"""Actual strategy definition that determines player's action."""
if self.mode == "Defect":
return D
turn = len(self.history) + 1
if turn == 1:
return C
self.update_score(opponent)
if (
turn == 11
and self.own_score < 23
or turn == 21
and self.own_score < 53
or turn == 31
and self.own_score < 83
or turn == 41
and self.own_score < 113
or turn == 51
and self.own_score < 143
or turn == 101
and self.own_score < 293
):
self.mode = "Defect"
return D
return opponent.history[-1] # Tit-for-Tat
class SecondByWeiner(Player):
"""
Strategy submitted to Axelrod's second tournament by Herb Weiner (K41R),
and came in seventh in that tournament.
Play Tit-for-Tat with a chance for forgiveness and a defective override.
The chance for forgiveness happens only if `forgive_flag` is raised
(flag discussed below). If raised and `turn` is greater than `grudge`,
then override Tit-for-Tat with Cooperation. `grudge` is a variable that
    starts at 0 and increments by 20 with each forgiven Defect (a Defect that is
    overridden through the forgiveness logic). `forgive_flag` is lowered whether
    the logic is overridden or not.
The variable `defect_padding` increments with each opponent Defect, but
resets to zero with each opponent Cooperate (or `forgive_flag` lowering) so
that it roughly counts Defects between Cooperates. Whenever the opponent
    Cooperates, if `defect_padding` (before resetting) is odd, then we raise
    `forgive_flag` for the next turn.
Finally a defective override is assessed after forgiveness. If five or
more of the opponent's last twelve actions are Defects, then Defect. This
will overrule a forgiveness, but doesn't undo the lowering of
    `forgive_flag`. Note that "last twelve actions" doesn't count the most
recent action. Actually the original code updates history after checking
for defect override.
Names:
- Weiner: [Axelrod1980b]_
"""
name = "Second by Weiner"
classifier = {
"memory_depth": float("inf"),
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self):
super().__init__()
self.forgive_flag = False
self.grudge = 0
self.defect_padding = 0
self.last_twelve = [0] * 12
self.lt_index = 0 # Circles around last_twelve
def try_return(self, to_return):
"""
We put the logic here to check for the defective override.
"""
if np.sum(self.last_twelve) >= 5:
return D
return to_return
def strategy(self, opponent: Player) -> Action:
"""Actual strategy definition that determines player's action."""
if len(opponent.history) == 0:
return C
# Update history, lag 1.
if len(opponent.history) >= 2:
self.last_twelve[self.lt_index] = 0
if opponent.history[-2] == D:
self.last_twelve[self.lt_index] = 1
self.lt_index = (self.lt_index + 1) % 12
if self.forgive_flag:
self.forgive_flag = False
self.defect_padding = 0
if (
self.grudge < len(self.history) + 1
and opponent.history[-1] == D
):
# Then override
self.grudge += 20
return self.try_return(C)
else:
return self.try_return(opponent.history[-1])
else:
# See if forgive_flag should be raised
if opponent.history[-1] == D:
self.defect_padding += 1
else:
if self.defect_padding % 2 == 1:
self.forgive_flag = True
self.defect_padding = 0
return self.try_return(opponent.history[-1])
class SecondByHarrington(Player):
"""
Strategy submitted to Axelrod's second tournament by Paul Harrington (K75R)
and came in eighth in that tournament.
This strategy has three modes: Normal, Fair-weather, and Defect. These
mode names were not present in Harrington's submission.
In Normal and Fair-weather modes, the strategy begins by:
- Update history
- Try to detect random opponent if turn is multiple of 15 and >=30.
- Check if `burned` flag should be raised.
- Check for Fair-weather opponent if turn is 38.
Updating history means to increment the correct cell of the `move_history`.
`move_history` is a matrix where the columns are the opponent's previous
move and the rows are indexed by the combo of this player's and the
opponent's moves two turns ago. [The upper-left cell must be all
Cooperations, but otherwise order doesn't matter.] After we enter Defect
mode, `move_history` won't be used again.
If the turn is a multiple of 15 and >=30, then attempt to detect random.
If random is detected, enter Defect mode and defect immediately. If the
player was previously in Defect mode, then do not re-enter. The random
detection logic is a modified Pearson's Chi Squared test, with some
additional checks. [More details in `detect_random` docstrings.]
Some of this player's moves are marked as "generous." If this player made
a generous move two turns ago and the opponent replied with a Defect, then
raise the `burned` flag. This will stop certain generous moves later.
The player mostly plays Tit-for-Tat for the first 36 moves, then defects on
the 37th move. If the opponent cooperates on the first 36 moves, and
defects on the 37th move also, then enter Fair-weather mode and cooperate
this turn. Entering Fair-weather mode is extremely rare, since this can
only happen if the opponent cooperates for the first 36 then defects
unprovoked on the 37th. (That is, this player's first 36 moves are also
Cooperations, so there's nothing really to trigger an opponent Defection.)
Next in Normal Mode:
1. Check for defect and parity streaks.
2. Check if cooperations are scheduled.
3. Otherwise,
- If turn < 37, Tit-for-Tat.
- If turn = 37, defect, mark this move as generous, and schedule two
more cooperations**.
- If turn > 37, then if `burned` flag is raised, then Tit-for-Tat.
Otherwise, Tit-for-Tat with probability 1 - `prob`. And with
probability `prob`, defect, schedule two cooperations, mark this move
as generous, and increase `prob` by 5%.
** Scheduling two cooperations means to set `more_coop` flag to two. If in
Normal mode and no streaks are detected, then the player will cooperate and
lower this flag, until hitting zero. It's possible that the flag can be
overwritten. Notably, the turn-37 defect sets this to two, but the
turn-38 Fair-weather check may set it again.
If the opponent's last twenty moves were defections, then defect this turn.
Then check for a parity streak, by flipping the parity bit (there are two
streaks that get tracked which are something like odd and even turns, but
this flip bit logic doesn't get run every turn), then incrementing the
parity streak that we're pointing to. If the parity streak that we're
pointing to is then greater than `parity_limit` then reset the streak and
cooperate immediately. `parity_limit` is initially set to five, but after
it has been hit eight times, it decreases to three. The parity streak that
we're pointing to also gets incremented if in normal mode and we defect but
not on turn 38, unless we are defecting as the result of a defect streak.
Note that the parity streak resets but the defect streak doesn't.
If `more_coop` >= 1, then we cooperate and lower that flag here, in Normal
mode after checking streaks. Still lower this flag if cooperating as the
result of a parity streak or in Fair-weather mode.
Then use the logic based on turn from above.
In Fair-Weather mode after running the code from above, check if opponent
defected last turn. If so, exit Fair-Weather mode, and proceed THIS TURN
with Normal mode. Otherwise cooperate.
In Defect mode, update the `exit_defect_meter` (originally zero) by
incrementing if opponent defected last turn and decreasing by three
otherwise. If `exit_defect_meter` is then 11, then set mode to Normal (for
future turns), cooperate and schedule two more cooperations. [Note that
this move is not marked generous.]
Names:
- Harrington: [Axelrod1980b]_
"""
name = "Second by Harrington"
classifier = {
"memory_depth": float("inf"),
"stochastic": True,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self):
super().__init__()
self.mode = "Normal"
self.recorded_defects = 0 # Count opponent defects after turn 1
self.exit_defect_meter = 0 # When >= 11, then exit defect mode.
self.coops_in_first_36 = (
None # On turn 37, count cooperations in first 36
)
self.was_defective = False # Previously in Defect mode
self.prob = 0.25 # After turn 37, probability that we'll defect
self.move_history = np.zeros([4, 2])
self.more_coop = 0 # This schedules cooperation for future turns
# Initial last_generous_n_turns_ago to 3 because this counts up and
# triggers a strategy change at 2.
self.last_generous_n_turns_ago = (
3  # How many turns ago was a "generous" move
)
self.burned = False
self.defect_streak = 0
self.parity_streak = [
0,
0,
] # Counters that get (almost) alternatively incremented.
self.parity_bit = 0 # Which parity_streak to increment
self.parity_limit = (
5 # When a parity streak hits this limit, alter strategy.
)
self.parity_hits = 0 # Counts how many times a parity_limit was hit.
# After hitting parity_hits 8 times, lower parity_limit to 3.
def try_return(self, to_return, lower_flags=True, inc_parity=False):
"""
This will return to_return, with some end-of-turn logic.
"""
if lower_flags and to_return == C:
# In most cases when Cooperating, we want to reduce the number that
# are scheduled.
self.more_coop -= 1
self.last_generous_n_turns_ago += 1
if inc_parity and to_return == D:
# In some cases we increment the `parity_streak` that we're on when
# we return a Defection. In detect_parity_streak, `parity_streak`
# counts opponent's Defections.
self.parity_streak[self.parity_bit] += 1
return to_return
def calculate_chi_squared(self, turn):
"""
Pearson's Chi Squared statistic = sum[ (E_i - O_i)^2 / E_i ], where O_i
are the observed matrix values, and E_i is calculated as the row total
times the column total divided by the total number of recorded turns.
Equivalently, for an independent distribution, we expect each cell to
hold the total number of recorded turns times the portion in that row
times the portion in that column.
In this function, the statistic is non-standard in that it excludes
summands where E_i <= 1.
"""
denom = turn - 2
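# `turn - 2` is the number of rounds recorded in `move_history`: recording
# starts on turn 3, once both players have a move from two turns ago, so by
# turn `turn` the matrix holds turn - 2 entries.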
expected_matrix = (
np.outer(
self.move_history.sum(axis=1), self.move_history.sum(axis=0)
)
/ denom
)
chi_squared = 0.0
for i in range(4):
for j in range(2):
expect = expected_matrix[i, j]
if expect > 1.0:
chi_squared += (
expect - self.move_history[i, j]
) ** 2 / expect
return chi_squared
def detect_random(self, turn):
"""
We check if the top-left cell of the matrix (corresponding to all
Cooperations) has over 80% of the turns. In which case, we label
non-random.
Then we check if over 75% or under 25% of the opponent's turns are
Defections. If so, then we label as non-random.
Otherwise we calculate a modified Pearson's Chi Squared statistic on
`self.move_history`, and return True (is random) if and only if the
statistic is less than or equal to 3.
"""
denom = turn - 2
if self.move_history[0, 0] / denom >= 0.8:
return False
if (
self.recorded_defects / denom < 0.25
or self.recorded_defects / denom > 0.75
):
return False
if self.calculate_chi_squared(turn) > 3:
return False
return True
def detect_streak(self, last_move):
"""
Return true if and only if the opponent's last twenty moves are defects.
"""
if last_move == D:
self.defect_streak += 1
else:
self.defect_streak = 0
if self.defect_streak >= 20:
return True
return False
def detect_parity_streak(self, last_move):
"""
Switch which `parity_streak` we're pointing to and increment it if the
opponent's last move was a Defection. Otherwise reset the flag. Then
return true if and only if the `parity_streak` is at least
`parity_limit`.
This is similar to detect_streak with alternating streaks, except that
these streaks get incremented elsewhere as well.
"""
self.parity_bit = 1 - self.parity_bit # Flip bit
if last_move == D:
self.parity_streak[self.parity_bit] += 1
else:
self.parity_streak[self.parity_bit] = 0
if self.parity_streak[self.parity_bit] >= self.parity_limit:
return True
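# Implicitly returns None (falsy) when the parity limit has not been reached.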
def strategy(self, opponent: Player) -> Action:
"""Actual strategy definition that determines player's action."""
turn = len(self.history) + 1
if turn == 1:
return C
if self.mode == "Defect":
# There's a chance to exit Defect mode.
if opponent.history[-1] == D:
self.exit_defect_meter += 1
else:
self.exit_defect_meter -= 3
# If opponent has been mostly defecting.
if self.exit_defect_meter >= 11:
self.mode = "Normal"
self.was_defective = True
self.more_coop = 2
return self.try_return(to_return=C, lower_flags=False)
return self.try_return(D)
# If not Defect mode, proceed to update history and check for random,
# check if burned, and check if opponent's fairweather.
# If we haven't yet entered Defect mode
if not self.was_defective:
if turn > 2:
if opponent.history[-1] == D:
self.recorded_defects += 1
# Column decided by opponent's last turn
history_col = 1 if opponent.history[-1] == D else 0
# Row is decided by opponent's move two turns ago and our move
# two turns ago.
history_row = 1 if opponent.history[-2] == D else 0
if self.history[-2] == D:
history_row += 2
self.move_history[history_row, history_col] += 1
# Try to detect random opponent
if turn % 15 == 0 and turn > 15:
if self.detect_random(turn):
self.mode = "Defect"
return self.try_return(
D, lower_flags=False
) # Lower_flags not used here.
# If generous 2 turns ago and opponent defected last turn
if self.last_generous_n_turns_ago == 2 and opponent.history[-1] == D:
self.burned = True
# Only enter Fair-weather mode if the opponent Cooperated on the first 36
# turns then Defected on the 37th (checked here on our 38th turn).
if (
turn == 38
and opponent.history[-1] == D
and opponent.cooperations == 36
):
self.mode = "Fair-weather"
return self.try_return(to_return=C, lower_flags=False)
if self.mode == "Fair-weather":
if opponent.history[-1] == D:
self.mode = "Normal" # Post-Defect is not possible
# Proceed with Normal mode this turn.
else:
# Never defect against a fair-weather opponent
return self.try_return(C)
# Continue with Normal mode
# Check for streaks
if self.detect_streak(opponent.history[-1]):
return self.try_return(D, inc_parity=True)
if self.detect_parity_streak(opponent.history[-1]):
self.parity_streak[
self.parity_bit
] = 0 # Reset `parity_streak` when we hit the limit.
self.parity_hits += (
1 # Keep track of how many times we hit the limit.
)
if self.parity_hits >= 8: # After 8 times, lower the limit.
self.parity_limit = 3
return self.try_return(
C, inc_parity=True
) # Inc parity won't get used here.
# If we have Cooperations scheduled, then Cooperate here.
if self.more_coop >= 1:
return self.try_return(C, lower_flags=True, inc_parity=True)
if turn < 37:
# Tit-for-Tat
return self.try_return(opponent.history[-1], inc_parity=True)
if turn == 37:
# Defect once on turn 37 (if no streaks)
self.more_coop, self.last_generous_n_turns_ago = 2, 1
return self.try_return(D, lower_flags=False)
if self.burned or self._random.random() > self.prob:
# Tit-for-Tat with probability 1-`prob`
return self.try_return(opponent.history[-1], inc_parity=True)
# Otherwise Defect, Cooperate, Cooperate, and increase `prob`
self.prob += 0.05
self.more_coop, self.last_generous_n_turns_ago = 2, 1
return self.try_return(D, lower_flags=False)
class SecondByTidemanAndChieruzzi(Player):
"""
Strategy submitted to Axelrod's second tournament by T. Nicolaus Tideman
and Paula Chieruzzi (K84R) and came in ninth in that tournament.
This strategy Cooperates if this player's score exceeds the opponent's
score by at least `score_to_beat`. `score_to_beat` starts at zero and
increases by `score_to_beat_inc` every time the opponent's last two moves
are a Cooperation and Defection in that order. `score_to_beat_inc` itself
increases by 5 every time the opponent's last two moves are a Cooperation
and Defection in that order.
Additionally, the strategy executes a "fresh start" if the following hold:
- The strategy would Defect by score (difference less than `score_to_beat`)
- The opponent did not Cooperate and Defect (in order) in the last two
turns.
- It's been at least 10 turns since the last fresh start. Or since the
match started if there hasn't been a fresh start yet.
A "fresh start" entails two Cooperations and resetting scores,
`score_to_beat` and `score_to_beat_inc`.
Names:
- TidemanAndChieruzzi: [Axelrod1980b]_
"""
name = "Second by Tideman and Chieruzzi"
classifier = {
"memory_depth": float("inf"),
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self) -> None:
super().__init__()
self.current_score = 0
self.opponent_score = 0
self.last_fresh_start = 0
self.fresh_start = False
self.score_to_beat = 0
self.score_to_beat_inc = 0
def _fresh_start(self):
"""Give the opponent a fresh start by forgetting the past"""
self.current_score = 0
self.opponent_score = 0
self.score_to_beat = 0
self.score_to_beat_inc = 0
def _score_last_round(self, opponent: Player):
"""Updates the scores for each player."""
# Load the default game if not supplied by a tournament.
game = self.match_attributes["game"]
last_round = (self.history[-1], opponent.history[-1])
scores = game.score(last_round)
self.current_score += scores[0]
self.opponent_score += scores[1]
def strategy(self, opponent: Player) -> Action:
"""Actual strategy definition that determines player's action."""
current_round = len(self.history) + 1
if current_round == 1:
return C
# Calculate the scores.
self._score_last_round(opponent)
# Check if we have recently given the strategy a fresh start.
if self.fresh_start:
self._fresh_start()
self.last_fresh_start = current_round
self.fresh_start = False
return C # Second cooperation
opponent_CDd = False
opponent_two_turns_ago = C # Default value for second turn.
if len(opponent.history) >= 2:
opponent_two_turns_ago = opponent.history[-2]
# If opponent's last two turns are C and D in that order.
if opponent_two_turns_ago == C and opponent.history[-1] == D:
opponent_CDd = True
self.score_to_beat += self.score_to_beat_inc
self.score_to_beat_inc += 5
# Cooperate if we're beating opponent by at least `score_to_beat`
if self.current_score - self.opponent_score >= self.score_to_beat:
return C
# Wait at least ten turns for another fresh start.
if (not opponent_CDd) and current_round - self.last_fresh_start >= 10:
# 50-50 split is based off the binomial distribution.
N = opponent.cooperations + opponent.defections
# std_dev = sqrt(N*p*(1-p)) where p is 1 / 2.
std_deviation = (N ** (1 / 2)) / 2
lower = N / 2 - 3 * std_deviation
upper = N / 2 + 3 * std_deviation
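# e.g. with N = 16 observed moves, std_deviation = 2, so a fresh start
# requires the opponent's defection count to be <= 2 or >= 14 (a 3-sigma
# departure from a fair coin).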
if opponent.defections <= lower or opponent.defections >= upper:
# Opponent deserves a fresh start
self.fresh_start = True
return C # First cooperation
return D
class SecondByGetzler(Player):
"""
Strategy submitted to Axelrod's second tournament by Abraham Getzler (K35R)
and came in eleventh in that tournament.
Strategy Defects with probability `flack`, where `flack` is calculated as
the sum over opponent Defections of 0.5 ^ (turns ago Defection happened).
Names:
- Getzler: [Axelrod1980b]_
"""
name = "Second by Getzler"
classifier = {
"memory_depth": float("inf"),
"stochastic": True,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self) -> None:
super().__init__()
self.flack = 0.0 # The relative untrustworthiness of opponent
def strategy(self, opponent: Player) -> Action:
"""Actual strategy definition that determines player's action."""
if not opponent.history:
return C
self.flack += 1 if opponent.history[-1] == D else 0
self.flack *= 0.5 # Defections have half-life of one round
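# e.g. defections on each of the last three turns give
# flack = 0.5 + 0.25 + 0.125 = 0.875, so the strategy cooperates with
# probability 0.125 on this turn.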
return self._random.random_choice(1.0 - self.flack)
class SecondByLeyvraz(Player):
"""
Strategy submitted to Axelrod's second tournament by Fransois Leyvraz
(K68R) and came in twelfth in that tournament.
The strategy uses the opponent's last three moves to decide on an action
based on the following ordered rules.
1. If opponent Defected last two turns, then Defect with prob 75%.
2. If opponent Defected three turns ago, then Cooperate.
3. If opponent Defected two turns ago, then Defect.
4. If opponent Defected last turn, then Defect with prob 50%.
5. Otherwise (all Cooperations), then Cooperate.
Names:
- Leyvraz: [Axelrod1980b]_
"""
name = "Second by Leyvraz"
classifier = {
"memory_depth": 3,
"stochastic": True,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self) -> None:
super().__init__()
self.prob_coop = {
(C, C, C): 1.0,
(C, C, D): 0.5, # Rule 4
(C, D, C): 0.0, # Rule 3
(C, D, D): 0.25, # Rule 1
(D, C, C): 1.0, # Rule 2
(D, C, D): 1.0, # Rule 2
(D, D, C): 1.0, # Rule 2
(D, D, D): 0.25, # Rule 1
}
def strategy(self, opponent: Player) -> Action:
recent_history = [C, C, C] # Default to C.
for go_back in range(1, 4):
if len(opponent.history) >= go_back:
recent_history[-go_back] = opponent.history[-go_back]
return self._random.random_choice(
self.prob_coop[
(recent_history[-3], recent_history[-2], recent_history[-1])
]
)
class SecondByWhite(Player):
"""
Strategy submitted to Axelrod's second tournament by Edward C White (K72R)
and came in thirteenth in that tournament.
* Cooperate in the first ten turns.
* If the opponent Cooperated last turn then Cooperate.
* Otherwise Defect if and only if:
floor(log(turn)) * opponent Defections >= turn
Names:
- White: [Axelrod1980b]_
"""
name = "Second by White"
classifier = {
"memory_depth": float("inf"),
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def strategy(self, opponent: Player) -> Action:
turn = len(self.history) + 1
if turn <= 10 or opponent.history[-1] == C:
return C
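# np.log is the natural log: e.g. at turn 30, floor(ln 30) = 3, so at
# least 10 opponent defections are needed before this returns D.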
if np.floor(np.log(turn)) * opponent.defections >= turn:
return D
return C
class SecondByBlack(Player):
"""
Strategy submitted to Axelrod's second tournament by Paul E Black (K83R)
and came in fifteenth in that tournament.
The strategy Cooperates for the first five turns. Then it calculates the
number of opponent defects in the last five moves and Cooperates with
probability `prob_coop`[`number_defects`], where:
prob_coop[number_defects] = 1 - (number_defects ^ 2 - 1) / 25
Names:
- Black: [Axelrod1980b]_
"""
name = "Second by Black"
classifier = {
"memory_depth": 5,
"stochastic": True,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self) -> None:
super().__init__()
# Maps number of opponent defects from last five moves to own
# Cooperation probability
self.prob_coop = {0: 1.0, 1: 1.0, 2: 0.88, 3: 0.68, 4: 0.4, 5: 0.04}
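# These values follow the docstring formula, capped at 1.0: e.g. for two
# defections, 1 - (2**2 - 1) / 25 = 0.88; for five, 1 - (5**2 - 1) / 25 = 0.04.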
def strategy(self, opponent: Player) -> Action:
if len(opponent.history) < 5:
return C
recent_history = opponent.history[-5:]
did_d = np.vectorize(lambda action: int(action == D))
number_defects = sum(did_d(recent_history))
return self._random.random_choice(self.prob_coop[number_defects])
class SecondByRichardHufford(Player):
"""
Strategy submitted to Axelrod's second tournament by Richard Hufford (K47R)
and came in sixteenth in that tournament.
The strategy tracks opponent "agreements", that is whenever the opponent's
previous move is the same as this player's move two turns ago. If the
opponent's first move is a Defection, this is counted as a disagreement,
and otherwise an agreement. From the agreement counts, two measures are
calculated:
- `proportion_agree`: This is the number of agreements (through opponent's
last turn) + 2 divided by the current turn number.
- `last_four_num`: The number of agreements in the last four turns. If
there have been fewer than four previous turns, then this is the number of
agreements + (4 - number of past turns).
We then use these measures to decide how to play, using these rules:
1. If `proportion_agree` > 0.9 and `last_four_num` >= 4, then Cooperate.
2. Otherwise if `proportion_agree` >= 0.625 and `last_four_num` >= 2, then
Tit-for-Tat.
3. Otherwise, Defect.
However, if the opponent has Cooperated the last `streak_needed` turns,
then the strategy deviates from the usual strategy, and instead Defects.
(We call such deviation an "aberration".) In the turn immediately after an
aberration, the strategy doesn't override, even if there's a streak of
Cooperations. Two turns after an aberration, the strategy: Restarts the
Cooperation streak (never looking before this turn); Cooperates; and
changes `streak_needed` to:
floor(20.0 * `num_abb_def` / `num_abb_coop`) + 1
Here `num_abb_def` is 2 + the number of times that the opponent Defected in
the turn after an aberration, and `num_abb_coop` is 2 + the number of times
that the opponent Cooperated in response to an aberration.
Names:
- RichardHufford: [Axelrod1980b]_
"""
name = "Second by RichardHufford"
classifier = {
"memory_depth": float("inf"),
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self) -> None:
super().__init__()
self.num_agreements = 2
self.last_four_agreements = [1] * 4
self.last_four_index = 0
self.streak_needed = 21
self.current_streak = 2
self.last_aberration = float("inf")
self.coop_after_ab_count = 2
self.def_after_ab_count = 2
def strategy(self, opponent: Player) -> Action:
turn = len(self.history) + 1
if turn == 1:
return C
# Check if opponent agreed with us.
self.last_four_index = (self.last_four_index + 1) % 4
me_two_moves_ago = C
if turn > 2:
me_two_moves_ago = self.history[-2]
if me_two_moves_ago == opponent.history[-1]:
self.num_agreements += 1
self.last_four_agreements[self.last_four_index] = 1
else:
self.last_four_agreements[self.last_four_index] = 0
# Check if last_aberration is infinite.
# i.e Not an aberration in last two turns.
if turn < self.last_aberration:
if opponent.history[-1] == C:
self.current_streak += 1
else:
self.current_streak = 0
if self.current_streak >= self.streak_needed:
self.last_aberration = turn
if self.current_streak == self.streak_needed:
return D
elif turn == self.last_aberration + 2:
self.last_aberration = float("inf")
if opponent.history[-1] == C:
self.coop_after_ab_count += 1
else:
self.def_after_ab_count += 1
self.streak_needed = (
np.floor(
20.0 * self.def_after_ab_count / self.coop_after_ab_count
)
+ 1
)
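# e.g. if the opponent has defected after aberrations once more than it has
# cooperated (counts 3 and 2), streak_needed becomes floor(20 * 3 / 2) + 1 = 31.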
self.current_streak = 0
return C
proportion_agree = self.num_agreements / turn
last_four_num = sum(self.last_four_agreements)
if proportion_agree > 0.9 and last_four_num >= 4:
return C
elif proportion_agree >= 0.625 and last_four_num >= 2:
return opponent.history[-1]
return D
class SecondByYamachi(Player):
"""
Strategy submitted to Axelrod's second tournament by Brian Yamachi (K64R)
and came in seventeenth in that tournament.
The strategy keeps track of play history through a variable called
`count_them_us_them`, which is a dict indexed by (X, Y, Z), where X is an
opponent's move and Y and Z are the following moves by this player and the
opponent, respectively. Each turn, we look at our opponent's move two
turns ago, call it X, and our move last turn, call it Y. If (X, Y, C) has
occurred more often (or as often) as (X, Y, D), then Cooperate. Otherwise
Defect. [Note that this reflects likelihood of Cooperations or Defections
in opponent's previous move; we don't update `count_them_us_them` with
previous move until next turn.]
Starting with the 41st turn, there's a possibility to override this
behavior. If `portion_defect` is between 45% and 55% (exclusive), then
Defect, where `portion_defect` equals number of opponent defects plus 0.5
divided by the turn number (indexed by 1). When overriding this way, still
record `count_them_us_them` as though the strategy didn't override.
Names:
- Yamachi: [Axelrod1980b]_
"""
name = "Second by Yamachi"
classifier = {
"memory_depth": float("inf"),
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self) -> None:
super().__init__()
self.count_them_us_them = {
(C, C, C): 0,
(C, C, D): 0,
(C, D, C): 0,
(C, D, D): 0,
(D, C, C): 0,
(D, C, D): 0,
(D, D, C): 0,
(D, D, D): 0,
}
self.mod_history = list() # type: List[Action]
def try_return(self, to_return, opp_def):
"""
Return `to_return`, unless the turn is greater than 40 AND
`portion_defect` is between 45% and 55%.
In this case, still record the history as `to_return` so that the
modified behavior doesn't affect the calculation of `count_them_us_them`.
"""
turn = len(self.history) + 1
self.mod_history.append(to_return)
# In later turns, check if the opponent is close to 50/50
# If so, then override
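# e.g. at turn 41 with 19 opponent defections, portion_defect is
# (19 + 0.5) / 41, roughly 0.476, which falls in (0.45, 0.55) and forces a D.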
if turn > 40:
portion_defect = (opp_def + 0.5) / turn
if 0.45 < portion_defect < 0.55:
return D
return to_return
def strategy(self, opponent: Player) -> Action:
turn = len(self.history) + 1
if turn == 1:
return self.try_return(C, 0)
us_last = self.mod_history[-1]
them_two_ago, us_two_ago, them_three_ago = C, C, C
if turn >= 3:
them_two_ago = opponent.history[-2]
us_two_ago = self.mod_history[-2]
if turn >= 4:
them_three_ago = opponent.history[-3]
# Update history
if turn >= 3:
self.count_them_us_them[
(them_three_ago, us_two_ago, them_two_ago)
] += 1
if (
self.count_them_us_them[(them_two_ago, us_last, C)]
>= self.count_them_us_them[(them_two_ago, us_last, D)]
):
return self.try_return(C, opponent.defections)
return self.try_return(D, opponent.defections)
class SecondByColbert(FSMPlayer):
"""
Strategy submitted to Axelrod's second tournament by William Colbert (K51R)
and came in eighteenth in that tournament.
In the first eight turns, this strategy Cooperates on all but the sixth
turn, in which it Defects. After that, the strategy responds to an
opponent Cooperation with a single Cooperation, and responds to a Defection
with a chain of responses: Defect, Defect, Cooperate, Cooperate. During
this chain, the strategy ignores opponent's moves.
Names:
- Colbert: [Axelrod1980b]_
"""
name = "Second by Colbert"
classifier = {
"memory_depth": 4,
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self) -> None:
transitions = (
(0, C, 1, C),
(0, D, 1, C), # First 8 turns are special
(1, C, 2, C),
(1, D, 2, C),
(2, C, 3, C),
(2, D, 3, C),
(3, C, 4, C),
(3, D, 4, C),
(4, C, 5, D),
(4, D, 5, D), # Defect on 6th turn.
(5, C, 6, C),
(5, D, 6, C),
(6, C, 7, C),
(6, D, 7, C),
(7, C, 7, C),
(7, D, 8, D),
(8, C, 9, D),
(8, D, 9, D),
(9, C, 10, C),
(9, D, 10, C),
(10, C, 7, C),
(10, D, 7, C),
)
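# Descriptive note (added): states 0-6 walk through the scripted opening
# (Cooperate everywhere except state 4's output, the sixth move, which
# Defects); state 7 is the steady state, and an opponent Defection there
# triggers the Defect, Defect, Cooperate, Cooperate chain through states
# 8-10 before returning to state 7.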
super().__init__(
transitions=transitions, initial_state=0, initial_action=C
)
class SecondByMikkelson(FSMPlayer):
"""
Strategy submitted to Axelrod's second tournament by Ray Mikkelson (K66R)
and came in twentieth in that tournament.
The strategy keeps track of a variable called `credit`, which determines if
the strategy will Cooperate, in the sense that if `credit` is positive,
then the strategy Cooperates. `credit` is initialized to 7. After the
first turn, `credit` increments if the opponent Cooperated last turn, and
decreases by two otherwise. `credit` is capped above by 8 and below by -7.
[`credit` is assessed as positive or negative, after being updated based on
opponent's last turn.]
If `credit` is non-positive within the first ten turns, then the strategy
Defects and `credit` is set to 4. If `credit` is non-positive later, then
the strategy Defects if and only if (total # opponent Defections) / (turn#)
is at least 15%. [Turn # starts at 1.]
Names:
- Mikkelson: [Axelrod1980b]_
"""
name = "Second by Mikkelson"
classifier = {
"memory_depth": float("inf"),
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self) -> None:
super().__init__()
self.credit = 7
def strategy(self, opponent: Player) -> Action:
turn = len(self.history) + 1
if turn == 1:
return C
if opponent.history[-1] == C:
self.credit += 1
if self.credit > 8:
self.credit = 8
else:
self.credit -= 2
if self.credit < -7:
self.credit = -7
if turn == 2:
return C
if self.credit > 0:
return C
if turn <= 10:
self.credit = 4
return D
if opponent.defections / turn >= 0.15:
return D
return C
class SecondByRowsam(Player):
"""
Strategy submitted to Axelrod's second tournament by Glen Rowsam (K58R)
and came in 21st in that tournament.
The strategy starts in Normal mode, where it cooperates every turn. Every
six turns it checks the score per turn. [Rather, the score of all previous
turns divided by the turn number, which will be one more than the number of
turns scored.] If this measure is less than 2.5 (the strategy is doing
badly), it increases `distrust_points`. `distrust_points` is a variable
that starts at 0; if it ever exceeds 6 points, the strategy will enter
Defect mode and defect from then on. It will increase `distrust_points`
depending on the precise score per turn according to:
- 5 points if score per turn is less than 1.0
- 3 points if score per turn is less than 1.5, but at least 1.0
- 2 points if score per turn is less than 2.0, but at least 1.5
- 1 point if score per turn is less than 2.5, but at least 2.0
If `distrust_points` are increased, then the strategy defects on that turn,
then cooperates and defects on the next two turns. [Unless
`distrust_points` exceeds 6 points, then it will enter Defect mode
immediately.]
Every 18 turns in Normal mode, the strategy will decrement `distrust_points`
if it's more than 3. This represents a wearing off effect of distrust.
Names:
- Rowsam: [Axelrod1980b]_
"""
name = "Second by Rowsam"
classifier = {
"memory_depth": float("inf"),
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self) -> None:
super().__init__()
self.mode = "Normal"
self.distrust_points = 0
self.current_score = 0
self.opponent_score = 0
def _score_last_round(self, opponent: Player):
"""Updates the scores for each player."""
game = self.match_attributes["game"]
last_round = (self.history[-1], opponent.history[-1])
scores = game.score(last_round)
self.current_score += scores[0]
self.opponent_score += scores[1]
def strategy(self, opponent: Player) -> Action:
turn = len(self.history) + 1
if turn > 1:
self._score_last_round(opponent)
if self.mode == "Defect":
return D
if self.mode == "Coop Def Cycle 1":
self.mode = "Coop Def Cycle 2"
return C
if self.mode == "Coop Def Cycle 2":
self.mode = "Normal"
return D
# Opportunity for distrust to cool off.
if turn % 18 == 0:
if self.distrust_points >= 3:
self.distrust_points -= 1
# In normal mode, only check for strategy updates every sixth turn.
if turn % 6 != 0:
return C
points_per_turn = self.current_score / turn # Off by one
if points_per_turn < 1.0:
self.distrust_points += 5
elif points_per_turn < 1.5:
self.distrust_points += 3
elif points_per_turn < 2.0:
self.distrust_points += 2
elif points_per_turn < 2.5:
self.distrust_points += 1
else:
# Continue Cooperating
return C
if self.distrust_points >= 7:
self.mode = "Defect"
else:
# Def this time, then coop, then def.
self.mode = "Coop Def Cycle 1"
return D
class SecondByAppold(Player):
"""
Strategy submitted to Axelrod's second tournament by Scott Appold (K88R) and
came in 22nd in that tournament.
Cooperates for first four turns.
After four turns, will cooperate immediately following the first time the
opponent defects (starting with the opponent's fourth move). Otherwise
will cooperate with probability equal to:
- If this strategy defected two turns ago, the portion of the time
(historically) that the opponent followed a defection with a cooperation.
- If this strategy cooperated two turns ago, the portion of the time
(historically) that the opponent followed a cooperation with a cooperation.
The opponent's first move is counted as a response to a cooperation.
Names:
- Appold: [Axelrod1980b]_
"""
name = "Second by Appold"
classifier = {
"memory_depth": float("inf"),
"stochastic": True,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self) -> None:
super().__init__()
# Probability of a cooperation after an x is:
# opp_c_after_x / total_num_of_x.
self.opp_c_after_x = {C: 0, D: 1}
# This is the total counted, so it doesn't include the most recent.
self.total_num_of_x = {C: 0, D: 1}
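# Both D counters start at 1 (a pseudo-count), so the ratio for the D bucket
# is defined, and equal to 1.0, the first time it is looked up.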
self.first_opp_def = False
def strategy(self, opponent: Player) -> Action:
turn = len(self.history) + 1
us_two_turns_ago = C if turn <= 2 else self.history[-2]
# Update trackers
if turn > 1:
self.total_num_of_x[us_two_turns_ago] += 1
if turn > 1 and opponent.history[-1] == C:
self.opp_c_after_x[us_two_turns_ago] += 1
if turn <= 4:
return C
if opponent.history[-1] == D and not self.first_opp_def:
self.first_opp_def = True
return C
# Calculate the probability that the opponent cooperated last turn given
# what we know two turns ago.
prob_coop = (
self.opp_c_after_x[us_two_turns_ago]
/ self.total_num_of_x[us_two_turns_ago]
)
return self._random.random_choice(prob_coop)
# biosaur_src/classes.py (Biosaur 2.0.3)
from copy import copy
from collections import defaultdict
import numpy as np
from scipy.signal import medfilt
import math
import itertools
def meanfilt(data, window_width):
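# Descriptive note (added): smooths `data` with a window_width-point moving
# average computed from a cumulative sum, then pads the result with the
# original first and last values.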
cumsum_vec = np.cumsum(np.insert(data, 0, 0))
ma_vec = (cumsum_vec[window_width:] -
cumsum_vec[:-window_width]) / window_width
ma_vec = data[:1] + list(ma_vec) + data[-1:]
return ma_vec
class ready_hill:
def __init__(self, intensity, scan_id, mass, ion_mobility):
# self.mz = np.median(mass)
# self.mz = np.mean(mass)
# self.mz_std = np.std(mass)
self.intensity = intensity
self.scan_id = scan_id
self.scan_set = set(scan_id)
self.mass = mass
self.diff_for_output = 0
tmp = max(range(len(self.intensity)), key=self.intensity.__getitem__)
self.scan_of_max_intensity = self.scan_id[tmp]
self.max_intensity = self.intensity[tmp]
# self.mz = np.average(self.mass, weights=self.intensity)
# self.mz = sum(weight * value for weight, value in zip(self.intensity, self.mass)) / sum(self.intensity)
# self.mz = self.mass[tmp]
# self.max_intensity = sum(self.intensity)
if not (ion_mobility is None):
self.ion_mobility = ion_mobility
self.opt_ion_mobility = self.ion_mobility[tmp]
else:
self.ion_mobility = None
self.opt_ion_mobility = None
self.scan_len = len(self.scan_id)
self.idict = dict()
for i, j in zip(self.scan_id, self.intensity):
self.idict[i] = j
# self.sqrt_of_i_sum_squares = math.sqrt(
# sum(v**2 for v in self.idict.values()))
intensity_np = np.array(intensity)
self.sqrt_of_i_sum_squares = np.sqrt(np.sum(np.power(intensity_np, 2)))
class next_peak:
def __init__(
self,
next_mz_array,
next_intensity_array,
next_scan_id,
next_ion_mobility_array):
self.next_mz_array = next_mz_array
self.next_intensity_array = next_intensity_array
self.next_ion_mobility_array = next_ion_mobility_array
self.next_scan_id = next_scan_id
class peak_ion_mobility:
def __init__(self, mz, intensity, ion_mobility):
self.mz_array = [mz, ]
self.mass_array = [[mz, ], ]
self.intensity_array = [[intensity, ]]
self.ion_mobility_array = [[ion_mobility, ]]
self.intensity_max = [intensity, ]
self.ion_mobility_opt = [ion_mobility, ]
self.ion_mobility_max = [ion_mobility, ]
self.ion_mobility_min = [ion_mobility, ]
self.total = 1
def get_nearest_values(self, value):
return np.argsort(np.abs(self.mz_array) - value)
def extend(self, mz, intensity, ion_mobility):
self.mz_array.append(mz)
self.mass_array.append([mz, ])
self.intensity_array.append([intensity, ])
self.intensity_max.append(intensity)
self.ion_mobility_opt.append(ion_mobility)
self.ion_mobility_array.append([ion_mobility, ])
self.ion_mobility_max.append(ion_mobility)
self.ion_mobility_min.append(ion_mobility)
self.total += 1
def append_and_recalc(self, mz, intensity, ion_mobility, index):
self.mass_array[index].append(mz)
self.intensity_array[index].append(intensity)
self.ion_mobility_array[index].append(ion_mobility)
self.recalc(index)
def recalc(self, index):
self.mz_array[index] = np.mean(self.mass_array[index])
self.ion_mobility_max[index] = max(self.ion_mobility_array[index])
self.ion_mobility_min[index] = min(self.ion_mobility_array[index])
# if self.intensity_array[index][-1] > self.intensity_max[index]:
# # self.mz_array[index] =
# self.intensity_max[index] = self.intensity_array[index][-1]
# self.ion_mobility_opt[index] = self.ion_mobility_array[index][-1]
def push_me_to_the_peak_ion_mob(self, mz, intensity, ion_mobility, diff):
# nearest_ids = self.get_nearest_values(mz)
flag = 0
nearest_id = self.total - 1
mass_accuracy = diff * 1e-6 * mz
while nearest_id >= 0:
tmp_diff = abs(self.mz_array[nearest_id] - mz)
# tmp_diff = abs(self.mz_array[nearest_id] - mz) / mz
# if tmp_diff <= diff * 1e-6:
if tmp_diff <= mass_accuracy:
if abs(
self.ion_mobility_max[nearest_id] -
ion_mobility) <= 0.05 or abs(
self.ion_mobility_min[nearest_id] -
ion_mobility) <= 0.05:
flag = 1
self.append_and_recalc(
mz, intensity, ion_mobility, nearest_id)
break
else:
break
nearest_id -= 1
if not flag:
self.extend(mz, intensity, ion_mobility)
class peak:
def __init__(
self,
mz_array,
intensity,
scan_id,
start_id,
ion_mobility_array):
self.mz_array = copy(mz_array)
self.scan_id = [[scan_id, ] for _ in range(len(mz_array))]
# self.scan_id = []
# for _ in range(len(mz_array)):
# self.scan_id.append([scan_id, ])
self.intensity = [[i, ] for i in intensity]
if not (ion_mobility_array is None):
self.ion_mobility = [[i, ] for i in ion_mobility_array]
else:
self.ion_mobility = None
# self.intensity = []
# for i in intensity:
# self.intensity.append([i, ])
self.mass_array = [[i, ] for i in mz_array]
# self.mass_array = []
# for i in mz_array:
# self.mass_array.append([i, ])
self.finished_hills = []
self.crosslinked_hills = []
self.intervals = [start_id, ]
self.actual_degree = 0
self.medar = [1.0, ]
def get_potential_isotope_id(self, i_fast, i_idx):
tmp = self.finished_hills_fast_dict.get(i_fast, [])
# tmp.remove(i_idx)
return tmp
def recalc_fast_array_for_finished_hills(self, mz_step):
m_koef = mz_step
im_koef = 0.02
self.finished_hills_fast_array = [int(fh.mz/m_koef) for fh in self.finished_hills]
self.finished_hills_fast_dict = defaultdict(set)
for idx, fm in enumerate(self.finished_hills_fast_array):
self.finished_hills_fast_dict[fm-1].add(idx)
self.finished_hills_fast_dict[fm+1].add(idx)
self.finished_hills_fast_dict[fm].add(idx)
def recalc_fast_array(self, mz_step):
m_koef = mz_step
im_koef = 0.02
# self.fast_array = [int(tm/m_koef) for tm in self.mz_array]
self.fast_array = (self.mz_array/m_koef).astype(int)
self.fast_dict = defaultdict(set)
for idx, fm in enumerate(self.fast_array):
self.fast_dict[fm-1].add(idx)
self.fast_dict[fm+1].add(idx)
self.fast_dict[fm].add(idx)
def concat_peak_with(self, second_peak):
self.mz_array = self.mz_array + second_peak.mz_array
self.intensity = self.intensity + second_peak.intensity
if not (self.ion_mobility is None):
self.ion_mobility = self.ion_mobility + second_peak.ion_mobility
self.mass_array = self.mass_array + second_peak.mass_array
self.finished_hills = self.finished_hills + second_peak.finished_hills
self.crosslinked_hills = self.crosslinked_hills + \
second_peak.crosslinked_hills
self.intervals = self.intervals + second_peak.intervals
def crosslink_simple(self, mass_accuracy):
mz_step = mass_accuracy * 1e-6 * 2500
crosslink_counter = 0
self.finished_hills = sorted(
self.finished_hills,
key=lambda x: x.scan_id[0])
allowed_ids = set()
for i in self.intervals:
allowed_ids.add(i - 1)
allowed_ids.add(i - 2)
allowed_ids2 = set()
for i in self.intervals:
allowed_ids2.add(i)
allowed_ids2.add(i+1)
map_ids_1 = defaultdict(list)
map_ids_2 = defaultdict(set)
self.finished_hills_fast_dict = defaultdict(set)
m_koef = mz_step
for i, hill in enumerate(self.finished_hills):
end_scan = hill.scan_id[-1]
if end_scan in allowed_ids:
map_ids_1[end_scan].append(i)
fm = int(hill.mz / m_koef)
self.finished_hills_fast_dict[fm-1].add(i)
self.finished_hills_fast_dict[fm+1].add(i)
self.finished_hills_fast_dict[fm].add(i)
start_scan = hill.scan_id[0]
if start_scan in allowed_ids2:
map_ids_2[start_scan].add(i)
fm = int(hill.mz / m_koef)
self.finished_hills_fast_dict[fm-1].add(i)
self.finished_hills_fast_dict[fm+1].add(i)
self.finished_hills_fast_dict[fm].add(i)
banned_ids = set()
way_to_combine = []
for al_id in sorted(allowed_ids):
for i in map_ids_1[al_id]:
if i not in banned_ids:
hill = self.finished_hills[i]
fm = int(hill.mz / m_koef)
for j in self.finished_hills_fast_dict[fm]:
if (j in map_ids_2[al_id+1] or j in map_ids_2[al_id+2]) and j not in banned_ids:
hill2 = self.finished_hills[j]
if abs(hill.mz - hill2.mz) / \
hill.mz <= mass_accuracy * 1e-6:
banned_ids.add(i)
banned_ids.add(j)
way_to_combine.append((i, j))
for i, j in way_to_combine[::-1]:
    # Re-fetch both hills by index: `hill` and `hill2` above are stale loop
    # variables from the search phase, so the merge must use i and j here.
    hill_i = self.finished_hills[i]
    hill_j = self.finished_hills[j]
    self.finished_hills[i] = ready_hill(
        intensity=hill_i.intensity + hill_j.intensity,
        scan_id=hill_i.scan_id + hill_j.scan_id,
        mass=hill_i.mass + hill_j.mass,
        ion_mobility=(
            hill_i.ion_mobility + hill_j.ion_mobility
            if not (hill_i.ion_mobility is None)
            else None))
    del self.finished_hills[j]
for i in list(range(len(self.finished_hills)))[::-1]:
if len(self.finished_hills[i].scan_id) < 3:
del self.finished_hills[i]
def crosslink(self, mass_accuracy):
crosslink_counter = 0
# crosslink_counter2 = 0
self.finished_hills = sorted(
self.finished_hills,
key=lambda x: x.scan_id[0])
i = 0
ini_len = len(self.finished_hills)
while i < ini_len:
hill = self.finished_hills[i]
j = i + 1
while j < ini_len:
hill2 = self.finished_hills[j]
# if hill.scan_id[-1] == hill2.scan_id[0]:
if abs(hill.scan_id[-1] - hill2.scan_id[0]) <= 1:
# crosslink_counter2 += 1
if abs(hill.mz - hill2.mz) / \
hill.mz <= mass_accuracy * 1e-6:
self.finished_hills[i] = ready_hill(
intensity=hill.intensity + hill2.intensity,
scan_id=hill.scan_id + hill2.scan_id,
mass=hill.mass + hill2.mass,
ion_mobility=hill.ion_mobility +
hill2.ion_mobility)
del self.finished_hills[j]
ini_len -= 1
crosslink_counter += 1
elif hill2.scan_id[0] > hill.scan_id[-1] + 1:
break
j += 1
i += 1
# print(crosslink_counter)
# print(crosslink_counter2)
def sort_finished_hills(self):
self.finished_hills = sorted(self.finished_hills, key=lambda x: x.mz)
def check_its_ready(self, id_real, check_degree, min_length):
# ar_for_median = []
# for m_ar, scan_ar in zip(self.mass_array, self.scan_id):
# if scan_ar[-1] == id_real - 1:
# if len(m_ar) >= 2:
# ar_for_median.append(m_ar[-1]/m_ar[-2])
# # print(np.median(ar_for_median), 'median!')
# if len(ar_for_median) >= 20:
# self.medar.append(np.median(ar_for_median))
# else:
# self.medar.append(1.0)
mask_to_del = [True] * self.mz_array.size
set_to_del = set()
for i in range(self.mz_array.size)[::-1]:
# degree_actual = id_real - self.scan_id[i][0] - len(self.scan_id[i]) + 1
degree_actual = id_real - self.scan_id[i][-1]
# or (degree_actual == 2 and len(self.scan_id[i]) == 1):
if degree_actual > check_degree:
# degree_actual = id_real - self.scan_id[i][-1]
# if degree_actual > check_degree or (degree_actual == 2 and
# len(self.scan_id[i]) <= 3):
# list_intensity = self.intensity.pop(i)
list_intensity = self.intensity[i]
if not (self.ion_mobility is None):
# list_ion_mobility = self.ion_mobility.pop(i)
list_ion_mobility = self.ion_mobility[i]
else:
list_ion_mobility = None
# list_scan_id = self.scan_id.pop(i)
list_scan_id = self.scan_id[i]
# list_mass = self.mass_array.pop(i)
list_mass = self.mass_array[i]
lsi = len(list_scan_id)
if lsi >= min_length:
tmp_ready_hill = ready_hill(intensity=list_intensity,
scan_id=list_scan_id,
mass=list_mass,
ion_mobility=list_ion_mobility,
)
self.finished_hills.append(tmp_ready_hill)
mask_to_del[i] = False
set_to_del.add(i)
# if len(tmp_ready_hill.scan_id) >= min_length:
# self.finished_hills.append(tmp_ready_hill)
self.intensity = [i for j, i in enumerate(self.intensity) if j not in set_to_del]
self.scan_id = [i for j, i in enumerate(self.scan_id) if j not in set_to_del]
self.mass_array = [i for j, i in enumerate(self.mass_array) if j not in set_to_del]
if not (self.ion_mobility is None):
self.ion_mobility = [i for j, i in enumerate(self.ion_mobility) if j not in set_to_del]
self.mz_array = self.mz_array[mask_to_del]
def push_left(self, min_length):
mask_to_del = [True] * self.mz_array.size
for i in range(self.mz_array.size)[::-1]:
tmp_ready_hill = ready_hill(
intensity=self.intensity.pop(i),
scan_id=self.scan_id.pop(i),
mass=self.mass_array.pop(i),
ion_mobility=(
self.ion_mobility.pop(i) if not (
self.ion_mobility is None) else None),
)
mask_to_del[i] = False
if len(tmp_ready_hill.scan_id) >= min_length:
self.finished_hills.append(tmp_ready_hill)
self.mz_array = self.mz_array[mask_to_del]
# self.medar.append(1.0)
def get_nearest_value(self, value, mask):
return np.argmin(np.abs(self.mz_array[mask] - value))
def newid(self, nearest, mask):
return np.nonzero(mask)[0][nearest]
def get_potential_nearest(self, i_fast):
return self.fast_dict.get(i_fast, None)
def get_nearest_id(self, i, prev_nearest, diff, mz_array_l, ion_mobility, mask, mz_step):
mass_diff = diff * 1e-6 * i
best_diff = 2 * mass_diff
best_id = False
cur_md_abs = 0
best_prev_nearest_id = False
i_fast = int(i / mz_step)
set_idx = self.get_potential_nearest(i_fast)
if set_idx:
for nearest_id in set_idx:
if mask[nearest_id]:
# nearest_id = prev_nearest
# while nearest_id < mz_array_l:
cur_md = self.mz_array[nearest_id] - i
cur_md_abs = abs(cur_md)
if cur_md_abs <= mass_diff:
if not best_prev_nearest_id:
best_prev_nearest_id = int(nearest_id)
if (ion_mobility is None) or \
abs(ion_mobility -
self.ion_mobility[nearest_id][-1]) <= 0.1:
if cur_md_abs <= best_diff:
best_diff = float(cur_md_abs)
best_id = int(nearest_id)
# prev_nearest = int(nearest_id)
# elif cur_md > mass_diff:
# break
# nearest_id += 1
if not best_prev_nearest_id:
best_prev_nearest_id = prev_nearest
return best_id, best_diff / i, best_prev_nearest_id
def get_arrays(self, tmp1):
tmp1_nearest_id_arr = np.array([x[0] for x in tmp1])
tmp1_idx_arr = np.array([x[1] for x in tmp1])
tmp1_diff_arr = np.array([x[2] for x in tmp1])
return tmp1_nearest_id_arr, tmp1_idx_arr, tmp1_diff_arr
def push_me_to_the_peak(self, next_peak, diff, min_length, mz_step):
next_mz_array = next_peak.next_mz_array
next_intensity_array = next_peak.next_intensity_array
next_ion_mobility_array = next_peak.next_ion_mobility_array
next_scan_id = next_peak.next_scan_id
self.check_its_ready(
id_real=next_scan_id,
check_degree=2,
min_length=min_length)
mask = [True] * (len(self.mz_array))
tmp1 = []
tmp2 = []
prev_nearest = 0
self.recalc_fast_array(mz_step)
mask = [True] * (len(self.mz_array))
mz_array_l = len(self.mz_array)
for idx, i in enumerate(next_mz_array):
best_id, \
md_res, \
prev_nearest = self.get_nearest_id(
i,
prev_nearest,
diff,
mz_array_l,
(next_ion_mobility_array[idx]
if not (
next_ion_mobility_array is None)
else None), mask, mz_step)
if best_id:
tmp1.append([best_id, idx, md_res])
tmp1_nearest_id_arr, tmp1_idx_arr, tmp1_diff_arr = self.get_arrays(
tmp1)
sort_list = np.argsort(tmp1_diff_arr) # try different kinds
tmp1_nearest_id_arr = tmp1_nearest_id_arr[sort_list]
tmp1_idx_arr = tmp1_idx_arr[sort_list]
tmp1_diff_arr = tmp1_diff_arr[sort_list]
saved_index = set()
while tmp1:
# tmp_id = tmp1_idx_arr[0]
if tmp1_diff_arr.size == 0:
break
if tmp1_diff_arr[0] > diff * 1e-6:
break
tmp2.append((tmp1_nearest_id_arr[0], tmp1_idx_arr[0]))
saved_index.add(tmp1_idx_arr[0])
mask[tmp2[-1][0]] = False
if any(mask):
tmp1_nearest_id_arr = tmp1_nearest_id_arr[1:]
tmp1_idx_arr = tmp1_idx_arr[1:]
tmp1_diff_arr = tmp1_diff_arr[1:]
if tmp1_diff_arr.size == 0:
break
if tmp1_nearest_id_arr[0] in saved_index:
for idx, element in enumerate(tmp1_idx_arr):
if tmp1_nearest_id_arr[idx] in saved_index:
element_mz = next_mz_array[element]
element_im = (next_ion_mobility_array[element]
if not (
next_ion_mobility_array is None)
else None)
# nearest = self.get_nearest_value(element_mz, mask)
# nearest_id_old = self.newid(nearest, mask)
nearest_id, \
md_res, \
prev_nearest = self.get_nearest_id(
element_mz,
0,
diff,
0,
element_im, mask, mz_step)
if not nearest_id:
nearest_id = 0
md_res = 1e6
tmp1_nearest_id_arr[idx] = nearest_id
tmp1_diff_arr[idx] = md_res
else:
break
sort_list = np.argsort(
tmp1_diff_arr, kind='quicksort') # try different kinds
tmp1_nearest_id_arr = tmp1_nearest_id_arr[sort_list]
tmp1_idx_arr = tmp1_idx_arr[sort_list]
tmp1_diff_arr = tmp1_diff_arr[sort_list]
else:
break
for i, idx in tmp2:
# FIXME
# self.mz_array[i] = (self.mz_array[i] + next_mz_array[idx])/2
self.scan_id[i].append(next_scan_id)
self.intensity[i].append(next_intensity_array[idx])
if not (self.ion_mobility is None):
self.ion_mobility[i].append(next_ion_mobility_array[idx])
self.mass_array[i].append(next_mz_array[idx])
tmp_mass_array = self.mass_array[i][-3:]
self.mz_array[i] = sum(tmp_mass_array)/len(tmp_mass_array)
# self.mz_array[i] = np.average(self.mass_array[i][-3:], weights=self.intensity[i][-3:])
added = set(x[1] for x in tmp2)
mask2 = [(False if i in added else True)
for i in range(len(next_mz_array))]
next_mz_array_size = next_mz_array[mask2].size
self.mz_array = np.append(self.mz_array, next_mz_array[mask2])
n_i_a_m = next_intensity_array[mask2]
if not (self.ion_mobility is None):
n_im_a_m = next_ion_mobility_array[mask2]
n_m_a_m = next_mz_array[mask2]
for i in range(next_mz_array_size):
self.scan_id.append([next_scan_id, ])
self.intensity.append([n_i_a_m[i], ])
if not (self.ion_mobility is None):
self.ion_mobility.append([n_im_a_m[i], ])
self.mass_array.append([n_m_a_m[i], ])
self.selfsort()
def selfsort(self):
idx = np.argsort(self.mz_array)
self.mz_array = self.mz_array[idx]
self.scan_id = [self.scan_id[i] for i in idx]
self.intensity = [self.intensity[i] for i in idx]
if not (self.ion_mobility is None):
self.ion_mobility = [self.ion_mobility[i] for i in idx]
self.mass_array = [self.mass_array[i] for i in idx]
def cutting_down(self, intensity_propotion):
for idx, peak in enumerate(self.finished_hills):
max_intensity_propotion = peak.max_intensity * intensity_propotion
# FIXME try "and"
if (
peak.intensity[0] >= max_intensity_propotion and
peak.intensity[-1] >= max_intensity_propotion):
del self.finished_hills[idx]
def split_peaks2(self, hillValleyFactor):
set_to_del = set()
new_hills = []
for hill_idx, hill in enumerate(self.finished_hills):
if len(hill.mass) >= 6:
mz_diff = np.array([z - hill.mz for z in hill.mass])
std_5 = np.std(np.diff(mz_diff))
smothed_intensity = list(np.abs(np.diff(mz_diff))/std_5)
c_len = len(smothed_intensity) - 3
idx = 3
min_idx_list = []
min_val = 1.0
while idx <= c_len:
mult_val = smothed_intensity[idx]
if mult_val >= hillValleyFactor:
# if not len(min_idx_list) or idx >= min_idx_list[-1] + 3:
# min_idx_list.append(idx)
# min_val = mult_val
# elif mult_val < min_val:
# min_idx_list[-1] = idx
# min_val = mult_val
if (not len(min_idx_list) or idx >= min_idx_list[-1] + 3) and max(hill.intensity[0:idx-1]) >= 1.5 * max(hill.intensity[0], hill.intensity[idx-1]) and max(hill.intensity[idx:]) >= 1.5 * max(hill.intensity[idx], hill.intensity[-1]):
min_idx_list.append(idx)
min_val = mult_val
elif (mult_val < min_val) and max(hill.intensity[0:idx-1]) >= 1.5 * max(hill.intensity[0], hill.intensity[idx-1]) and max(hill.intensity[idx:]) >= 1.5 * max(hill.intensity[idx], hill.intensity[-1]):
min_idx_list[-1] = idx
min_val = mult_val
idx += 1
if len(min_idx_list):
set_to_del.add(hill_idx)
prev_idx = 1
for min_idx in min_idx_list:
new_hills.append(ready_hill(
intensity=hill.intensity[prev_idx-1:min_idx],
scan_id=hill.scan_id[prev_idx-1:min_idx],
mass=hill.mass[prev_idx-1:min_idx],
ion_mobility=(
hill.ion_mobility[prev_idx-1:min_idx] if not
(hill.ion_mobility is None) else
None)))
prev_idx = min_idx
new_hills.append(ready_hill(
intensity=hill.intensity[min_idx-1:],
scan_id=hill.scan_id[min_idx-1:],
mass=hill.mass[min_idx-1:],
ion_mobility=(
hill.ion_mobility[min_idx-1:] if not
(hill.ion_mobility is None) else
None)))
print(len(self.finished_hills))
for idx in sorted(list(set_to_del))[::-1]:
del self.finished_hills[idx]
print(len(self.finished_hills))
self.finished_hills.extend(new_hills)
print(len(self.finished_hills))
def calc_accurate_mz(self):
for hill in self.finished_hills:
hill.mz = sum(weight * value for weight, value in zip(hill.intensity, hill.mass)) / sum(hill.intensity)
def split_peaks(self, hillValleyFactor, min_length_hill):
set_to_del = set()
new_hills = []
for hill_idx, hill in enumerate(self.finished_hills):
hill_length = len(hill.intensity)
if hill_length >= min_length_hill * 2:
# smothed_intensity = hill.intensity
smothed_intensity = meanfilt(hill.intensity, 2)
# smothed_intensity = medfilt(smothed_intensity, 3)
# smothed_intensity = medfilt(hill.intensity, 3)
# smothed_intensity = meanfilt(smothed_intensity, 3)
c_len = hill_length - min_length_hill
idx = int(min_length_hill)
# min_idx = False
min_idx_list = []
min_val = 0
l_idx = 0
while idx <= c_len:
if len(min_idx_list) and idx >= min_idx_list[-1] + min_length_hill:
l_idx = min_idx_list[-1]
l_r = max(smothed_intensity[l_idx:idx]) / float(smothed_intensity[idx])
if l_r >= hillValleyFactor:
r_r = max(smothed_intensity[idx:]) / float(smothed_intensity[idx])
if r_r >= hillValleyFactor:
# print(l_r, r_r)
# if l_r >= hillValleyFactor and r_r >= hillValleyFactor:
mult_val = l_r * r_r
# if mult_val < min_val:
# min_val = mult_val
if not len(min_idx_list) or idx >= min_idx_list[-1] + min_length_hill:
min_idx_list.append(idx)
min_val = mult_val
elif mult_val > min_val:
min_idx_list[-1] = idx
min_val = mult_val
# min_idx = idx
idx += 1
if len(min_idx_list):
set_to_del.add(hill_idx)
prev_idx = 0
for min_idx in min_idx_list:
new_hills.append(ready_hill(
intensity=hill.intensity[prev_idx:min_idx+1],
scan_id=hill.scan_id[prev_idx:min_idx+1],
mass=hill.mass[prev_idx:min_idx+1],
ion_mobility=(
hill.ion_mobility[prev_idx:min_idx+1] if not
(hill.ion_mobility is None) else
None)))
prev_idx = min_idx
new_hills.append(ready_hill(
intensity=hill.intensity[min_idx:],
scan_id=hill.scan_id[min_idx:],
mass=hill.mass[min_idx:],
ion_mobility=(
hill.ion_mobility[min_idx:] if not
(hill.ion_mobility is None) else
None)))
# print(len(new_hills))
# print(len(set_to_del))
for idx in sorted(list(set_to_del))[::-1]:
del self.finished_hills[idx]
self.finished_hills.extend(new_hills)
# self.finished_hills = result
class feature:
def __init__(self, finished_hills, each, each_id, negative_mode, isotopes_mass_error_map, mass_accuracy):
self.charge = each[1][0][1]
self.shift = each[3]
# self.mz = finished_hills[each[0]].mz
# a_cus = 0.0033946045716987906 / 1000
# b_cus = -1.8123641799696435
mass_for_average2 = [np.average(finished_hills[each[0]].mass, weights=finished_hills[each[0]].intensity)]
intensity_for_average2 = [finished_hills[each[0]].max_intensity, ]
# for i_numb, ech in enumerate(each[1]):
# mass_for_average2.append(np.average(finished_hills[ech[0]].mass, weights=finished_hills[ech[0]].intensity) - (i_numb+1)*1.00335/ech[1])
# intensity_for_average2.append(finished_hills[ech[0]].max_intensity)
# mass_for_average2 = [zm * (1 - 1e-6 * (a_cus * zi + b_cus)) for zm, zi in zip(mass_for_average2, intensity_for_average2)]
self.mz = np.average(mass_for_average2, weights=intensity_for_average2)
mass_acc = mass_accuracy
self.mz_tol = mass_acc*1e-6*self.mz
# mass_for_average = finished_hills[each[0]].mass + list(itertools.chain.from_iterable([(z * (1 - 1e-6 * isotopes_mass_error_map[i_numb+1][0]) - (i_numb+1)*1.00335/ech[1]) for z in finished_hills[ech[0]].mass] for i_numb, ech in enumerate(each[1])))
# # mass_for_average = finished_hills[each[0]].mass + list(itertools.chain.from_iterable([(z - (i_numb+1)*1.00335/ech[1]) for z in finished_hills[ech[0]].mass] for i_numb, ech in enumerate(each[1])))
intensity_for_average = finished_hills[each[0]].intensity + list(itertools.chain.from_iterable(finished_hills[ech[0]].intensity for ech in each[1]))
# # mass_for_average = [zm * (1 - 1e-6 * (a_cus * zi + b_cus)) for zm, zi in zip(mass_for_average, intensity_for_average)]
# scans_for_average = finished_hills[each[0]].scan_id + list(itertools.chain.from_iterable(finished_hills[ech[0]].scan_id for ech in each[1]))
# # print(mass_for_average, intensity_for_average)
# self.mz = np.average(mass_for_average, weights=intensity_for_average)
# # self.mz = np.median(mass_for_average)
scans_for_average = finished_hills[each[0]].scan_id + list(itertools.chain.from_iterable(finished_hills[ech[0]].scan_id for ech in each[1]))
# self.mz = np.median(finished_hills[each[0]].mass)
# self.mz = np.mean(finished_hills[each[0]].mass)
self.negative_mode = negative_mode
if negative_mode == True:
self.neutral_mass = self.mz * self.charge + \
1.0072765 * self.charge - self.shift * 1.00335
else:
self.neutral_mass = self.mz * self.charge - \
1.0072765 * self.charge - self.shift * 1.00335
self.isotopes_numb = len(each[1])
self.scan_numb = len(finished_hills[each[0]].scan_id)
self.scans = finished_hills[each[0]].scan_id
self.id_for_scan = finished_hills[each[0]].intensity.index(
max(finished_hills[each[0]].intensity))
self.intensity = finished_hills[each[0]].max_intensity
# self.mz = self.mz * (1 - 1e-6 * (a_cus * max(intensity_for_average2) + b_cus))
# self.id_for_scan = intensity_for_average.index(
# max(intensity_for_average))
# self.intensity = max(intensity_for_average)
self.idict = finished_hills[each[0]].idict
self.sqrt_of_i_sum_squares = math.sqrt(
sum(v**2 for v in self.idict.values()))
self.scan_set = finished_hills[each[0]].scan_set
if not (finished_hills[each[0]].ion_mobility is None):
self.ion_mobility = finished_hills[each[0]].opt_ion_mobility
else:
self.ion_mobility = None
# self.scan_id = scans_for_average[self.id_for_scan]
# self.scan_id = finished_hills[each[0]].scan_id[self.id_for_scan]
# self.RT = self.scan_numb
self.scan_id = int(np.average(scans_for_average, weights=intensity_for_average))
self.RT = int(np.average(scans_for_average, weights=intensity_for_average))
# self.sulfur = (1 if each[2] else 0)
self.sulfur = (each[1][1][4] if len(each[1]) > 1 else -1)
self.cos_corr = each[4][0]
self.cos_corr_2 = each[4][1]
self.corr_fill_zero = each[4][2]
self.diff_for_output = each[4][3]
self.intensity_1 = each[4][4]
self.scan_id_1 = each[4][5]
self.mz_std_1 = np.std(each[4][6])
self.intensity_2 = each[4][7]
self.scan_id_2 = each[4][8]
self.mz_std_2 = np.std(each[4][9])
self.id = each_id
self.ms2_scan = []
def targeted(self, scan):
        self.ms2_scan.append(scan)

# /Cuttlefish-0.3.tar.gz/Cuttlefish-0.3/cuttlefish/__init__.py
from bottle import redirect, request, route, send_file
from mako.exceptions import TopLevelLookupException
from mako.lookup import TemplateCollection
from mako.template import Template
import os.path
import subprocess
__version__ = '0.3'
__license__ = 'MIT'
class Config:
"""
Cuttlefish config from plist file.
"""
filename = 'cuttlefish-config.plist'
path_to_static = '.'
plist = {}
kleene_collection = ["."] # Have a usable default when no config is loaded
@classmethod
def collections(cls):
return cls.plist['collections'].keys()
@classmethod
def loadFromVicinity(cls, path):
from os.path import expanduser, expandvars, isdir, dirname, join
from plistlib import readPlist
path = expanduser(expandvars(path)) # Unix goodness
if not isdir(path):
path = dirname(path)
file = join(path, cls.filename)
cls.plist = readPlist(file)
cls.kleene_collection = sum([cls.plist['collections'][k] for k in cls.collections()], [])
cls.path_to_static = join(dirname(__file__), 'static')
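# Typical startup (see see_bottle_run() at the bottom of this file):
# Config.loadFromVicinity(__file__) looks for cuttlefish-config.plist next to the
# given path and loads the search collections from it.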
class SourceTemplate (Template):
"""
Mako Template subclass provides globals to render context.
"""
globals = {}
def render(self, **kwargs):
extra_kwargs = SourceTemplate.globals.copy()
extra_kwargs.update(kwargs)
return super(SourceTemplate, self).render(**extra_kwargs)
class SourceTemplateCollection (TemplateCollection):
"""
Mako TemplateCollection embedded in the source.
"""
def __init__(self):
TemplateCollection.__init__(self)
kwargs = {
'input_encoding': 'utf-8',
'output_encoding': 'utf-8',
'encoding_errors': 'replace',
'format_exceptions': True,
'lookup': self,
}
self.builtins = {}
self.builtins['attribution'] = Template(ur"""# -*- coding: utf-8 -*-
<center id="copyleft" class="tiny">
<p>Copyright © 2009 Kaelin Colclasure • MIT License • See <a href=${self_url("/LICENSE.html")}>LICENSE</a> for details…<br/>
Cuttlefish logo by Randall Munroe • <a href="http://www.xkcd.com/520">www.xkcd.com</a></p>
</center>
""", **kwargs)
self.builtins['formq'] = Template(ur"""# -*- coding: utf-8 -*-
<%def name="mkoption(label,value,selected=None)">
% if value == selected:
<option value="${value}" selected="True">\
% else:
<option value="${value}">\
% endif
${label | h}</option>
</%def>
<form action=${self_url("/search")} accept-charset="utf-8" method="get">
<table align="center">
<tr>
<td class="nobr">
<input name="q" value="${q | u}" type="search" placeholder="" autosave="net.colclasure.cuttlefish.q" results="20" maxLength="256" size="55" />
<input name="c" value="3" type="hidden" />
<input name="r" value="cooked" type="hidden" />
<input type="submit" value="Search" name="btn" />
</td>
</tr>
<tr>
<td class="nobr" align="center">
Collection: <select id="collection" name="cn">
${mkoption("All collections", "*", cn)}
<optgroup label="Select collection">
% for collection in Config.collections():
${mkoption(collection, collection, cn)}
% endfor
</optgroup>
</select>
</td>
</tr>
</table>
</form>
""", **kwargs)
self.builtins['root'] = SourceTemplate(ur"""# -*- coding: utf-8 -*-
<html>
<head>
<title>Cuttlefish Search: ${subtitle | h}</title>
<link rel="stylesheet" type="text/css" href=${self_url("/static/style.css")} />
</head>
<body>
<center id="logo">
<p><a href=${self_url("/")} class="logolink">
<img src=${self_url("/static/cuttlefish.png")} height="150" />[cuttlefish]
</a></p>
</center>
<%include file='formq' />
<%include file='attribution' />
</body>
</html>
""", **kwargs)
self.builtins['cooked'] = Template(ur"""# -*- coding: utf-8 -*-
<table width="100%">
% for r in results:
<tr><td>${r.filename | h} (${r.match_count | h})</td></tr>
<tr><td><div class="context">
% for l in r.lines:
% if l[0] == -1:
</div><div class="context">
% else:
% if l[2]:
<a href="txmt://open?url=file%3A%2F%2F${r.filename | u}&line=${l[0]}">
% endif
<div class="${('contextline', 'matchline')[l[2]]}">${u"%5d %s" % (l[0], l[1]) | h}</div>
% if l[2]:
</a>
% endif
% endif
% endfor
</div></td></tr>
% endfor
</table>
""", **kwargs)
self.builtins['raw'] = Template(ur"""# -*- coding: utf-8 -*-
<pre>${results.raw_results | h}
</pre>
""", **kwargs)
self.builtins['results'] = SourceTemplate(ur"""# -*- coding: utf-8 -*-
<html>
<head>
<title>Cuttlefish Search: «${subtitle | h}»</title>
<link rel="stylesheet" type="text/css" href=${self_url("/static/style.css")} />
</head>
<body>
<center id="logosmall">
<p><a href=${self_url("/")} class="logolink">
<img src=${self_url("/static/cuttlefish.png")} height="100" /><br/>[cuttlefish]
</a></p>
</center>
<%include file='formq' />
% if render == 'cooked':
<%include file='cooked' />
% elif render == 'raw':
<%include file='raw' />
% else:
<%include file='nonesuch' />
% endif
<%include file='attribution' />
</body>
</html>
""", **kwargs)
self.builtins['license'] = SourceTemplate(ur"""# -*- coding: utf-8 -*-
<html>
<head>
<title>Cuttlefish Search: LICENSE</title>
<link rel="stylesheet" type="text/css" href=${self_url("/static/style.css")} />
</head>
<body>
<center id="logosmall">
<p><a href=${self_url("/")} class="logolink">
<img src=${self_url("/static/cuttlefish.png")} height="100" />[cuttlefish]
</a></p>
</center>
<center id="license">
<table>
<tr><td align="right" class="tiny">Version ${VERSION | h}</td></tr>
<tr><td><pre>${LICENSE | h}</pre></td></tr>
</table>
</center>
<div id="kudos" align="center">
Built with <a href="http://bottle.paws.de/"><img src=${self_url("/static/bottle-sig.png")} /></a>
& <a href="http://www.makotemplates.org/"><img src=${self_url("/static/mako-sig.png")} height="38" /></a>
</div>
<%include file='attribution' />
</body>
</html>
""", **kwargs)
def get_template(self, uri, request=None):
if request != None:
SourceTemplate.globals['Config'] = Config
SourceTemplate.globals['self_url'] = lambda path: '"%s%s"' % (request.environ['SCRIPT_NAME'], path)
try:
return self.builtins[uri]
except KeyError:
            raise TopLevelLookupException("Can't locate template for uri '%s'" % uri)
stc = SourceTemplateCollection()
results = None
@route('/')
def root():
global results
results = None
return stc.get_template('root', request=request).render(subtitle=u"Python Source Code",
q="",
cn="*")
class MatchChunk:
"""
Represent one or more matches with their surrounding context.
"""
def __init__(self):
self.filename = None
self.lines = []
self.match_count = 0
self.is_last = False
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
if exc_type is StopIteration:
self.is_last = True
else:
return False
return True
def append(self, lnum, line, is_match=False):
if is_match:
self.match_count += 1
self.lines.append((lnum, line, is_match))
class MatchParser:
"""
Parse matches from `grep`.
"""
def __init__(self, input):
self.input = input
self.chunk = None
self.defer = None
self.fin = False
def _match(self, raw_result):
try:
parts = raw_result.split(u':', 2)
lnum = int(parts[1])
line = parts[2]
except (IndexError, ValueError):
return False
if self.chunk.filename != None:
if parts[0] != self.chunk.filename: # Bogus match
return False
else:
if not os.path.isfile(parts[0]): # Bogus match
return False
self.chunk.filename = parts[0]
[self._context(raw_defer) for raw_defer in self.defer]
self.chunk.append(lnum, line, is_match=True)
return True;
def _context(self, raw_result):
if self.chunk.filename == None:
return False
assert raw_result.startswith(self.chunk.filename), u"filename:'%s' raw_result:'%s'" % (self.chunk.filename, raw_result)
raw_result = raw_result[len(self.chunk.filename):]
parts = raw_result.split(u'-', 2)
lnum = int(parts[1])
line = parts[2]
self.chunk.append(lnum, line, is_match=False)
return True;
def next(self): # Raise StopIteration when no more results
if self.fin:
raise StopIteration
with MatchChunk() as self.chunk:
self.defer = []
raw_result = unicode(self.input.next(), 'utf-8', 'replace').rstrip()
while raw_result != u"--":
if self._match(raw_result) or self._context(raw_result):
pass
else:
self.defer.append(raw_result)
raw_result = unicode(self.input.next(), 'utf-8', 'replace').rstrip()
if self.chunk.is_last:
self.fin = True
if len(self.chunk.lines) == 0: # Happens when there are no results…
raise StopIteration
return self.chunk
class MatchChunkRunParser:
"""
Collect runs of chunks from a MatchParser.
"""
def __init__(self, input):
self.parser = MatchParser(input)
self.next_chunk = None
def next(self): # Raise StopIteration when no more results
if self.next_chunk == None:
self.next_chunk = self.parser.next()
chunk = self.next_chunk
while not chunk.is_last:
chunk = self.parser.next()
if chunk.filename != self.next_chunk.filename:
(self.next_chunk, chunk) = (chunk, self.next_chunk)
return chunk
self.next_chunk.append(-1, None) # Marker b/w original chunks
[self.next_chunk.append(*line) for line in chunk.lines]
(self.next_chunk, chunk) = (None, self.next_chunk)
return chunk
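# Consecutive chunks from the same file are merged into one chunk, separated by a
# (-1, None) marker line, so the 'cooked' template renders one block per file.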
class SearchSubprocess:
"""
Search using `grep` in a subprocess.
"""
def __init__(self, query, c=3, cn="*", parser=MatchChunkRunParser):
self.query = query
self.parser = parser
cmd = ["/usr/bin/grep",
"--recursive",
"--binary-files=without-match",
"--line-number",
"--context=%d" % (c),
"--fixed-strings", query]
if cn == "*":
cmd.extend([os.path.abspath(path) for path in Config.kleene_collection])
else:
cmd.extend([os.path.abspath(path) for path in Config.plist['collections'][cn]])
self.proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self._raw_results = None
def __iter__(self): # Return a parser with a next() method…
return self.parser(self.proc.stdout)
@property
def raw_results(self):
if self._raw_results == None:
self._raw_results = unicode(self.proc.communicate()[0], 'utf-8', 'replace')
return self._raw_results
def quicklook(q):
from pprint import pprint
results = SearchSubprocess(q)
for chunk in results:
pprint((chunk.filename, chunk.match_count, chunk.lines))
@route('/search')
def search():
try:
q = unicode(request.GET['q'], 'utf-8', 'replace')
c = unicode(request.GET['c'], 'utf-8', 'replace')
r = unicode(request.GET['r'], 'utf-8', 'replace')
cn = unicode(request.GET['cn'], 'utf-8', 'replace')
global results
if results != None and results.query == q:
pass
#print "Using cached results… NOT!" # Disabled for now
#else:
results = SearchSubprocess(query=q, c=int(c), cn=cn)
return stc.get_template('results', request=request).render(subtitle=q,
q=q,
render=r,
cn=cn,
results=results)
except KeyError:
redirect(request.environ['SCRIPT_NAME'])
@route('/static/:filename')
def static(filename):
send_file(filename, root=Config.path_to_static)
@route('/LICENSE.html')
def license():
with open(os.path.join(Config.path_to_static, 'LICENSE.txt'), 'r') as file:
LICENSE = file.read()
return stc.get_template('license', request=request).render(LICENSE=LICENSE,
VERSION=__version__)
def see_bottle_run():
import bottle
Config.loadFromVicinity(__file__)
kwargs = Config.plist['bottle-run-kwargs']
bottle.run(**kwargs)
if __name__ == "__main__":
    see_bottle_run()

# /NeodroidVision-0.3.0-py36-none-any.whl/neodroidvision/segmentation/fully_convolutional.py
__author__ = "Christian Heider Nielsen"
__doc__ = r"""
Created on 18/07/2020
"""
import math
import torch
from torch import nn
from torch.optim import Adam
from neodroidvision.segmentation import dice_loss
__all__ = ["FullyConvolutional", "FCN"]
class FullyConvolutional(nn.Module):
"""description"""
@staticmethod
def _pad(kernel_size: int, stride: int, dilation: int = 1) -> int:
"""
if length % stride == 0:
out_length = length // stride
else:
out_length = length // stride + 1
return math.ceil((out_length * stride + kernel_size - length - stride) / 2)"""
return math.ceil((1 - stride + dilation * (kernel_size - 1)) / 2)
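    # e.g. _pad(3, 1) == 1 and _pad(7, 1) == 3 ("same" padding for the 3x3/7x7
    # convolutions below); _pad(1, 1) == 0, and the stride-2/stride-8 transposed
    # convolutions get _pad(2, 2) == _pad(8, 8) == 0.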
@staticmethod
def conv2d_pool_block(
in_channels: int, out_channels: int, ext: bool = False
) -> torch.nn.Module:
"""
Args:
in_channels:
out_channels:
ext:
Returns:
"""
base_c = [
torch.nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
padding=FullyConvolutional._pad(3, 1),
),
torch.nn.ELU(),
torch.nn.Conv2d(
in_channels=out_channels,
out_channels=out_channels,
kernel_size=3,
padding=FullyConvolutional._pad(3, 1),
),
torch.nn.ELU(),
]
if ext:
base_c.extend(
[
torch.nn.Conv2d(
in_channels=out_channels,
out_channels=out_channels,
kernel_size=3,
padding=FullyConvolutional._pad(3, 1),
),
torch.nn.ELU(),
]
)
base_c.append(
torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
) # Valid padding
return torch.nn.Sequential(*base_c)
def __init__(
self,
in_channels: int,
num_categories: int,
*,
final_act: callable,
base: int = 4,
t=8,
):
"""
FCN8
:param num_categories:
:type num_categories:
:param base:
:type base:"""
super().__init__()
i_c = in_channels
for ith_block in (0, 1):
i_c_n = 2 ** (base + ith_block)
setattr(self, f"pool_block{ith_block}", self.conv2d_pool_block(i_c, i_c_n))
i_c = i_c_n
for ith_block in (2, 3):
i_c_n = 2 ** (base + ith_block)
setattr(
self,
f"pool_block{ith_block}",
self.conv2d_pool_block(i_c, i_c_n, ext=True),
)
i_c = i_c_n
self.pool_block4 = self.conv2d_pool_block(i_c, 2 ** (base + 3), ext=True)
self.conv5 = torch.nn.Sequential(
torch.nn.Conv2d(
i_c, 2048, kernel_size=7, padding=FullyConvolutional._pad(7, 1)
),
torch.nn.Dropout(0.5),
)
self.conv6 = torch.nn.Sequential(
torch.nn.Conv2d(
2048, 2048, kernel_size=1, padding=FullyConvolutional._pad(1, 1)
),
torch.nn.Dropout(0.5),
)
for ith_block, ic2 in zip((2, 3), (num_categories, 2048)):
i_c_n = 2 ** (base + ith_block)
setattr(
self,
f"skip_block{ith_block}",
torch.nn.Sequential(
torch.nn.Conv2d(
i_c_n,
num_categories,
kernel_size=1,
padding=FullyConvolutional._pad(1, 1),
),
torch.nn.ELU(),
),
)
setattr(
self,
f"transpose_block{ith_block}",
torch.nn.ConvTranspose2d(
ic2,
num_categories,
kernel_size=2,
stride=2,
padding=FullyConvolutional._pad(2, 2),
),
)
self.head = torch.nn.Sequential(
torch.nn.ConvTranspose2d(
num_categories,
num_categories,
kernel_size=8,
stride=8,
padding=FullyConvolutional._pad(8, 8),
),
final_act,
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Args:
x:
Returns:
"""
for ith_block in (0, 1):
x = getattr(self, f"pool_block{ith_block}")(x)
pool2 = self.pool_block2(x)
pool3 = self.pool_block3(pool2)
x = self.conv6(self.conv5(self.pool_block4(pool3)))
s1, t1 = self.skip_block3(pool3), self.transpose_block3(x)
print(s1.shape, t1.shape)
x = s1 + t1
x = self.skip_block2(pool2) + self.transpose_block2(x)
return self.head(x)
FCN = FullyConvolutional
if __name__ == "__main__":
def a():
"""description"""
img_size = 224
in_channels = 5
n_classes = 2
metrics = dice_loss
if n_classes == 1:
# loss = 'binary_crossentropy'
loss = torch.nn.BCELoss()
final_act = torch.nn.Sigmoid()
elif n_classes > 1:
# loss = 'categorical_crossentropy'
loss = torch.nn.CrossEntropyLoss()
final_act = torch.nn.LogSoftmax(1) # across channels
model = FCN(in_channels, n_classes, final_act=final_act)
optimiser = Adam(model.parameters(), 1e-4)
pred = model(torch.ones((4, in_channels, img_size, img_size)))
print(pred)
    a()

// /Cuckoo-2.0.7a1.tar.gz/Cuckoo-2.0.7a1/cuckoo/web/static/js/cuckoo/analysis_sidebar.js
'use strict';
var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
var AnalysisSidebar = function () {
function AnalysisSidebar(_$) {
_classCallCheck(this, AnalysisSidebar);
this.$ = _$;
this.searchInput = this.$.find('input[name="sidebar_search"]');
this.open = false;
this.locked = false;
this.search_active = false;
if (!window.localStorage.getItem('cuckoo-sidebar-locked')) {
window.localStorage.setItem('cuckoo-sidebar-locked', 'false');
} else {
window.localStorage.getItem('cuckoo-sidebar-locked') == 'true' ? this.lock() : null;
}
this.activateListeners();
this.scrollHandler();
}
_createClass(AnalysisSidebar, [{
key: 'activateListeners',
value: function activateListeners() {
var self = this;
// enable mouse opening
this.$.bind('mouseenter', function (e) {
self.onMouseEnter(e);
}).bind('mouseleave', function (e) {
self.onMouseOut(e);
});
// disable scrolling the nav
$(document).on('scroll', function (e) {
e.preventDefault();
return self.scrollHandler(e);
});
this.$.find('[href^=sidebar]').bind('click', function (e) {
e.preventDefault();
var action = $(this).attr('href').split(':')[1];
switch (action) {
case 'toggle-lock':
self.toggleLock();
break;
}
});
this.searchInput.bind('keyup', function (e) {
self.searchHandler(e, $(this).val());
});
}
}, {
key: 'onMouseEnter',
value: function onMouseEnter(e) {
this.open = true;
this.$.addClass('open');
}
}, {
key: 'onMouseOut',
value: function onMouseOut(e) {
if (!this.search_active) {
this.open = false;
this.$.removeClass('open');
}
}
}, {
key: 'scrollHandler',
value: function scrollHandler(e) {
var top = $(window).scrollTop();
this.$.find('.cuckoo-nav').css('transform', 'translate3d(0,' + top + 'px,0)');
}
}, {
key: 'lock',
value: function lock() {
this.locked = true;
this.$.addClass('locked');
window.localStorage.setItem('cuckoo-sidebar-locked', true);
}
}, {
key: 'unlock',
value: function unlock() {
this.locked = false;
this.$.removeClass('locked');
window.localStorage.setItem('cuckoo-sidebar-locked', false);
}
}, {
key: 'toggleLock',
value: function toggleLock() {
if (this.locked) {
this.unlock();
} else {
this.lock();
}
}
}, {
key: 'searchHandler',
value: function searchHandler(e, value) {
if (value.length > 0) {
this.search_active = true;
} else {
this.search_active = false;
}
}
}]);
return AnalysisSidebar;
}();
$(function () {
var sidebar;
if ($("#analysis-nav").length) sidebar = new AnalysisSidebar($('#analysis-nav'));
});
//# sourceMappingURL=analysis_sidebar.js.map

# /EnergyCapSdk-8.2304.4743.tar.gz/EnergyCapSdk-8.2304.4743/energycap/sdk/models/channel_create.py
from msrest.serialization import Model
class ChannelCreate(Model):
"""ChannelCreate.
All required parameters must be populated in order to send to Azure.
:param interval_minutes: Required. The interval of the channel in minutes
15 = FifteenMinute,
30 = ThirtyMinute,
60 = Hourly,
1440 = Daily,
10080 = Weekly,
43200 = Monthly <span class='property-internal'>Required</span> <span
class='property-internal'>One of 15, 30, 60, 1440, 10080, 43200 </span>
:type interval_minutes: int
:param observation_type_code: Required. The observation type of the
channel <span class='property-internal'>Required</span>
:type observation_type_code: str
:param channel_description: Description of the channel <span
class='property-internal'>Must be between 0 and 4000 characters</span>
:type channel_description: str
:param channel_import_id: The import identifier for the channel <span
class='property-internal'>Must be between 0 and 255 characters</span>
:type channel_import_id: str
:param channel_version: Required.
:type channel_version: ~energycap.sdk.models.ChannelVersionRequest
"""
_validation = {
'interval_minutes': {'required': True},
'observation_type_code': {'required': True},
'channel_description': {'max_length': 4000, 'min_length': 0},
'channel_import_id': {'max_length': 255, 'min_length': 0},
'channel_version': {'required': True},
}
_attribute_map = {
'interval_minutes': {'key': 'intervalMinutes', 'type': 'int'},
'observation_type_code': {'key': 'observationTypeCode', 'type': 'str'},
'channel_description': {'key': 'channelDescription', 'type': 'str'},
'channel_import_id': {'key': 'channelImportId', 'type': 'str'},
'channel_version': {'key': 'channelVersion', 'type': 'ChannelVersionRequest'},
}
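    # Usage sketch (field values are illustrative, not taken from the EnergyCAP docs):
    #   channel = ChannelCreate(interval_minutes=15,
    #                           observation_type_code="KWH",
    #                           channel_version=ChannelVersionRequest(...))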
def __init__(self, **kwargs):
super(ChannelCreate, self).__init__(**kwargs)
self.interval_minutes = kwargs.get('interval_minutes', None)
self.observation_type_code = kwargs.get('observation_type_code', None)
self.channel_description = kwargs.get('channel_description', None)
self.channel_import_id = kwargs.get('channel_import_id', None)
        self.channel_version = kwargs.get('channel_version', None)

# /LabtoolSuite-0.1.3.tar.gz/LabtoolSuite-0.1.3/Labtools/experiment.py
import os
os.environ['QT_API'] = 'pyqt'
import sip
sip.setapi("QString", 2)
sip.setapi("QVariant", 2)
# Import the core and GUI elements of Qt
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtGui as Widgets
# Import the console machinery from ipython
from IPython.qt.console.rich_ipython_widget import RichIPythonWidget
import sys
import functools,random
import scipy.optimize as optimize
import scipy.fftpack as fftpack
from Labtools.templates import template_exp
import time,sys
from customui_rc import *
import custom_widgets as Widgets
import numpy as np
import pyqtgraph as pg
import pyqtgraph.opengl as gl
import sys
#_fromUtf8 = QString.fromUtf8
class QIPythonWidget(RichIPythonWidget):
def __init__(self,customBanner=None,*args,**kwargs):
print 'importing'
from IPython.qt.inprocess import QtInProcessKernelManager
print 'import #2'
from IPython.lib import guisupport
if customBanner!=None: self.banner=customBanner
print 'initializing'
super(QIPythonWidget, self).__init__(*args,**kwargs)
print 'kernel manager creating'
self.kernel_manager = kernel_manager = QtInProcessKernelManager()
print 'kernel manager starting'
kernel_manager.start_kernel()
kernel_manager.kernel.gui = 'qt4'
self.kernel_client = kernel_client = self._kernel_manager.client()
kernel_client.start_channels()
def stop():
kernel_client.stop_channels()
kernel_manager.shutdown_kernel()
guisupport.get_app_qt4().exit()
self.exit_requested.connect(stop)
def pushVariables(self,variableDict):
""" Given a dictionary containing name / value pairs, push those variables to the IPython console widget """
self.kernel_manager.kernel.shell.push(variableDict)
def clearTerminal(self):
""" Clears the terminal """
self._control.clear()
def printText(self,text):
""" Prints some plain text to the console """
self._append_plain_text(text)
def executeCommand(self,command):
""" Execute a command in the frame of the console widget """
self._execute(command,False)
class ConvenienceClass():
"""
This class contains methods that simplify setting up and running
an experiment.
The :func:`arbitFit` method accepts two arrays, the fitting function,
and a keyword argument 'guess' that is an array containing
	guess values for the various fitting parameters.
Guess values can be obtained using the :func:`getGuessValues` based on
a keyword argument 'func' which as of this moment can be either 'sine'
or 'damped sine'
"""
timers=[]
def __init__(self):
self.timers=[]
def loopTask(self,interval,func,*args):
"""
Creates a QTimer that executes 'func' every 'interval' milliseconds
all additional arguments passed to this function are passed on as
arguments to func
Refer to the source code for experiments such as diodeIV, Bandpass filter etc.
"""
timer = QTimer()
timerCallback = functools.partial(func,*args)
timer.timeout.connect(timerCallback)
timer.start(interval)
self.timers.append(timer)
return timer
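	# Example (sketch; `update_plot` and `curve_id` are hypothetical):
	#   self.loopTask(500, self.update_plot, curve_id)   # call every 500 ms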
def delayedTask(self,interval,func,*args):
"""
Creates a QTimer that executes 'func' once after 'interval' milliseconds.
all additional arguments passed to this function are passed on as
arguments to func
"""
timer = QTimer()
timerCallback = functools.partial(func,*args)
timer.singleShot(interval,timerCallback)
self.timers.append(timer)
def random_color(self):
c=(random.randint(20,255),random.randint(20,255),random.randint(20,255))
if np.average(c)<150:
c=self.random_color()
return c
def displayObjectContents(self,d):
"""
The contents of the dictionary 'd' are displayed in a new QWindow
"""
self.tree = pg.DataTreeWidget(data=d)
self.tree.show()
self.tree.setWindowTitle('Data')
self.tree.resize(600,600)
def dampedSine(self,x, amp, freq, phase,offset,damp):
"""
A damped sine wave function
"""
return offset + amp*np.exp(-damp*x)*np.sin(abs(freq)*x + phase)
def fitData(self,xReal,yReal,**args):
def mysine(x, a1, a2, a3,a4):
return a4 + a1*np.sin(abs(a2)*x + a3)
N=len(xReal)
yhat = fftpack.rfft(yReal)
idx = (yhat**2).argmax()
freqs = fftpack.rfftfreq(N, d = (xReal[1]-xReal[0])/(2*np.pi))
frequency = freqs[idx]
amplitude = (yReal.max()-yReal.min())/2.0
offset = yReal.max()-yReal.min()
frequency=args.get('frequency',1e6*abs(frequency)/(2*np.pi))*(2*np.pi)/1e6
phase=args.get('phase',0.)
guess = [amplitude, frequency, phase,offset]
try:
(amplitude, frequency, phase,offset), pcov = optimize.curve_fit(mysine, xReal, yReal, guess)
ph = ((phase)*180/(np.pi))
if(frequency<0):
#print 'negative frq'
return 0,0,0,0,pcov
if(amplitude<0):
#print 'AMP<0'
ph-=180
if(ph<-90):ph+=360
if(ph>360):ph-=360
freq=1e6*abs(frequency)/(2*np.pi)
amp=abs(amplitude)
if(frequency): period = 1./frequency
else: period = 0
pcov[0]*=1e6
return amp,freq,ph,offset,pcov
except:
return 0,0,0,0,[[]]
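	# fitData returns (amplitude, frequency, phase in degrees, offset, covariance);
	# the 1e6 factors assume the x array is sampled in microseconds.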
def getGuessValues(self,xReal,yReal,func='sine'):
if(func=='sine' or func=='damped sine'):
N=len(xReal)
offset = np.average(yReal)
yhat = fftpack.rfft(yReal-offset)
idx = (yhat**2).argmax()
freqs = fftpack.rfftfreq(N, d = (xReal[1]-xReal[0])/(2*np.pi))
frequency = freqs[idx]
amplitude = (yReal.max()-yReal.min())/2.0
phase=0.
if func=='sine':
return amplitude, frequency, phase,offset
if func=='damped sine':
return amplitude, frequency, phase,offset,0
def arbitFit(self,xReal,yReal,func,**args):
N=len(xReal)
guess=args.get('guess',[])
try:
results, pcov = optimize.curve_fit(func, xReal, yReal,guess)
pcov[0]*=1e6
return True,results,pcov
except:
return False,[],[]
class Experiment(QMainWindow,template_exp.Ui_MainWindow,Widgets.CustomWidgets): #,interface_rev4.Interface
def __init__(self,**args):
self.qt_app = QApplication(sys.argv)
super(Experiment, self).__init__(args.get('parent',None))
#interface_rev4.Interface.__init__(self)
self.setupUi(self)
Widgets.CustomWidgets.__init__(self)
self.timers=[]
self.I = None
self.graphContainer2_enabled=False
self.graphContainer1_enabled=False
self.console_enabled=False
self.output_enabled=False
self.viewBoxes=[]
self.plot_areas=[]
self.plots3D=[]
self.plots2D=[]
self.total_plot_areas=0
self.widgetBay = False
#self.additional_handle = QSplitterHandle(Qt.Horizontal,self.graph_splitter)
#self.graph_splitter.addWidget(self.additional_handle)
if(args.get('showresult',True)):
dock = QDockWidget()
dock.setFeatures(QDockWidget.DockWidgetMovable|QDockWidget.DockWidgetFloatable)#|QDockWidget.DockWidgetVerticalTitleBar)
dock.setWindowTitle("Results")
self.output_text = QTextEdit()
self.output_text.setReadOnly(True)
fr = QFrame()
plt = QGridLayout(fr)
plt.setMargin(0)
plt.addWidget(self.output_text)
self.output_enabled=True
sys.stdout = self.relay_to_console(self.output_text)
dock.setWidget(fr)
self.result_dock=dock
self.output_text.setStyleSheet("color: rgb(255, 255, 255);")
self.addDockWidget(Qt.BottomDockWidgetArea, dock)
def __resizeHack__():
self.result_dock.setMaximumHeight(100)
self.qt_app.processEvents()
self.result_dock.setMaximumHeight(2500)
self.delayedTask(0,__resizeHack__)
if(args.get('handler',False)):
self.addHandler(args.get('handler'))
def addPlotArea(self):
fr = QFrame(self.graph_splitter)
fr.setFrameShape(QFrame.StyledPanel)
fr.setFrameShadow(QFrame.Raised)
fr.setMinimumHeight(250)
self.total_plot_areas+=1
fr.setObjectName("plot"+str(self.total_plot_areas))
plt = QGridLayout(fr)
plt.setMargin(0)
self.plot_areas.append(plt)
return len(self.plot_areas)-1
def add3DPlot(self):
plot3d = gl.GLViewWidget()
#gx = gl.GLGridItem();gx.rotate(90, 0, 1, 0);gx.translate(-10, 0, 0);self.plot.addItem(gx)
#gy = gl.GLGridItem();gy.rotate(90, 1, 0, 0);gy.translate(0, -10, 0);self.plot.addItem(gy)
gz = gl.GLGridItem();#gz.translate(0, 0, -10);
plot3d.addItem(gz);
plot3d.opts['distance'] = 40
plot3d.opts['elevation'] = 5
plot3d.opts['azimuth'] = 20
pos=self.addPlotArea()
self.plot_areas[pos].addWidget(plot3d)
self.plots3D.append(plot3d)
plot3d.plotLines3D=[]
return plot3d
def add2DPlot(self):
plot=pg.PlotWidget()
pos=self.addPlotArea()
self.plot_areas[pos].addWidget(plot)
plot.viewBoxes=[]
plot.addLegend(offset=(-1,1))
self.plots2D.append(plot)
return plot
def add2DPlots(self,num):
for a in range(num):yield self.add2DPlot()
def add3DPlots(self,num):
for a in range(num):yield self.add3DPlot()
def enableRightAxis(self,plot):
p = pg.ViewBox()
plot.showAxis('right')
plot.setMenuEnabled(False)
plot.scene().addItem(p)
plot.getAxis('right').linkToView(p)
p.setXLink(plot)
plot.viewBoxes.append(p)
Callback = functools.partial(self.updateViews,plot)
plot.getViewBox().sigStateChanged.connect(Callback)
return p
def updateViews(self,plot):
for a in plot.viewBoxes:
a.setGeometry(plot.getViewBox().sceneBoundingRect())
def configureWidgetBay(self,name='controls'):
if(self.widgetBay):return
dock = QDockWidget()
dock.setFeatures(QDockWidget.DockWidgetMovable|QDockWidget.DockWidgetFloatable)#|QDockWidget.DockWidgetVerticalTitleBar)
dock.setWindowTitle(name)
fr = QFrame()
fr.setStyleSheet("QLineEdit {color: rgb(0,0,0);}QPushButton, QLabel ,QComboBox{color: rgb(255, 255, 255);}")
dock.setWidget(fr)
self.addDockWidget(Qt.LeftDockWidgetArea, dock)
self.frame_area = QVBoxLayout(fr)
self.frame_area.setMargin(0)
self.widgetBay = True
def updateWidgetBay(self,obj):
self.configureWidgetBay()
self.frame_area.addWidget(obj)
def addHandler(self,handler,name = 'Controls'):
'''
Add handler instance(subclass of QFrame) to the left side of the window.
The contents of the handler are QWidgets which control various aspects
of the experiment that the handler has been designed for.
'''
self.configureWidgetBay(name)
self.frame=handler
self.updateWidgetBay(self.frame)
#self.updateWidgetBay(self.frame)
try:
self.I = handler.I
if(self.console_enabled):
self.ipyConsole.pushVariables({"I":self.I})
self.ipyConsole.printText("Access hardware using the Instance 'I'. e.g. I.get_average_voltage(0)")
except:
print 'Device Not Connected.'
def addConsole(self,**args):
dock = QDockWidget()
dock.setFeatures(QDockWidget.DockWidgetMovable|QDockWidget.DockWidgetFloatable)#|QDockWidget.DockWidgetVerticalTitleBar)
dock.setWindowTitle("plot"+str(self.total_plot_areas+1))
fr = QFrame()
dock.setWidget(fr)
self.addDockWidget(Qt.BottomDockWidgetArea, dock)
fr.setFrameShape(QFrame.StyledPanel)
fr.setFrameShadow(QFrame.Raised)
self.ipyConsole = QIPythonWidget(customBanner="This is an interactive Python Console\n")
layout = QVBoxLayout(fr)
layout.setMargin(0)
layout.addWidget(self.ipyConsole)
cmdDict = {"delayedTask":self.delayedTask,"loopTask":self.loopTask,"addWidget":self.addWidget,"setCommand":self.setCommand,"Widgets":Widgets}
#if self.graphContainer1_enabled:cmdDict["graph"]=self.graph
if self.I :
cmdDict["I"]=self.I
self.ipyConsole.printText("Access hardware using the Instance 'I'. e.g. I.get_average_voltage(0)")
self.ipyConsole.pushVariables(cmdDict)
self.console_enabled=True
def new3dSurface(self,plot,**args):
import scipy.ndimage as ndi
surface3d = gl.GLSurfacePlotItem(z=np.array([[0.1,0.1],[0.1,0.1]]), **args)
#surface3d.shader()['colorMap']=pg.ColorMap(np.array([0.2,0.4,0.6]),np.array([[255,0,0,255],[0,255,0,255],[0,255,255,255]])).getLookupTable()
#surface3d.shader()['colorMap'] = np.array([0.2, 2, 0.5, 0.2, 1, 1, 0.2, 0, 2])
plot.addItem(surface3d)
return surface3d
def setSurfaceData(self,surf,z):
surf.setData(z=np.array(z))
def draw3dLine(self,plot,x,y,z,color=(100,100,100)):
pts = np.vstack([x,y,z]).transpose()
plt = gl.GLLinePlotItem(pos=pts, color=pg.glColor(color),width=2)
plot.addItem(plt)
plot.plotLines3D.append(plt)
return plt
def clearLinesOnPlane(self,plot):
for a in plot.plotLines3D:
plot.removeItem(a)# a.setData(pos=[[0,0,0]])
plot.plotLines3D=[]
class relay_to_console():
def __init__(self,console):
self.console = console
self.cursor = self.console.textCursor()
self.scroll=self.console.verticalScrollBar()
def write(self,arg):
f=open('b.txt','at')
self.cursor.movePosition(QTextCursor.End)
self.console.setTextCursor(self.cursor)
self.console.insertPlainText(arg)
#self.scroll.setValue(self.scroll.maximum())
		f.write(arg)
		f.close()
def flush(self):
pass
def graph(self,x,y):
if(self.graphContainer1_enabled): self.reserved_curve.setData(x,y)
def setRange(self,plot,x,y,width,height):
plot.setRange(QtCore.QRectF(x,y,width,height))
def addCurve(self,plot,name='',col=(255,255,255),axis='left'):
#if(len(name)):curve = plot.plot(name=name)
#else:curve = plot.plot()
if(len(name)):curve = pg.PlotCurveItem(name=name)
else:curve = pg.PlotCurveItem()
plot.addItem(curve)
curve.setPen(color=col, width=1)
return curve
def rebuildLegend(plot,self):
self.plotLegend = plot.addLegend(offset=(-10,30))
def loopTask(self,interval,func,*args):
timer = QTimer()
timerCallback = functools.partial(func,*args)
timer.timeout.connect(timerCallback)
timer.start(interval)
self.timers.append(timer)
return timer
def delayedTask(self,interval,func,*args):
timer = QTimer()
timerCallback = functools.partial(func,*args)
timer.singleShot(interval,timerCallback)
self.timers.append(timer)
def run(self):
self.show()
self.qt_app.exec_()
def add_a_widget(self):
self.addButton('testing')
def addButton(self,name,command,*args):
b=QPushButton(None)
b.setText(name)
self.updateWidgetBay(b)
self.setCommand(b,"clicked()",command,*args)
return b
def addWidget(self,widget_type,**args):
b=widget_type(**args)
if(args.has_key('object_name')): b.setObjectName(args.get('object_name'))
if(args.has_key('text')): b.setText(args.get('text'))
if(args.has_key('items')):
for a in args.get('items'): b.addItem(a)
self.updateWidgetBay(b)
return b
def setCommand(self,widget,signal,slot,*args):
buttonCallback = functools.partial(slot,*args)
		QObject.connect(widget, SIGNAL(signal), buttonCallback)
/Mathics_Django-6.0.0-py3-none-any.whl/mathics_django/web/media/js/mathjax/localization/pl/MathMenu.js | MathJax.Localization.addTranslation("pl","MathMenu",{version:"2.7.9",isLoaded:true,strings:{Show:"Poka\u017C wzory jako",MathMLcode:"Kod MathML",OriginalMathML:"Oryginalny MathML",TeXCommands:"Polecenia TeX",AsciiMathInput:"Wej\u015Bcie AsciiMathML",Original:"Oryginalny formularz",ErrorMessage:"Komunikat o b\u0142\u0119dzie",Annotation:"Adnotacja",TeX:"TeX",StarMath:"StarMath",Maple:"Maple",ContentMathML:"Zawarto\u015B\u0107 MathML",OpenMath:"OpenMath",texHints:"Poka\u017C wskaz\u00F3wki TeX w MathML",Settings:"Ustawienia wzor\u00F3w",ZoomTrigger:"Zwi\u0119kszanie zoomu",Hover:"poprzez najechanie mysz\u0105",Click:"poprzez klikni\u0119cie",DoubleClick:"poprzez dwukrotnie klikni\u0119cie",NoZoom:"Bez zoomu",TriggerRequires:"Aktywacja wymaga:",Option:"Option",Alt:"Alt",Command:"Command",Control:"Ctrl",Shift:"Shift",ZoomFactor:"Wsp\u00F3\u0142czynnik powi\u0119kszenia",Renderer:"Renderowanie wzor\u00F3w",MPHandles:"Obs\u0142u\u017C MathPlayer",MenuEvents:"Zdarzenia menu",MouseEvents:"Zdarzenia myszy",MenuAndMouse:"Zdarzenia myszy i menu",FontPrefs:"Ustawienia czcionek",ForHTMLCSS:"Dla HTML-CSS:",Auto:"Automatycznie",TeXLocal:"TeX (lokalny)",TeXWeb:"TeX (www)",TeXImage:"TeX (obraz)",STIXLocal:"STIX (lokalny)",ContextMenu:"Menu kontekstowe",Browser:"Przegl\u0105darka",Scale:"Skalowanie wszystkich wzor\u00F3w...",Discoverable:"Podkre\u015Bl po najechaniu kursora",Locale:"J\u0119zyk",LoadLocale:"Pobierz z URL...",About:"O MathJax",Help:"Pomoc MathJax",localTeXfonts:"U\u017Cyj lokalnej czcionki TeX",webTeXfonts:"U\u017Cyj internetowej czcionki TeX",imagefonts:"U\u017Cyj czcionki obrazkowej",localSTIXfonts:"U\u017Cyj lokalnej czcionki STIX",webSVGfonts:"U\u017Cyj internetowej czcionki SVG",genericfonts:"U\u017Cyj generowanej czcionki unicode",wofforotffonts:"czcionki WOFF lub OTF",eotffonts:"czcionki EOT",svgfonts:"czcionki SVG",WebkitNativeMMLWarning:"Twoja przegl\u0105darka nie obs\u0142uguje MathML, wi\u0119c zmiana wyj\u015Bcia do MathML mo\u017Ce spowodowa\u0107, \u017Ce strona stanie si\u0119 niemo\u017Cliwa do odczytania.",MSIENativeMMLWarning:"Program Internet Explorer wymaga wtyczki MathPlayer do procesu wy\u015Bwietlania MathML.",OperaNativeMMLWarning:"Wsparcie dla MathML w Operze jest ograniczone. W zwi\u0105zku z tym zmiana wyj\u015Bcia na MathML mo\u017Ce spowodowa\u0107, \u017Ce niekt\u00F3re strony b\u0119d\u0105 niemo\u017Cliwe do odczytania.",SafariNativeMMLWarning:"MathML zaimplementowany w twojej przegl\u0105darce nie obs\u0142uguje wszystkich mo\u017Cliwo\u015Bci MathJax, wi\u0119c cz\u0119\u015B\u0107 wyra\u017Cen mo\u017Ce nie renderowa\u0107 si\u0119 poprawnie.",FirefoxNativeMMLWarning:"MathML zaimplementowany w twojej przegl\u0105darce nie obs\u0142uguje wszystkich mo\u017Cliwo\u015Bci MathJax, wi\u0119c cz\u0119\u015B\u0107 wyra\u017Ce\u0144 mo\u017Ce nie renderowa\u0107 si\u0119 poprawnie.",MSIESVGWarning:"SVG nie jest zaimplementowane w Internet Explorerze do wersji 9 lub podczas emulowania IE8 lub poni\u017Cej, wi\u0119c zmiana wyj\u015Bcia do SVG mo\u017Ce spowodowa\u0107, \u017Ce strona stanie si\u0119 niemo\u017Cliwa do odczytania.",LoadURL:"Za\u0142aduj t\u0142umaczenie z tego URL:",BadURL:'Adres URL powinien by\u0107 dla pliku JavaScript, kt\u00F3ry definiuje dane t\u0142umaczenie MathJax. 
Pliki JavaScript powinny ko\u0144czy\u0107 si\u0119 ".js"',BadData:"Nie mo\u017Cna za\u0142adowa\u0107 danych t\u0142umacze\u0144 z %1",SwitchAnyway:"Na pewno zmieni\u0107 renderer ?\n\n(Naci\u015Bnij OK a\u017Ceby zmieni\u0107, lub CANCEL aby kontynuowa\u0107 z aktualnym rendererem)",ScaleMath:"Skaluj wszystkie wzory matematyczne (por\u00F3wnane do otaczaj\u0105cego tekstu) przez",NonZeroScale:"Warto\u015B\u0107 nie powinna by\u0107 zerowa",PercentScale:"Warto\u015B\u0107 powinna by\u0107 w procentach (na przyk\u0142ad 120%%)",IE8warning:"Ta opcja wy\u0142\u0105czy obs\u0142ug\u0119 menu i powi\u0119kszania w MathJax, ale mo\u017Cesz klikn\u0105\u0107 z Altem na wz\u00F3r, aby otworzy\u0107 menu MathJax.\n\nCzy na pewno chcesz zmieni\u0107 ustawienia MathPlayer?",IE9warning:"Menu kontekstowe MathJax zostanie wy\u0142\u0105czone, ale mo\u017Cesz klikn\u0105\u0107 z Altem na wz\u00F3r, aby otworzy\u0107 menu.",NoOriginalForm:"Brak wzor\u00F3w w oryginalnej postaci",Close:"Zamknij",EqSource:"\u0179r\u00F3d\u0142o wzoru MathJax",STIXWeb:"STIX (www)",AsanaMathWeb:"Asana Math (www)",GyrePagellaWeb:"Gyre Pagella (www)",GyreTermesWeb:"Gyre Termes (www)",LatinModernWeb:"Latin Modern (www)",NeoEulerWeb:"Neo Euler (www)",CloseAboutDialog:"Zamknij okno o MathJax",FastPreview:"Szybki podgl\u0105d strony",AssistiveMML:"Asystuj\u0105cy MathML",InTabOrder:"Zawarty w kolejno\u015Bci stron"}});MathJax.Ajax.loadComplete("[MathJax]/localization/pl/MathMenu.js"); | PypiClean |
# /EVE-SRP-0.12.11.tar.gz/EVE-SRP-0.12.11/src/evesrp/migrate/versions/4280bf2417c_.py
revision = '4280bf2417c'
down_revision = '2976d59f286'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import update, select, table, column, or_
import evesrp.transformers
division = table('division',
column('id', sa.Integer),
column('ship_transformer', sa.PickleType),
column('pilot_transformer', sa.PickleType),
)
transformerref = table('transformerref',
column('division_id', sa.Integer),
column('attribute_name', sa.String(length=50)),
column('transformer', sa.PickleType),
)
# This is tricky: Ensure that evesrp.transformers has ShipTransformer and
# PilotTransformer classes so pickle can unpack them
for legacy_transformer in ('ShipTransformer', 'PilotTransformer'):
if not hasattr(evesrp.transformers, legacy_transformer):
new_class = type(legacy_transformer,
(evesrp.transformers.Transformer,), {})
setattr(evesrp.transformers, legacy_transformer, new_class)
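# upgrade() folds the old per-division ship/pilot transformer columns into rows of
# the new transformerref table; downgrade() reverses this (lossily, see below).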
def upgrade():
# Create new transformerref table
op.create_table('transformerref',
sa.Column('id', sa.Integer, nullable=False, primary_key=True),
sa.Column('attribute_name', sa.String(length=50), nullable=False),
sa.Column('transformer', sa.PickleType, nullable=False),
sa.Column('division_id', sa.Integer, nullable=False),
sa.ForeignKeyConstraint(['division_id'], ['division.id'], ),
sa.UniqueConstraint('division_id', 'attribute_name')
)
# Migrate ship and pilot transformers
conn = op.get_bind()
columns = [division.c.id, division.c.ship_transformer,
division.c.pilot_transformer]
transformer_sel = select(columns)\
.where(or_(
division.c.ship_transformer != None,
division.c.pilot_transformer != None
))
transformer_rows = conn.execute(transformer_sel)
new_transformers = []
for division_id, ship_transformer, pilot_transformer in transformer_rows:
if ship_transformer is not None:
transformer = evesrp.transformers.Transformer(
ship_transformer.name,
ship_transformer.slug)
new_transformers.append({
'attribute_name': 'ship_type',
'transformer': transformer,
'division_id': division_id,
})
if pilot_transformer is not None:
transformer = evesrp.transformers.Transformer(
pilot_transformer.name,
pilot_transformer.slug)
new_transformers.append({
'attribute_name': 'pilot',
'transformer': transformer,
'division_id': division_id,
})
transformer_rows.close()
op.bulk_insert(transformerref, new_transformers)
# Drop old columns
op.drop_column('division', 'ship_transformer')
op.drop_column('division', 'pilot_transformer')
def downgrade():
# Add ship and pilot transformer columns back to division
op.add_column('division', sa.Column('ship_transformer', sa.PickleType))
op.add_column('division', sa.Column('pilot_transformer', sa.PickleType))
# Convert transformerrefs back to the old columns
conn = op.get_bind()
columns = [
transformerref.c.division_id,
transformerref.c.attribute_name,
transformerref.c.transformer,
]
transformer_sel = select(columns)\
.where(or_(
transformerref.c.attribute_name == 'ship_type',
transformerref.c.attribute_name == 'pilot',
))
transformer_rows = conn.execute(transformer_sel)
for division_id, attribute_name, transformer in transformer_rows:
if attribute_name == 'ship_type':
colname = 'ship'
transformer_class = evesrp.transformers.ShipTransformer
elif attribute_name == 'pilot':
colname = 'pilot'
transformer_class = evesrp.transformers.PilotTransformer
colname += '_transformer'
transformer = transformer_class(transformer.name, transformer.slug)
update_stmt = update(division)\
.where(division.c.id == division_id)\
.values({
colname: transformer
})
conn.execute(update_stmt)
transformer_rows.close()
# Drop the transformerref table. This is going to be lossy.
    op.drop_table('transformerref')

# /NEURON-9.0a0-cp311-cp311-macosx_10_15_x86_64.whl/neuron/neuroml/morphml.py
from .xml2nrn import *
def cable(self, node):
self.lastcabid_ = -1
if self.in_cablegroup_:
self.cablegroups_[-1].cable_indices_.append(
self.cableid2index_[int(node.get("id"))]
)
else:
cab = self.cables_[self.cableid2index_[int(node.get("id"))]]
self.lastcabid_ = int(node.get("id"))
val = node.get("fractAlongParent")
if val is not None:
cab.px_ = float(val)
else:
cab.px_ = 1.0
cab.name_ = str(node.get("name"))
def cablegroup(self, node):
self.in_cablegroup_ = True
name = str(node.get("name"))
self.cablegroups_.append(CableGroup(name))
self.groupname2index_[name] = len(self.cablegroups_) - 1
def cables(self, node):
pass
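# The <distal>/<proximal> handlers record the 3-D end points of the current segment;
# the <segment> handler below creates or extends the Cable that owns those points.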
def distal(self, node):
pt = Point(self.id, self.pid, self.cid, self.locator.getLineNumber())
self.ptid2pt_[self.id] = pt
pt.set(
float(node.get("x")),
float(node.get("y")),
float(node.get("z")),
float(node.get("diameter")),
)
if self.cable_.pcnt_ == 1:
proxpt = self.points_[len(self.points_) - 1]
if (
proxpt.x_ == pt.x_
and proxpt.y_ == pt.y_
and proxpt.z_ == pt.z_
and proxpt.d_ == pt.d_
):
if debug:
print("Prox and distal same, assuming spherical segment")
pt.y_ = pt.y_ + (pt.d_ / 2.0)
self.points_[len(self.points_) - 1].y_ = self.points_[
len(self.points_) - 1
].y_ - (self.points_[len(self.points_) - 1].d_ / 2.0)
if debug:
print("New distal: " + str(pt))
print("New proximal: " + str(self.points_[len(self.points_) - 1]))
self.points_.append(pt)
self.cable_.pcnt_ += 1
if debug:
print("Distal: " + str(pt))
print("Cable ", self.cable_.id_, " has ", self.cable_.pcnt_, " points")
def proximal(self, node):
self.nprox += 1
pt = Point(-1, self.pid, self.cid, self.locator.getLineNumber())
pt.set(
float(node.get("x")),
float(node.get("y")),
float(node.get("z")),
float(node.get("diameter")),
)
self.points_.append(pt)
self.cable_.pcnt_ += 1
if debug:
print("Proximal: " + str(pt))
print("Cable ", self.cable_.id_, " has ", self.cable_.pcnt_, " points")
def segment(self, node):
self.id = int(node.get("id"))
self.cid = int(node.get("cable"))
parent_cable_id = -1
p = node.get("parent")
if p is not None:
self.pid = int(p)
parent_cable_id = self.ptid2pt_[self.pid].cid_
else:
self.pid = -1
if debug:
print(
"\nsegment id=",
self.id,
" cable=",
self.cid,
" parent id=",
self.pid,
" parent_cable_id=",
parent_cable_id,
)
if self.cable_ is None:
self.cable_ = Cable(self.cid, self.pid, len(self.points_))
self.cableid2index_[self.cid] = len(self.cables_)
self.cables_.append(self.cable_)
self.cable_.parent_cable_id_ = parent_cable_id
if self.cable_.id_ != self.cid:
self.cable_ = Cable(self.cid, self.pid, len(self.points_))
self.cableid2index_[self.cid] = len(self.cables_)
self.cables_.append(self.cable_)
self.cable_.parent_cable_id_ = parent_cable_id
def segments(self, node):
self.in_cablegroup_ = False
self.points_ = []
self.cables_ = []
self.cable_ = None
self.id = -1
self.cid = -1
self.pid = -1
self.nprox = 0
self.cableid2index_ = {}
self.ptid2pt_ = {}
self.cablegroups_ = []
self.groupname2index_ = {}
def segments_end(self, node):
if debug:
print("\nEnd of segments element")
ic = 0
ip = 0
for cab in self.cables_:
ic += 1
for i in range(cab.first_, cab.first_ + cab.pcnt_):
pt = self.points_[i]
print(ip, pt.id_, pt.pid_, pt.x_, pt.y_, pt.z_, pt.d_)
ip += 1
print("ncable=", ic, " npoint=", ip, " nprox=", self.nprox, "\n")
    return

# /BobBuildTool-0.23.1.tar.gz/BobBuildTool-0.23.1/pym/bob/cmds/build/status.py
from ...builder import LocalBuilder, checkoutsFromState
from ...input import RecipeSet
from ...scm import getScm, ScmTaint, ScmStatus
from ...state import BobState
from ...tty import colorize, ERROR, WARNING, EXECUTED, DEFAULT, SKIPPED, \
IMPORTANT, NORMAL, INFO, DEBUG, TRACE, HEADLINE
from ...utils import joinLines, processDefines
from textwrap import indent
import argparse
import os
from .state import DevelopDirOracle
__all__ = ['doStatus']
# Flag to headline verbosity. The description is shown on the next level.
FLAG_TO_VERBOSITY = {
ScmTaint.attic : NORMAL,
ScmTaint.collides : NORMAL, # not modified, but will break the build
ScmTaint.error : IMPORTANT, # error, modified
ScmTaint.modified : NORMAL, # modified
ScmTaint.new : NORMAL,
ScmTaint.overridden : DEBUG,
ScmTaint.switched : NORMAL, # modified
ScmTaint.unknown : NORMAL, # cannot tell, could be modified
ScmTaint.unpushed_main : NORMAL, # modified
    ScmTaint.unpushed_local : INFO, # not modified but user may lose data
}
assert set(FLAG_TO_VERBOSITY.keys()) == set(ScmTaint)
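# Prints the SCM status of one package's checkout step; the package headline is
# emitted lazily, only once something is actually shown at the current verbosity.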
class PackagePrinter:
def __init__(self, verbose, showClean, showOverrides, checkoutStep = None):
self.verbose = verbose
self.flagVerbosity = FLAG_TO_VERBOSITY.copy()
self.showClean = showClean
if showOverrides: self.flagVerbosity[ScmTaint.overridden] = NORMAL
self.headerShown = checkoutStep is None
self.checkoutStep = checkoutStep
def __printHeader(self):
if not self.headerShown:
print(">>", colorize("/".join(self.checkoutStep.getPackage().getStack()),
HEADLINE))
self.headerShown = True
def __printStatus(self, flags, message, color):
print(colorize(" STATUS {0: <4} {1}".format(flags, message), color))
def show(self, status, dir):
detailedFlags = { flag for flag,severity in self.flagVerbosity.items()
if severity < self.verbose }
# Determine severity of headline. If showClean start directly at NORMAL
# level.
severity = NORMAL if self.showClean else DEBUG
for flag in status.flags:
severity = min(self.flagVerbosity[flag], severity)
flags = str(status)
if status.error:
color = ERROR
elif status.dirty or (status.flags & {ScmTaint.unknown, ScmTaint.collides}):
color = WARNING
elif flags:
color = EXECUTED
else:
color = DEFAULT
if severity <= self.verbose:
self.__printHeader()
self.__printStatus(flags, dir, color)
description = status.description(detailedFlags)
if description:
for line in description.splitlines():
print(' ' + line)
def skipped(self):
# skipped workspaces are shown only on '-vvv' at least
if TRACE <= self.verbose:
self.__printHeader()
self.__printStatus("",
"skipped ({} does not exist)".format(self.checkoutStep.getWorkspacePath()),
SKIPPED)
ATTIC = ScmStatus(ScmTaint.attic,
description="> Recipe changed. Will be moved to attic on next checkout.")
UNKNOWN = ScmStatus(ScmTaint.unknown,
description="> Workspace too old. Cannot determine status.")
class Printer:
def __init__(self, recurse, verbose, showClean, showOverrides, showAttic):
self.recurse = recurse
self.verbose = verbose
self.showClean = showClean
self.showOverrides = showOverrides
self.doneSteps = set()
self.donePackages = set()
self.showAttic = showAttic
def __showCheckoutStep(self, pp, checkoutStep):
workspace = checkoutStep.getWorkspacePath()
oldCheckoutState = BobState().getDirectoryState(workspace, True)
checkoutState = checkoutStep.getScmDirectories()
scms = { scm.getDirectory() : scm for scm in checkoutStep.getScmList() }
result = {}
# First scan old checkout state. This is what the user is most
# interested in. The recipe might have changed compared to the
# persisted state!
for (scmDir, (scmDigest, scmSpec)) in checkoutsFromState(oldCheckoutState):
if not os.path.exists(os.path.join(workspace, scmDir)): continue
if scmDigest == checkoutState.get(scmDir, (None, None))[0]:
# The digest still matches -> use recipe values
status = scms[scmDir].status(workspace)
elif scmSpec is not None:
# New project that kept scm spec -> compare with that and mark
# as attic
status = getScm(scmSpec).status(workspace)
status.merge(ATTIC)
else:
# Don't know anything about it except that this will be moved
# to the attic
status = ScmStatus()
status.merge(ATTIC)
status.merge(UNKNOWN)
result[scmDir] = status
# Additionally scan current checkout state to find new checkouts and
# determinte override status.
for scmDir in checkoutState.keys():
status = result.setdefault(scmDir, ScmStatus(ScmTaint.new))
if (ScmTaint.new in status.flags) and os.path.exists(os.path.join(workspace, scmDir)):
status.add(ScmTaint.collides,
"> Collides with existing file in workspace.")
elif ScmTaint.attic in status.flags:
status.add(ScmTaint.new)
# The override status is taken from the recipe scm. This is
# independent of any actual checkout.
overrides = scms[scmDir].getActiveOverrides()
for o in overrides:
status.add(ScmTaint.overridden, joinLines("> Overridden by:",
indent(str(o), ' ')))
for (scmDir, status) in sorted(result.items()):
pp.show(status, os.path.join(workspace, scmDir))
def __showAtticDirs(self, pp, prefix=""):
for d in sorted(BobState().getAtticDirectories()):
if not os.path.isdir(d):
BobState().delAtticDirectoryState(d)
continue
if not d.startswith(prefix): continue
scmSpec = BobState().getAtticDirectoryState(d)
if scmSpec is not None:
# We must remove the 'dir' propery if present because the attic
# directory is already the final directory.
if 'dir' in scmSpec: del scmSpec['dir']
status = getScm(scmSpec).status(d)
else:
status = UNKNOWN
pp.show(status, d)
def showPackage(self, package):
if package._getId() in self.donePackages: return
self.donePackages.add(package._getId())
checkoutStep = package.getCheckoutStep()
if checkoutStep.isValid() and (checkoutStep.getVariantId() not in self.doneSteps):
pp = PackagePrinter(self.verbose, self.showClean, self.showOverrides,
checkoutStep)
workspace = checkoutStep.getWorkspacePath()
if workspace is not None:
if os.path.isdir(workspace):
self.__showCheckoutStep(pp, checkoutStep)
else:
pp.skipped()
if self.showAttic:
# The last path element (/workspace) must be removed because
# attics are located next to the workspace, not inside it.
self.__showAtticDirs(pp, os.path.dirname(workspace))
self.doneSteps.add(checkoutStep.getVariantId())
if self.recurse:
for d in package.getDirectDepSteps():
self.showPackage(d.getPackage())
def showAllDirs(self, showAttic):
pp = PackagePrinter(self.verbose, self.showClean, self.showOverrides)
for workspace in sorted(BobState().getDirectories()):
dirState = BobState().getDirectoryState(workspace, False)
# Only the checkout state is stored as dict. Use that to find out
# which are the right directories.
if not isinstance(dirState, dict):
continue
if not os.path.isdir(workspace):
BobState().delDirectoryState(workspace)
continue
# Upgrade from old format without scmSpec.
dirState = sorted(
(dir, state) if isinstance(state, tuple) else (dir, (state, None))
for dir,state in checkoutsFromState(dirState))
for (scmDir, (scmDigest, scmSpec)) in dirState:
scmDir = os.path.join(workspace, scmDir)
if scmSpec is not None:
status = getScm(scmSpec).status(workspace)
else:
status = UNKNOWN
pp.show(status, scmDir)
if showAttic:
self.__showAtticDirs(pp)
def doStatus(argv, bobRoot):
parser = argparse.ArgumentParser(prog="bob status", description='Show SCM status')
parser.add_argument('packages', nargs='*', help="(Sub-)packages")
group = parser.add_mutually_exclusive_group()
group.add_argument('--develop', action='store_true', dest='develop', help="Use developer mode", default=True)
group.add_argument('--release', action='store_false', dest='develop', help="Use release mode")
parser.add_argument('-c', dest="configFile", default=[], action='append',
help="Use config File")
parser.add_argument('-D', default=[], action='append', dest="defines",
help="Override default environment variable")
parser.add_argument('--attic', action='store_true',
help="Additionally look in/for attic directories")
parser.add_argument('-r', '--recursive', default=False, action='store_true',
help="Recursively display dependencies")
group = parser.add_mutually_exclusive_group()
group.add_argument('--sandbox', action='store_true', help="Enable sandboxing")
group.add_argument('--no-sandbox', action='store_false', dest='sandbox', help="Disable sandboxing")
parser.set_defaults(sandbox=None)
parser.add_argument('--show-clean', action='store_true',
help="Show SCM status even if checkout is unmodified")
parser.add_argument('--show-overrides', action='store_true',
help="Show SCM status if affected by an scmOverrides")
parser.add_argument('-v', '--verbose', default=NORMAL, action='count',
help="Increase verbosity (may be specified multiple times)")
args = parser.parse_args(argv)
if args.sandbox == None:
args.sandbox = not args.develop
defines = processDefines(args.defines)
recipes = RecipeSet()
recipes.defineHook('releaseNameFormatter', LocalBuilder.releaseNameFormatter)
recipes.defineHook('developNameFormatter', LocalBuilder.developNameFormatter)
recipes.defineHook('developNamePersister', None)
recipes.setConfigFiles(args.configFile)
recipes.parse(defines)
if args.develop:
# Develop names are stable. All we need to do is to replicate build's algorithm,
# and when we produce a name, check whether it exists.
nameFormatter = recipes.getHook('developNameFormatter')
developPersister = DevelopDirOracle(nameFormatter, recipes.getHook('developNamePersister'))
nameFormatter = developPersister.getFormatter()
else:
# Release names are taken from persistence.
nameFormatter = LocalBuilder.releaseNameInterrogator
nameFormatter = LocalBuilder.makeRunnable(nameFormatter)
packages = recipes.generatePackages(nameFormatter, args.sandbox)
if args.develop: developPersister.prime(packages)
# Dummy query of attic directories. Will warn if project directory was
# created before Bob 0.15 where they were not tracked!
if args.attic:
BobState().getAtticDirectories()
# Set BobState into asynchronous mode because we might remove many entries
# if their directories do not exist anymore.
BobState().setAsynchronous()
try:
printer = Printer(args.recursive, args.verbose, args.show_clean,
args.show_overrides, args.attic)
if args.packages:
for p in args.packages:
for package in packages.queryPackagePath(p):
printer.showPackage(package)
else:
printer.showAllDirs(args.attic)
finally:
BobState().setSynchronous() | PypiClean |
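
# --------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module). doStatus() is normally dispatched by Bob's command-line front end,
# but it can also be driven directly with an argv list that mirrors the
# argparse options declared above. The package name and Bob root path below
# are hypothetical placeholders, and an initialized Bob project is required
# in the current directory.
#
#     doStatus(["--attic", "--recursive", "--show-clean", "mypackage"],
#              "/usr/lib/bob")
#
# Equivalent shell invocation:
#
#     bob status --attic -r --show-clean mypackage
# --------------------------------------------------------------------------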
/observations-0.1.4.tar.gz/observations-0.1.4/observations/r/vote2.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def vote2(path):
"""vote2
Data loads lazily. Type data(vote2) into the console.
A data.frame with 186 rows and 26 variables:
- state. state postal code
- district. U.S. Congressional district
- democ. =1 if incumbent democrat
- vote90. inc. share two-party vote, 1990
- vote88. inc. share two-party vote, 1988
- inexp90. inc. camp. expends., 1990
- chexp90. chl. camp. expends., 1990
- inexp88. inc. camp. expends., 1988
- chexp88. chl. camp. expends., 1988
- prtystr. percent vote pres., same party, 1988
- rptchall. =1 if a repeat challenger
- tenure. years in H.R.
- lawyer. =1 if law degree
- linexp90. log(inexp90)
- lchexp90. log(chexp90)
- linexp88. log(inexp88)
- lchexp88. log(chexp88)
- incshr90. 100\*(inexp90/(inexp90+chexp90))
- incshr88. 100\*(inexp88/(inexp88+chexp88))
- cvote. vote90 - vote88
- clinexp. linexp90 - linexp88
- clchexp. lchexp90 - lchexp88
- cincshr. incshr90 - incshr88
- win88. =1 by definition
- win90. =1 if inc. wins, 1990
- cwin. win90 - win88
https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_
isbn_issn=9781111531041
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `vote2.csv`.
Returns:
Tuple of np.ndarray `x_train` with 186 rows and 26 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'vote2.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/wooldridge/vote2.csv'
maybe_download_and_extract(path, url,
save_file_name='vote2.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata | PypiClean |
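
# Hedged usage sketch (illustrative; not part of the original observations
# module). The cache directory "~/data" is an arbitrary choice: on the first
# call the CSV is downloaded there, afterwards it is read from disk.
if __name__ == "__main__":
    x_train, metadata = vote2("~/data")
    print(x_train.shape)                    # per the docstring: 186 rows, 26 variables
    print(list(metadata["columns"])[:5])    # first few column headers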
/MatchZoo-2.2.0.tar.gz/MatchZoo-2.2.0/matchzoo/datasets/snli/load_data.py |
import typing
from pathlib import Path
import pandas as pd
import keras
import matchzoo
_url = "https://nlp.stanford.edu/projects/snli/snli_1.0.zip"
def load_data(
stage: str = 'train',
task: str = 'classification',
target_label: str = 'entailment',
return_classes: bool = False
) -> typing.Union[matchzoo.DataPack, tuple]:
"""
Load SNLI data.
:param stage: One of `train`, `dev`, and `test`. (default: `train`)
:param task: Could be one of `ranking`, `classification` or a
        :class:`matchzoo.engine.BaseTask` instance. (default: `classification`)
    :param target_label: If `ranking`, choose one of `entailment`,
`contradiction`, `neutral`, and `-` as the positive label.
(default: `entailment`)
:param return_classes: `True` to return classes for classification task,
`False` otherwise.
    :return: A DataPack unless `task` is `classification` and `return_classes`
is `True`: a tuple of `(DataPack, classes)` in that case.
"""
if stage not in ('train', 'dev', 'test'):
raise ValueError(f"{stage} is not a valid stage."
f"Must be one of `train`, `dev`, and `test`.")
data_root = _download_data()
file_path = data_root.joinpath(f'snli_1.0_{stage}.txt')
data_pack = _read_data(file_path)
if task == 'ranking':
task = matchzoo.tasks.Ranking()
if task == 'classification':
task = matchzoo.tasks.Classification()
if isinstance(task, matchzoo.tasks.Ranking):
if target_label not in ['entailment', 'contradiction', 'neutral', '-']:
raise ValueError(f"{target_label} is not a valid target label."
f"Must be one of `entailment`, `contradiction`, "
f"`neutral` and `-`.")
binary = (data_pack.relation['label'] == target_label).astype(float)
data_pack.relation['label'] = binary
return data_pack
elif isinstance(task, matchzoo.tasks.Classification):
classes = ['entailment', 'contradiction', 'neutral', '-']
label = data_pack.relation['label'].apply(classes.index)
data_pack.relation['label'] = label
data_pack.one_hot_encode_label(num_classes=4, inplace=True)
if return_classes:
return data_pack, classes
else:
return data_pack
else:
raise ValueError(f"{task} is not a valid task."
f"Must be one of `Ranking` and `Classification`.")
def _download_data():
ref_path = keras.utils.data_utils.get_file(
'snli', _url, extract=True,
cache_dir=matchzoo.USER_DATA_DIR,
cache_subdir='snli'
)
return Path(ref_path).parent.joinpath('snli_1.0')
def _read_data(path):
table = pd.read_csv(path, sep='\t')
df = pd.DataFrame({
'text_left': table['sentence1'],
'text_right': table['sentence2'],
'label': table['gold_label']
})
df = df.dropna(axis=0, how='any').reset_index(drop=True)
return matchzoo.pack(df) | PypiClean |
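
# Hedged usage sketch (illustrative; not part of the original MatchZoo module).
# On first use this downloads SNLI into MatchZoo's user data directory, then
# builds a DataPack for the classification task and returns the class list
# alongside it.
if __name__ == "__main__":
    train_pack, classes = load_data(
        stage='train', task='classification', return_classes=True)
    print(classes)                   # ['entailment', 'contradiction', 'neutral', '-']
    print(len(train_pack.relation))  # number of (text_left, text_right) pairs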
/Flask_Admin-1.6.1-py3-none-any.whl/flask_admin/model/fields.py | import itertools
from wtforms.validators import ValidationError
from wtforms.fields import FieldList, FormField, SelectFieldBase
try:
from wtforms.fields import _unset_value as unset_value
except ImportError:
from wtforms.utils import unset_value
from flask_admin._compat import iteritems
from .widgets import (InlineFieldListWidget, InlineFormWidget,
AjaxSelect2Widget)
class InlineFieldList(FieldList):
widget = InlineFieldListWidget()
def __init__(self, *args, **kwargs):
super(InlineFieldList, self).__init__(*args, **kwargs)
def __call__(self, **kwargs):
# Create template
meta = getattr(self, 'meta', None)
if meta:
template = self.unbound_field.bind(form=None, name='', _meta=meta)
else:
template = self.unbound_field.bind(form=None, name='')
# Small hack to remove separator from FormField
if isinstance(template, FormField):
template.separator = ''
template.process(None)
return self.widget(self,
template=template,
check=self.display_row_controls,
**kwargs)
def display_row_controls(self, field):
return True
def process(self, formdata, data=unset_value, extra_filters=None):
res = super(InlineFieldList, self).process(
formdata, data)
# Postprocess - contribute flag
if formdata:
for f in self.entries:
key = 'del-%s' % f.id
f._should_delete = key in formdata
return res
def validate(self, form, extra_validators=tuple()):
"""
Validate this FieldList.
Note that FieldList validation differs from normal field validation in
that FieldList validates all its enclosed fields first before running any
of its own validators.
"""
self.errors = []
# Run validators on all entries within
for subfield in self.entries:
if not self.should_delete(subfield) and not subfield.validate(form):
self.errors.append(subfield.errors)
chain = itertools.chain(self.validators, extra_validators)
self._run_validation_chain(form, chain)
return len(self.errors) == 0
def should_delete(self, field):
return getattr(field, '_should_delete', False)
def populate_obj(self, obj, name):
values = getattr(obj, name, None)
try:
ivalues = iter(values)
except TypeError:
ivalues = iter([])
candidates = itertools.chain(ivalues, itertools.repeat(None))
_fake = type(str('_fake'), (object, ), {})
output = []
for field, data in zip(self.entries, candidates):
if not self.should_delete(field):
fake_obj = _fake()
fake_obj.data = data
field.populate_obj(fake_obj, 'data')
output.append(fake_obj.data)
setattr(obj, name, output)
class InlineFormField(FormField):
"""
Inline version of the ``FormField`` widget.
"""
widget = InlineFormWidget()
class InlineModelFormField(FormField):
"""
Customized ``FormField``.
Excludes model primary key from the `populate_obj` and
handles `should_delete` flag.
"""
widget = InlineFormWidget()
def __init__(self, form_class, pk, form_opts=None, **kwargs):
super(InlineModelFormField, self).__init__(form_class, **kwargs)
self._pk = pk
self.form_opts = form_opts
def get_pk(self):
if isinstance(self._pk, (tuple, list)):
return tuple(getattr(self.form, pk).data for pk in self._pk)
return getattr(self.form, self._pk).data
def populate_obj(self, obj, name):
for name, field in iteritems(self.form._fields):
if name != self._pk:
field.populate_obj(obj, name)
class AjaxSelectField(SelectFieldBase):
"""
Ajax Model Select Field
"""
widget = AjaxSelect2Widget()
separator = ','
def __init__(self, loader, label=None, validators=None, allow_blank=False, blank_text=u'', **kwargs):
super(AjaxSelectField, self).__init__(label, validators, **kwargs)
self.loader = loader
self.allow_blank = allow_blank
self.blank_text = blank_text
def _get_data(self):
if self._formdata:
model = self.loader.get_one(self._formdata)
if model is not None:
self._set_data(model)
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def _format_item(self, item):
value = self.loader.format(self.data)
return (value[0], value[1], True)
def process_formdata(self, valuelist):
if valuelist:
if self.allow_blank and valuelist[0] == u'__None':
self.data = None
else:
self._data = None
self._formdata = valuelist[0]
def pre_validate(self, form):
if not self.allow_blank and self.data is None:
raise ValidationError(self.gettext(u'Not a valid choice'))
class AjaxSelectMultipleField(AjaxSelectField):
"""
Ajax-enabled model multi-select field.
"""
widget = AjaxSelect2Widget(multiple=True)
def __init__(self, loader, label=None, validators=None, default=None, **kwargs):
if default is None:
default = []
super(AjaxSelectMultipleField, self).__init__(loader, label, validators, default=default, **kwargs)
self._invalid_formdata = False
def _get_data(self):
formdata = self._formdata
if formdata:
data = []
# TODO: Optimize?
for item in formdata:
model = self.loader.get_one(item) if item else None
if model:
data.append(model)
else:
self._invalid_formdata = True
self._set_data(data)
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def process_formdata(self, valuelist):
self._formdata = set()
for field in valuelist:
for n in field.split(self.separator):
self._formdata.add(n)
def pre_validate(self, form):
if self._invalid_formdata:
raise ValidationError(self.gettext(u'Not a valid choice')) | PypiClean |
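
# Hedged usage sketch (illustrative; not part of flask_admin). The code above
# only ever calls loader.get_one(pk) and loader.format(model) on the loader it
# is given, so a minimal stand-in object with those two methods is enough to
# exercise AjaxSelectField outside a full Flask-Admin view. The form, model
# values and primary keys below are made up for the example.
if __name__ == '__main__':
    from wtforms import Form

    class DummyLoader(object):
        _models = {'1': 'apple', '2': 'banana'}

        def get_one(self, pk):
            return self._models.get(pk)

        def format(self, model):
            # returns a (primary key, label) pair consumed by the Select2 widget
            if model is None:
                return None
            pk = [k for k, v in self._models.items() if v == model][0]
            return (pk, model)

    class FruitForm(Form):
        fruit = AjaxSelectField(DummyLoader(), label='Fruit', allow_blank=True)

    form = FruitForm()
    form.fruit.process_formdata(['2'])
    print(form.fruit.data)  # 'banana' -- resolved through DummyLoader.get_one()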
/DI_engine-0.4.9-py3-none-any.whl/dizoo/atari/config/serial/spaceinvaders/spaceinvaders_onppo_config.py | from copy import deepcopy
from easydict import EasyDict
spaceinvaders_ppo_config = dict(
exp_name='spaceinvaders_onppo_seed0',
env=dict(
collector_env_num=16,
evaluator_env_num=8,
n_evaluator_episode=8,
stop_value=int(1e10),
env_id='SpaceInvadersNoFrameskip-v4',
        # 'ALE/SpaceInvaders-v5' is also available, but it needs special settings after gym.make().
frame_stack=4,
manager=dict(shared_memory=False, )
),
policy=dict(
cuda=True,
recompute_adv=True,
action_space='discrete',
model=dict(
obs_shape=[4, 84, 84],
action_shape=6,
action_space='discrete',
encoder_hidden_size_list=[64, 64, 128],
actor_head_hidden_size=128,
critic_head_hidden_size=128,
),
learn=dict(
epoch_per_collect=10,
update_per_collect=1,
batch_size=320,
learning_rate=3e-4,
value_weight=0.5,
entropy_weight=0.001,
clip_ratio=0.2,
adv_norm=True,
value_norm=True,
            # For on-policy PPO, recomputing advantages requires the `done` key in data to split
            # trajectories, so `ignore_done=False` must be used here.
            # When the `traj_flag` key is added to data as a backup for `done`, `ignore_done=True`
            # may be chosen instead (e.g. for HalfCheetah, where the episode length is 1000).
ignore_done=False,
grad_clip_type='clip_norm',
grad_clip_value=0.5,
),
collect=dict(
n_sample=3200,
unroll_len=1,
discount_factor=0.99,
gae_lambda=0.95,
),
eval=dict(evaluator=dict(eval_freq=5000, )),
),
)
main_config = EasyDict(spaceinvaders_ppo_config)
spaceinvaders_ppo_create_config = dict(
env=dict(
type='atari',
import_names=['dizoo.atari.envs.atari_env'],
),
env_manager=dict(type='subprocess'),
policy=dict(type='ppo'),
)
create_config = EasyDict(spaceinvaders_ppo_create_config)
if __name__ == "__main__":
# or you can enter ding -m serial_onpolicy -c spaceinvaders_onppo_config.py -s 0
from ding.entry import serial_pipeline_onpolicy
serial_pipeline_onpolicy([main_config, create_config], seed=0) | PypiClean |
/MAGPlus-0.2.2.tar.gz/MAGPlus-0.2.2/magplus/magplus.py |
DEBUG = False
import urllib2
import re
from datetime import datetime
from xml.dom.minidom import parseString
from operator import itemgetter
import urlparse
import bs4
class MinecraftAssetsGetter:
# last backslash in fileUrl is important
def __init__(self, xmlUrl='http://assets.minecraft.net/', fileUrl='http://assets.minecraft.net/'):
self.fileUrl = fileUrl
file = urllib2.urlopen(xmlUrl)
data = file.read()
file.close()
self.dom = parseString(data)
def getVanillaVersionList(self, stable=True):
""" Returns a list of versions, sorted by date, from earliest to
latest. versions[-1] would be the latest version.
stable: Determines whether only stable releases are returned or not.
"""
contents = self.dom.getElementsByTagName("Contents")
dt = datetime.now()
#version_list = list()
unsorted_list = []
for content in contents:
# Example build_dict
# {'version': '12w23' or '1_2_5', 'date': '2011-11-24T13:20:06.000Z'}
build_dict = {}
key = getText(content.getElementsByTagName("Key")[0].childNodes)
date = getText(content.getElementsByTagName("LastModified")[0].childNodes)
try:
version, file = key.split('/')
if len(file) == 0:
#print key, ", ", key.split('/'), ', ', version
continue
except ValueError:
continue
# Ensure this is a JAR
if file == "minecraft.jar" or file == "minecraft_server.jar":
# Filter for stable or unstable version
# Regex: 2 digits, 'w', 2 digits, 1 letter
if stable and "pre" not in version and "rc" not in version and not re.search("^\d{2}w\d{2}[a-zA-Z]{1}$", version):
if DEBUG:
print "Adding stable version %s, date %s" % (version, date)
build_dict['version'] = version
build_dict['date'] = date
unsorted_list.append(build_dict)
elif not stable:
if "pre" in version or "rc" in version or re.search("^\d{2}w\d{2}[a-zA-Z]{1}$", version):
if DEBUG:
print "Adding unstable version %s, date %s" % (version, date)
build_dict['version'] = version
build_dict['date'] = date
unsorted_list.append(build_dict)
else:
#print "Unknown type found. Version %s, date %s" % (version, date)
# Caught a stable release with unstable=True or vice versa.
continue
sorted_list = list()
sorted_list = sorted(unsorted_list, key=itemgetter('date'))
#for a in sorted_list:
#print a
sorted_unique_list = list()
for b in self.unique_keys(sorted_list):
#print b
sorted_unique_list.append(b)
# Filter duplicates
#seen = set()
#seen_add = seen.add
#sorted_unique_list = [ x for x in sorted_list if x not in seen and not seen_add(x)]
#for item in sorted_unique_list:
#print item
return sorted_unique_list
def getBukkitVersionList(self, stable=True):
""" Returns a list of versions, sorted by date, from earliest to
latest. versions[-1] would be the latest version.
stable: Determines whether only stable (recommended) releases are returned or not.
Each version is a dict with the following keys:
build_number: Build number with # prepended, used for sorting.
build_name: Official name of the release
download_link: Link (minus base URL) for the jar. None if no download link provided,
or if build is a broken build.
"""
# TODO: Make this less fragile!!!
bukkit_base_url = 'http://dl.bukkit.org/'
build_list = []
if stable:
html = urllib2.urlopen('http://dl.bukkit.org/downloads/craftbukkit/list/rb/').read()
soup = bs4.BeautifulSoup(html)
build_rows = soup.find_all('tr', {"class": "chan-rb"})
else:
html = urllib2.urlopen('http://dl.bukkit.org/downloads/craftbukkit/list/beta/').read()
soup = bs4.BeautifulSoup(html)
build_rows = soup.find_all('tr', {"class": "chan-beta"})
# Process each row in the table
for build_row in build_rows:
build_dict = {}
# Process the specific row
for row in build_row:
for row_elem in row:
if isinstance(row_elem, bs4.element.Tag):
# Check if it is a link
if row_elem.name == 'a':
# Check if the link is the download link (has a tool tip)
if "class" in row_elem.attrs and row_elem.attrs["class"][0] == 'tooltipd':
download_link = urlparse.urljoin(bukkit_base_url, row_elem.attrs["href"])
build_dict['download_link'] = download_link
else:
# Link back to the download page, ignore.
if "/downloads/craftbukkit/list/" in row_elem.attrs["href"]:
continue
else:
# Grab the text from the link. This is the build number
build_dict['build_number'] = row_elem.string
# Plain string, not a link. Find the build_name
elif isinstance(row_elem, bs4.element.NavigableString):
# Ignore empty strings
if row_elem.string == ' ' or row_elem == '\n' or row_elem is None:
continue
else:
# Left over is build_name
build_dict['build_name'] = row_elem.string
# If no download link found, set to None. Cleaner than always check 'in'
if "download_link" not in build_dict:
build_dict["download_link"] = None
build_list.append(build_dict)
# Sort based on build numbers. Newest builds will be last.
return sorted(build_list, key=itemgetter('build_number'))
def getLatestVanillaServer(self, stable=True):
""" Returns the URL of the latest server version.
        stable: Determines whether only stable releases are returned or not.
"""
version_list = self.getVanillaVersionList(stable)
return self.getVanillaServerUrl(version_list[-1]['version'])
def getLatestBukkitServer(self, stable=True):
version_list = self.getBukkitVersionList(stable)
return version_list[-1]["download_link"]
def getNewerVanillaVersion(self, current_version, stable=True):
""" Given stable and the current version, attempts to find a newer
        version. Current version must be a key in getVanillaVersionList, so
something like 1_2_5, 12w23, rc2, etc.
Returns a build_dict {'version', 'date'} or None if current_version
is up to date.
Raises SyntaxError if current_version is improperly formatted
If current_version is None, returns latest version
"""
if current_version is None:
return self.getVanillaVersionList(stable)[-1]
version_list = self.getVanillaVersionList(stable)
# Find the date of current_version by iterating the list
current_date = None
for version in version_list:
if version['version'] == current_version:
current_date = version['date']
# Could not find in list.
if DEBUG:
print version_list
if current_date is None:
raise SyntaxError("current_version was not found in version list.\
Either you have an improperly formatted version or a really old version (pre 1.8)")
latest_version = version_list[-1]
if latest_version['date'] > current_date:
return latest_version
else:
return None
def getNewerBukkitVersion(self, current_version, stable=True):
""" Given stable and the current version, attempts to find a newer
        version. Current version must be a key in getBukkitVersionList, so
        something like 1.2.3-R0.1, 1.1-R1, etc.
Returns a build_dict {'version', 'date'} or None if current_version
is up to date.
Raises SyntaxError if current_version is improperly formatted
If current_version is None, returns latest version
"""
if current_version is None:
return self.getBukkitVersionList(stable)[-1]
version_list = self.getBukkitVersionList(stable)
# Find the date of current_version by iterating the list
current_build_number = None
for version in version_list:
if DEBUG:
print version['build_name'], current_version
if version['build_name'] == current_version:
current_build_number = version['build_number']
# Could not find in list.
if current_build_number is None:
raise SyntaxError("current_version was not found in version list.\
Either you have an improperly formatted version or a really old version (pre 1.8)")
latest_version = version_list[-1]
if latest_version['build_number'] > current_build_number:
return latest_version
else:
return None
def getLatestClient(self, stable=True):
""" Returns the URL of the latest client version.
        stable: Determines whether only stable releases are returned or not.
"""
version_list = self.getVanillaVersionList(stable)
#print version_list
return self.getClientUrl(version_list[-1]['version'])
def getVanillaServer(self, stable=True, versions_old=0):
""" Returns the URL of the latest server version.
        stable: Determines whether only stable releases are returned or not.
        Returns None if versions_old exceeds the number of available servers.
"""
version_list = self.getVanillaVersionList(stable)
#print len(version_list), ', ', version_list
if versions_old + 1 > len(version_list):
return None
return self.getVanillaServerUrl(version_list[-1 - versions_old]['version'])
def getBukkitServer(self, stable=True, versions_old=0):
version_list = self.getBukkitVersionList(stable)
if versions_old + 1 > len(version_list):
return None
return version_list[-1 - versions_old]["download_link"]
def getClient(self, stable=True, versions_old=0):
""" Returns the URL of the latest client version.
        stable: Determines whether only stable releases are returned or not.
        Returns None if versions_old exceeds the number of available servers.
"""
version_list = self.getVanillaVersionList(stable)
if versions_old + 1 > len(version_list):
return None
return self.getClientUrl(version_list[-1 - versions_old]['version'])
def getVanillaServerUrl(self, version):
""" Returns the URL of a given server version. """
return unicode(self.fileUrl) + version + unicode("/minecraft_server.jar")
def getClientUrl(self, version):
""" Returns the URL of a given client version. """
return unicode(self.fileUrl) + str(version) + unicode("/minecraft.jar")
def getVanillaWeeklyList(self):
return filterVersionList(self.getVanillaVersionList(), re.compile(r"\d{2}w\d{2}\w"))
def getVanillaMinecraftList(self):
allVersions = self.getVanillaVersionList()
        weekly = self.getVanillaWeeklyList()
return [x for x in allVersions if x not in weekly]
def unique_keys(self, items):
seen = set()
for item in items:
key = item['version']
if key not in seen:
seen.add(key)
yield item
else:
# its a duplicate key, drop.
pass
def getText(nodelist):
rc = []
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
rc.append(node.data)
return ''.join(rc)
def filterVersionList(vlist, pattern):
"""call: filterVersionList( VersionList, re.comiple(r"\d{2}w\d{2}\w" )"""
return_list = [x for x in vlist if not pattern.match(x) is None]
if DEBUG:
for version in return_list:
print version
return return_list
if __name__ == '__main__':
mag = MinecraftAssetsGetter()
#for version in mag.getVanillaVersionList():
#print version
#print "Latest Prerelease Client: ", mag.getLatestClient(stable=False)
#print "Latest Stable Client: ", mag.getLatestClient()
#print "Latest Prerelease Server: ", mag.getLatestVanillaServer(stable=False)
#print "Latest Stable Server: ", mag.getLatestVanillaServer()
#print "Previous Prerelease Server: ", mag.getVanillaServer(stable=False, versions_old=1)
#print "Previous Stable Server: ", mag.getVanillaServer(versions_old=1)
#print "Latest Bukkit Recommended Server: ", mag.getLatestBukkitServer()
#print "Latest Bukkit Beta Server: ", mag.getLatestBukkitServer(stable=False)
#print "Previous Bukkit Recommended Server: ", mag.getBukkitServer(versions_old=1)
#print "Previous Bukkit Beta Server: ", mag.getBukkitServer(stable=False, versions_old=1)
#print "Vanilla Version List: ", mag.getVanillaVersionList(stable=False)
#print "Bukkit Stable Version List: ", mag.getBukkitVersionList(stable=True)
#print "Bukkit Unstable Version List: ", mag.getBukkitVersionList(stable=False)
print "Newer Stable Vanilla Version (given None)? Yes: ", mag.getNewerVanillaVersion(None, True)
print "Newer Unstable Vanilla Version? Yes: ", mag.getNewerVanillaVersion('12w22a', False)
print "Newer Stable Vanilla Version? Yes: ", mag.getNewerVanillaVersion('1_2', True)
print "Newer Stable Vanilla Version? No. ", mag.getNewerVanillaVersion('1_2_5', True)
print "Newer Stable Bukkit Version (given None)? Yes: ", mag.getNewerBukkitVersion(None, True)
print "Newer Unstable Bukkit Version? Yes: ", mag.getNewerBukkitVersion('1.2.3-R0.1', False)
print "Newer Stable Bukkit Version? Yes: ", mag.getNewerBukkitVersion('1.1-R1', True)
print "Newer Stable Bukkit Version? No. ", mag.getNewerBukkitVersion('1.2.5-R4.0', True) | PypiClean |
/ConfigHandler-python-2.0.2.tar.gz/ConfigHandler-python-2.0.2/config_handler/_ui.py | import os
import shutil
import textwrap
from typing import Dict
from typing import Union
from config_handler import info
def clearScreen() -> None:
os.system("cls" if os.name == "nt" else "clear")
def confirm(message: str = "Press enter to continue...") -> None:
input(message)
class InputBox:
"""
Show an input box to the user and return their input.
"""
def __init__(
self,
title: str = info.title,
description: Union[str, None] = None,
margin: int = 4,
title_fill_char: str = ' ',
clear_screen: bool = True,
input_prompt: str = " >>> "
):
"""
Initialize the InputBox() class.
:param title: The title of the input box. (default: <info.title>)
:param description: The description of the input box. (default: None)
:param margin: The margin of the description. (default: 4)
:param title_fill_char: The character to fill the sides of the title with. (default: ' ')
:param clear_screen: Whether to clear the screen before showing the dialog. (default: True)
:param input_prompt: The prompt to show beside the user's input field. (default: " >>> ")
"""
self.title = title
self.description = description
self.margin = margin
self.title_fill_char = title_fill_char
self.clear_screen = clear_screen
self.input_prompt = input_prompt
def __call__(self) -> str:
"""
Show the dialog to the user and return their input.
:returns: The input of the user.
"""
if self.clear_screen:
clearScreen()
print(self.__buildDialog())
return input(self.input_prompt)
def __str__(self) -> str:
"""
Get a string representation of the dialog.
This will not clear the screen nor ask for the user's input.
:returns: The string representation of the dialog.
"""
return self.__buildDialog()
def __buildDialog(self) -> str:
"""
Build the dialog.
:returns: The dialog.
"""
# Center and add the title.
result: str = f"\n{self.title.center(shutil.get_terminal_size().columns, self.title_fill_char)}\n\n"
if self.description is not None: # Center and add the description.
for desc_line in self.description.split('\n'):
for line in textwrap.wrap(
desc_line,
shutil.get_terminal_size().columns - (self.margin * 2)
):
result += f"{line.center(shutil.get_terminal_size().columns)}\n"
result += '\n'
return result
class Choices:
"""
Show a menu of choices to the user and return the choice they make.
"""
def __init__(
self,
list_of_choices: Dict[str, str],
title: str = info.title,
description: Union[str, None] = None,
minimum_spaces: int = 1,
margin: int = 4,
title_fill_char: str = ' ',
clear_screen: bool = True,
case_sensitive: bool = False,
input_prompt: str = " >>> "
):
"""
Initialize the Choice() class.
:param list_of_choices: A dictionary containing the ID and description of each choice.
:param title: The title of the choice dialog. (default: <info.title>)
:param description: A description about the choice dialog. (default: None)
:param minimum_spaces: The minimum number of spaces between the ID and description. (default: 1)
:param margin: The margin of the description. (default: 4)
:param title_fill_char: The character to fill the sides of the title with. (default: ' ')
:param clear_screen: Whether to clear the screen before showing the dialog. (default: True)
        :param case_sensitive: Whether the user's input must match the case of the choice IDs exactly. (default: False)
:param input_prompt: The prompt to show beside the user's input field. (default: " >>> ")
"""
self.list_of_choices = list_of_choices
self.title = title
self.description = description
self.minimum_spaces = minimum_spaces
self.margin = margin
self.title_fill_char = title_fill_char
self.clear_screen = clear_screen
self.case_sensitive = case_sensitive
self.input_prompt = input_prompt
def __call__(self, prompt_only: bool = False) -> str:
"""
Show the dialog to the user and return the choice they make.
:returns: The choice the user made.
"""
while True:
if self.clear_screen:
clearScreen()
if not prompt_only:
print(self.__buildDialog())
choice = input(self.input_prompt) # Get the user's choice.
if self.case_sensitive:
if choice in self.list_of_choices.keys():
return choice
else:
if choice.lower() in [ # Convert the keys to lowercase ONLY IF the key is a string.
key.lower() if type(key) is str else key
for key in self.list_of_choices.keys()
]:
return choice
def __str__(self) -> str:
"""
Get a string representation of the dialog.
This will not clear the screen nor ask for the user's input.
:returns: The string representation of the dialog.
"""
return self.__buildDialog()
def __buildDialog(self) -> str:
"""
Build the dialog.
"""
# Center and add title.
result: str = f"\n{self.title.center(shutil.get_terminal_size().columns, self.title_fill_char)}\n\n"
if self.description is not None: # Center and add description.
for desc_line in self.description.split('\n'):
for line in textwrap.wrap(
desc_line,
shutil.get_terminal_size().columns - (self.margin * 2)
):
result += f"{line.center(shutil.get_terminal_size().columns)}\n"
result += '\n'
result += self.getChoicesList()
result += '\n'
return result
def getChoicesList(self) -> str:
"""
Return a string containing the formatted choices list without the title and description.
"""
result: str = ""
# Get the longest key; to be used in formatting the choices.
longest_id = max(
(len(key) if key is not None else 0)
for key in self.list_of_choices.keys()
)
# Format and add choices to result.
for choice_id, choice_description in self.list_of_choices.items():
spacer = ' ' * (self.minimum_spaces + (longest_id - len(str(choice_id))))
result += f"[{choice_id}]{spacer}{choice_description}\n"
return result | PypiClean |
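
# Hedged usage sketch (illustrative; not part of the original module). It
# builds one input box and one choice menu with the classes above;
# clear_screen is disabled so the dialogs can be previewed as plain strings
# without wiping the terminal. The titles and choice IDs are made up.
if __name__ == "__main__":
    name_box = InputBox(
        title="Example",
        description="What should the new configuration file be called?",
        clear_screen=False,
    )
    menu = Choices(
        list_of_choices={"1": "Create a new file", "2": "Open a file", "q": "Quit"},
        title="Example",
        description="Pick an action.",
        clear_screen=False,
    )
    print(name_box)  # renders the dialog without asking for input
    print(menu)      # renders the menu; calling menu() would prompt the user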
/HTTP-PyServer-0.1.4.3.2.tar.gz/HTTP-PyServer-0.1.4.3.2/README.md | # Project Description
HTTP-PyServer is a simple and extremely light-weight solution for building powerful web-based projects with very few lines of code. It uses Python's built-in packages to host servers and handle HTTP requests and responses. It's extremely customizable and allows you to do almost everything you might want.
HTTP-PyServer is also very flexible about your project layout. It helps keep things secure by letting you specify which files to send and where, while letting you do everything you need with as few lines of code as possible.
# Installing
Install HTTP-PyServer using pip from the command line.
```
python -m pip install HTTP-PyServer
```
# Simple Example
```
# Saved as "main.py"
import server
app = server.Server()
@app.route('/')
def _(request):
return 'Hello, world!'
with app:
app.wait('Press Enter to continue...')
```
```
$ python main.py
Press Enter to continue...
```
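
Additional routes follow the same pattern: one decorated handler per path, each returning the response text. The `/about` route in the snippet below is made up purely for illustration; only the `/` route comes from the example above.

```
# Saved as "main.py"
import server

app = server.Server()

@app.route('/')
def index(request):
    return 'Hello, world!'

@app.route('/about')
def about(request):
    return 'A tiny site served with HTTP-PyServer.'

with app:
    app.wait('Press Enter to continue...')
```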
# Contributing
Look on github, and create a fork of HTTP-PyServer. Submit, pull requests, and any features will be looked at, and potentially implemented.
# Used Packages
No external packages are used; only the following standard-library modules are imported. All of them ship with `Python 3.11.2`. Other versions have not been tested, but they should work.
- `threading`
- `socket`
- `typing`
- `logging`
- `pathlib`
- `re`
- `time`
- `json`
- `urllib`
- `mimetypes`
- `enum`
- `ssl`
- `secrets` | PypiClean |
/LinOTP-2.11.1.tar.gz/LinOTP-2.11.1/linotp/provider/voiceprovider/custom_voice_provider.py | import logging
import requests
from linotp.provider import provider_registry
from linotp.provider.provider_base import ProviderBase
from linotp.provider.voiceprovider import TwillioMixin
#
# set the default connection and request timeouts
#
DEFAULT_TIMEOUT = (3, 5)
log = logging.getLogger(__name__)
@provider_registry.class_entry('CustomVoiceProvider')
@provider_registry.class_entry('linotp.provider.CustomVoiceProvider')
@provider_registry.class_entry('linotp.provider.voiceprovider.'
'custom_voice_provider.CustomVoiceProvider')
class CustomVoiceProvider(ProviderBase, TwillioMixin):
"""
Send a Voice notification through the Custom Voice Provider to the
    Voice Challenge Service. The CustomVoiceProvider allows all required
    config options to be defined directly and expects the following parameters:
{
"server_url": "https://vcs.*/v1/twilio/call"
"access_certificate": "/etc/linotp2/voice-license.pem",
"server_certificate": "/etc/linotp2/keyidentity-voice-ca-bundle.crt"
'callerNumber': '+4989231234567'
}
    The config definition also includes the Voice Delivery Service
    configuration, of which the Twilio definition is currently the only
    supported one:
'twilio': {
'accountSid': 'ACf9095f540f0b090edbd239b99230a8ee',
'authToken': '8f36aab7ca485b432500ce49c15280c5'
'voice': 'alice',
}
"""
def __init__(self):
""" """
self.server_url = None
self.client_cert = None
self.server_cert = None
self.proxy = None
self.timeout = DEFAULT_TIMEOUT
self.service_config = {}
self.callerNumber = None
def loadConfig(self, configDict):
"""
Loads the configuration for this Voice notification provider
:param configDict: A dictionary that contains all configuration entries
you defined (e.g. in the linotp.ini file)
{
"server_url":
the voice provider target url,
"access_certificate":
the client certificate
"server_certificate":
server verification certificate
"proxy": '
the proxy url
"timeout":
the http timeout value
"twillioConfig": {
"accountSid":
the account identifier
"authToken":
the authentication token
"voice":
reader's voice - default is 'alice'
"callerNumber":
the number of the originator
}
}
"""
# ------------------------------------------------------------------ --
# define the request calling endpoint and verify the url scheme
if 'server_url' not in configDict:
raise KeyError('missing the required server_url')
self.voice_server_url = CustomVoiceProvider.load_server_url(configDict)
# ------------------------------------------------------------------ --
#
# for authentication on the vcs we require a client certificate
#
self.client_cert = CustomVoiceProvider.load_client_cert(configDict)
# ------------------------------------------------------------------ --
#
# default is no server verification, but if provided
# it must be either a file or directory reference
#
self.server_cert = CustomVoiceProvider.load_server_cert(configDict)
# ------------------------------------------------------------------ --
# timeout could be a tuple of network timeout or connection timeout
self.timeout = CustomVoiceProvider.load_timeout(
configDict,
DEFAULT_TIMEOUT)
# ------------------------------------------------------------------ --
#
# we support proxy configuration, whereby here 'requests'
# distinguishes between http and https proxies, which are provided
# in a dicitionary to the request api
#
self.proxy = CustomVoiceProvider.load_proxy(configDict)
# ------------------------------------------------------------------ --
# load the voice message delivery service configuration
delivery_service = configDict.get("twilioConfig")
if not delivery_service:
raise KeyError("Missing delivery service configuration: "
"twillioConfig")
# prepare the twilio voice provider
# . . . other voice services will follow here
twilio_config = CustomVoiceProvider.load_twilio_definition(configDict)
if twilio_config:
self.service_config.update(twilio_config)
return
def submitVoiceMessage(self, calleeNumber, messageTemplate, otp, locale):
"""
Sends out the voice notification message.
{
'call':
{
'calleeNumber': '+4917012345678',
'messageTemplate': 'Hi! Your otp is {otp}'
'otp': '98018932'
'locale': 'en',
}
}
the other information is joined in the lower level of the http call
:param calleeNumber: the destination phone number
:param messageTemplate: the message text containing the placeholder for
the otp
:param otp: the otp
:param locale: the language of the voice reader
:return: A tuple of success and result message
"""
if not calleeNumber:
raise Exception("Missing target number!")
if not messageTemplate:
raise Exception("No message to submit!")
if '{otp}' not in messageTemplate:
log.warning("Missing '{otp}' in messageTemplate: %r",
messageTemplate)
if not otp:
raise Exception("Missing otp value!")
if not locale:
locale = "en"
# ----------------------------------------------------------------- --
# combine the call parameters from request and configuration into
# the json call document
call = {
'calleeNumber': calleeNumber,
'messageTemplate': messageTemplate,
'otp': otp,
'locale': locale}
# add the voice delivery service (twilio) specific data
call.update(self.service_config)
# ----------------------------------------------------------------- --
# run the request against the vcs
return self._make_http_post_request_(json={'call': call})
def _make_http_post_request_(self, json=None):
"""
lower layer for the http post request to support json
document submission
:param json: json document for POST body
:return: response and result tuple
"""
# adjust HTTP header for submitting the json body
headers = {
'Content-type': 'application/json',
'Accept': 'text/plain'}
pparams = {}
if self.timeout:
pparams['timeout'] = self.timeout
try: # submit the POST request
http_session = self._create_http_session_()
response = http_session.post(self.voice_server_url,
json=json,
headers=headers,
**pparams)
if not response.ok:
result = response.reason
else:
result = response.content
finally:
log.debug("leaving voice token provider")
return response.ok, result
# ------------------------------------------------------------------ --
def _create_http_session_(self):
"""
create the http session with certificates and proxy
:return: the http session object
"""
http_session = requests.Session()
# -------------------------------------------------------------- --
# add the proxy if defined
if self.proxy:
http_session.proxies.update(self.proxy)
# -------------------------------------------------------------- --
# add the client certificate if defined
if self.client_cert:
http_session.cert = self.client_cert
# -------------------------------------------------------------- --
# add the server cert to support the server verification if avail
server_cert = self.server_cert
if server_cert is not None:
# Session.post() doesn't like unicode values in Session.verify
if isinstance(server_cert, unicode):
server_cert = server_cert.encode('utf-8')
http_session.verify = server_cert
return http_session
def test_connection(self):
"""
to test the connection, we just call the same endpoint without
arguments (empty document), which will raise an error 400
"""
status, response = self._make_http_post_request_(json={})
if response == 'Bad Request':
return True, response
return False, response
# eof | PypiClean |
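
# Hedged usage sketch (illustrative; not part of the original provider module).
# The configuration dict mirrors the structure documented in the class and
# loadConfig docstrings above; every URL, certificate path, token and phone
# number is a placeholder. Running it for real requires existing certificate
# files and a reachable voice challenge service.
if __name__ == '__main__':
    provider = CustomVoiceProvider()
    provider.loadConfig({
        "server_url": "https://vcs.example.com/v1/twilio/call",
        "access_certificate": "/etc/linotp2/voice-license.pem",
        "server_certificate": "/etc/linotp2/keyidentity-voice-ca-bundle.crt",
        "twilioConfig": {
            "accountSid": "ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
            "authToken": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
            "voice": "alice",
            "callerNumber": "+4989231234567",
        },
    })
    success, reply = provider.submitVoiceMessage(
        calleeNumber="+4917012345678",
        messageTemplate="Hi! Your otp is {otp}",
        otp="98018932",
        locale="en")
    print("submitted: %r -> %r" % (success, reply))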
/ClueDojo-1.4.3-1.tar.gz/ClueDojo-1.4.3-1/src/cluedojo/static/dijit/form/_FormWidget.js | if(!dojo._hasResource["dijit.form._FormWidget"]){
dojo._hasResource["dijit.form._FormWidget"]=true;
dojo.provide("dijit.form._FormWidget");
dojo.require("dijit._Widget");
dojo.require("dijit._Templated");
dojo.declare("dijit.form._FormWidget",[dijit._Widget,dijit._Templated],{baseClass:"",name:"",alt:"",value:"",type:"text",tabIndex:"0",disabled:false,intermediateChanges:false,scrollOnFocus:true,attributeMap:dojo.delegate(dijit._Widget.prototype.attributeMap,{value:"focusNode",id:"focusNode",tabIndex:"focusNode",alt:"focusNode",title:"focusNode"}),postMixInProperties:function(){
this.nameAttrSetting=this.name?("name='"+this.name+"'"):"";
this.inherited(arguments);
},_setDisabledAttr:function(_1){
this.disabled=_1;
dojo.attr(this.focusNode,"disabled",_1);
if(this.valueNode){
dojo.attr(this.valueNode,"disabled",_1);
}
dijit.setWaiState(this.focusNode,"disabled",_1);
if(_1){
this._hovering=false;
this._active=false;
this.focusNode.setAttribute("tabIndex","-1");
}else{
this.focusNode.setAttribute("tabIndex",this.tabIndex);
}
this._setStateClass();
},setDisabled:function(_2){
dojo.deprecated("setDisabled("+_2+") is deprecated. Use attr('disabled',"+_2+") instead.","","2.0");
this.attr("disabled",_2);
},_onFocus:function(e){
if(this.scrollOnFocus){
dijit.scrollIntoView(this.domNode);
}
this.inherited(arguments);
},_onMouse:function(_3){
var _4=_3.currentTarget;
if(_4&&_4.getAttribute){
this.stateModifier=_4.getAttribute("stateModifier")||"";
}
if(!this.disabled){
switch(_3.type){
case "mouseenter":
case "mouseover":
this._hovering=true;
this._active=this._mouseDown;
break;
case "mouseout":
case "mouseleave":
this._hovering=false;
this._active=false;
break;
case "mousedown":
this._active=true;
this._mouseDown=true;
var _5=this.connect(dojo.body(),"onmouseup",function(){
if(this._mouseDown&&this.isFocusable()){
this.focus();
}
this._active=false;
this._mouseDown=false;
this._setStateClass();
this.disconnect(_5);
});
break;
}
this._setStateClass();
}
},isFocusable:function(){
return !this.disabled&&!this.readOnly&&this.focusNode&&(dojo.style(this.domNode,"display")!="none");
},focus:function(){
dijit.focus(this.focusNode);
},_setStateClass:function(){
var _6=this.baseClass.split(" ");
function _7(_8){
_6=_6.concat(dojo.map(_6,function(c){
return c+_8;
}),"dijit"+_8);
};
if(this.checked){
_7("Checked");
}
if(this.state){
_7(this.state);
}
if(this.selected){
_7("Selected");
}
if(this.disabled){
_7("Disabled");
}else{
if(this.readOnly){
_7("ReadOnly");
}else{
if(this._active){
_7(this.stateModifier+"Active");
}else{
if(this._focused){
_7("Focused");
}
if(this._hovering){
_7(this.stateModifier+"Hover");
}
}
}
}
var tn=this.stateNode||this.domNode,_9={};
dojo.forEach(tn.className.split(" "),function(c){
_9[c]=true;
});
if("_stateClasses" in this){
dojo.forEach(this._stateClasses,function(c){
delete _9[c];
});
}
dojo.forEach(_6,function(c){
_9[c]=true;
});
var _a=[];
for(var c in _9){
_a.push(c);
}
tn.className=_a.join(" ");
this._stateClasses=_6;
},compare:function(_b,_c){
if(typeof _b=="number"&&typeof _c=="number"){
return (isNaN(_b)&&isNaN(_c))?0:_b-_c;
}else{
if(_b>_c){
return 1;
}else{
if(_b<_c){
return -1;
}else{
return 0;
}
}
}
},onChange:function(_d){
},_onChangeActive:false,_handleOnChange:function(_e,_f){
this._lastValue=_e;
if(this._lastValueReported==undefined&&(_f===null||!this._onChangeActive)){
this._resetValue=this._lastValueReported=_e;
}
if((this.intermediateChanges||_f||_f===undefined)&&((typeof _e!=typeof this._lastValueReported)||this.compare(_e,this._lastValueReported)!=0)){
this._lastValueReported=_e;
if(this._onChangeActive){
if(this._onChangeHandle){
clearTimeout(this._onChangeHandle);
}
this._onChangeHandle=setTimeout(dojo.hitch(this,function(){
this._onChangeHandle=null;
this.onChange(_e);
}),0);
}
}
},create:function(){
this.inherited(arguments);
this._onChangeActive=true;
this._setStateClass();
},destroy:function(){
if(this._onChangeHandle){
clearTimeout(this._onChangeHandle);
this.onChange(this._lastValueReported);
}
this.inherited(arguments);
},setValue:function(_10){
dojo.deprecated("dijit.form._FormWidget:setValue("+_10+") is deprecated. Use attr('value',"+_10+") instead.","","2.0");
this.attr("value",_10);
},getValue:function(){
dojo.deprecated(this.declaredClass+"::getValue() is deprecated. Use attr('value') instead.","","2.0");
return this.attr("value");
}});
dojo.declare("dijit.form._FormValueWidget",dijit.form._FormWidget,{readOnly:false,attributeMap:dojo.delegate(dijit.form._FormWidget.prototype.attributeMap,{value:"",readOnly:"focusNode"}),_setReadOnlyAttr:function(_11){
this.readOnly=_11;
dojo.attr(this.focusNode,"readOnly",_11);
dijit.setWaiState(this.focusNode,"readonly",_11);
this._setStateClass();
},postCreate:function(){
if(dojo.isIE){
this.connect(this.focusNode||this.domNode,"onkeydown",this._onKeyDown);
}
if(this._resetValue===undefined){
this._resetValue=this.value;
}
},_setValueAttr:function(_12,_13){
this.value=_12;
this._handleOnChange(_12,_13);
},_getValueAttr:function(){
return this._lastValue;
},undo:function(){
this._setValueAttr(this._lastValueReported,false);
},reset:function(){
this._hasBeenBlurred=false;
this._setValueAttr(this._resetValue,true);
},_onKeyDown:function(e){
if(e.keyCode==dojo.keys.ESCAPE&&!(e.ctrlKey||e.altKey||e.metaKey)){
var te;
if(dojo.isIE){
e.preventDefault();
te=document.createEventObject();
te.keyCode=dojo.keys.ESCAPE;
te.shiftKey=e.shiftKey;
e.srcElement.fireEvent("onkeypress",te);
}
}
},_layoutHackIE7:function(){
if(dojo.isIE==7){
var _14=this.domNode;
var _15=_14.parentNode;
var _16=_14.firstChild||_14;
var _17=_16.style.filter;
while(_15&&_15.clientHeight==0){
_15._disconnectHandle=this.connect(_15,"onscroll",dojo.hitch(this,function(e){
this.disconnect(_15._disconnectHandle);
_15.removeAttribute("_disconnectHandle");
_16.style.filter=(new Date()).getMilliseconds();
setTimeout(function(){
_16.style.filter=_17;
},0);
}));
_15=_15.parentNode;
}
}
}});
} | PypiClean |
/DI_engine-0.4.9-py3-none-any.whl/ding/worker/replay_buffer/utils.py | from typing import Any
import time
from queue import Queue
from typing import Union, Tuple
from threading import Thread
from functools import partial
from ding.utils.autolog import LoggedValue, LoggedModel
from ding.utils import LockContext, LockContextType, remove_file
def generate_id(name, data_id: int) -> str:
"""
Overview:
Use ``self.name`` and input ``id`` to generate a unique id for next data to be inserted.
Arguments:
- data_id (:obj:`int`): Current unique id.
Returns:
- id (:obj:`str`): Id in format "BufferName_DataId".
"""
return "{}_{}".format(name, str(data_id))
class UsedDataRemover:
"""
Overview:
UsedDataRemover is a tool to remove file datas that will no longer be used anymore.
Interface:
start, close, add_used_data
"""
def __init__(self) -> None:
self._used_data = Queue()
self._delete_used_data_thread = Thread(target=self._delete_used_data, name='delete_used_data')
self._delete_used_data_thread.daemon = True
self._end_flag = True
def start(self) -> None:
"""
Overview:
Start the `delete_used_data` thread.
"""
self._end_flag = False
self._delete_used_data_thread.start()
def close(self) -> None:
"""
Overview:
            Remove the files of all remaining data items in `self._used_data`, then signal the `delete_used_data` thread to stop.
"""
while not self._used_data.empty():
data_id = self._used_data.get()
remove_file(data_id)
self._end_flag = True
def add_used_data(self, data: Any) -> None:
"""
Overview:
            Put a used data item into `self._used_data`, so that its file can be removed
            by the `delete_used_data` thread.
        Arguments:
            - data (:obj:`Any`): The used data item to queue for removal; must be a dict containing `data_id`.
"""
assert data is not None and isinstance(data, dict) and 'data_id' in data
self._used_data.put(data['data_id'])
def _delete_used_data(self) -> None:
while not self._end_flag:
if not self._used_data.empty():
data_id = self._used_data.get()
remove_file(data_id)
else:
time.sleep(0.001)
class SampledDataAttrMonitor(LoggedModel):
"""
Overview:
SampledDataAttrMonitor is to monitor read-out indicators for ``expire`` times recent read-outs.
Indicators include: read out time; average and max of read out data items' use; average, max and min of
        read out data items' priority; average and max of staleness.
Interface:
__init__, fixed_time, current_time, freeze, unfreeze, register_attribute_value, __getattr__
Property:
time, expire
"""
use_max = LoggedValue(int)
use_avg = LoggedValue(float)
priority_max = LoggedValue(float)
priority_avg = LoggedValue(float)
priority_min = LoggedValue(float)
staleness_max = LoggedValue(int)
staleness_avg = LoggedValue(float)
def __init__(self, time_: 'BaseTime', expire: Union[int, float]): # noqa
LoggedModel.__init__(self, time_, expire)
self.__register()
def __register(self):
def __avg_func(prop_name: str) -> float:
records = self.range_values[prop_name]()
_list = [_value for (_begin_time, _end_time), _value in records]
return sum(_list) / len(_list) if len(_list) != 0 else 0
def __max_func(prop_name: str) -> Union[float, int]:
records = self.range_values[prop_name]()
_list = [_value for (_begin_time, _end_time), _value in records]
return max(_list) if len(_list) != 0 else 0
def __min_func(prop_name: str) -> Union[float, int]:
records = self.range_values[prop_name]()
_list = [_value for (_begin_time, _end_time), _value in records]
return min(_list) if len(_list) != 0 else 0
self.register_attribute_value('avg', 'use', partial(__avg_func, prop_name='use_avg'))
self.register_attribute_value('max', 'use', partial(__max_func, prop_name='use_max'))
self.register_attribute_value('avg', 'priority', partial(__avg_func, prop_name='priority_avg'))
self.register_attribute_value('max', 'priority', partial(__max_func, prop_name='priority_max'))
self.register_attribute_value('min', 'priority', partial(__min_func, prop_name='priority_min'))
self.register_attribute_value('avg', 'staleness', partial(__avg_func, prop_name='staleness_avg'))
self.register_attribute_value('max', 'staleness', partial(__max_func, prop_name='staleness_max'))
class PeriodicThruputMonitor:
"""
Overview:
        PeriodicThruputMonitor is a tool to record and print logs (text & TensorBoard) of how many data items are
        pushed/sampled/removed/valid in a period of time. For TensorBoard, you can view it in 'buffer_{$NAME}_sec'.
Interface:
close
Property:
push_data_count, sample_data_count, remove_data_count, valid_count
.. note::
        The `thruput_log` thread is initialized and started in the `__init__` method, so PeriodicThruputMonitor only
        provides one single interface `close`.
"""
def __init__(self, name, cfg, logger, tb_logger) -> None:
self.name = name
self._end_flag = False
self._logger = logger
self._tb_logger = tb_logger
self._thruput_print_seconds = cfg.seconds
self._thruput_print_times = 0
self._thruput_start_time = time.time()
self._history_push_count = 0
self._history_sample_count = 0
self._remove_data_count = 0
self._valid_count = 0
self._thruput_log_thread = Thread(target=self._thrput_print_periodically, args=(), name='periodic_thruput_log')
self._thruput_log_thread.daemon = True
self._thruput_log_thread.start()
def _thrput_print_periodically(self) -> None:
while not self._end_flag:
time_passed = time.time() - self._thruput_start_time
if time_passed >= self._thruput_print_seconds:
self._logger.info('In the past {:.1f} seconds, buffer statistics is as follows:'.format(time_passed))
count_dict = {
'pushed_in': self._history_push_count,
'sampled_out': self._history_sample_count,
'removed': self._remove_data_count,
'current_have': self._valid_count,
}
self._logger.info(self._logger.get_tabulate_vars_hor(count_dict))
for k, v in count_dict.items():
self._tb_logger.add_scalar('{}_sec/'.format(self.name) + k, v, self._thruput_print_times)
self._history_push_count = 0
self._history_sample_count = 0
self._remove_data_count = 0
self._thruput_start_time = time.time()
self._thruput_print_times += 1
else:
time.sleep(min(1, self._thruput_print_seconds * 0.2))
def close(self) -> None:
"""
Overview:
Join the `thruput_log` thread by setting `self._end_flag` to `True`.
"""
self._end_flag = True
def __del__(self) -> None:
self.close()
@property
def push_data_count(self) -> int:
return self._history_push_count
@push_data_count.setter
def push_data_count(self, count) -> None:
self._history_push_count = count
@property
def sample_data_count(self) -> int:
return self._history_sample_count
@sample_data_count.setter
def sample_data_count(self, count) -> None:
self._history_sample_count = count
@property
def remove_data_count(self) -> int:
return self._remove_data_count
@remove_data_count.setter
def remove_data_count(self, count) -> None:
self._remove_data_count = count
@property
def valid_count(self) -> int:
return self._valid_count
@valid_count.setter
def valid_count(self, count) -> None:
self._valid_count = count
class ThruputController:
def __init__(self, cfg) -> None:
self._push_sample_rate_limit = cfg.push_sample_rate_limit
assert 'min' in self._push_sample_rate_limit and self._push_sample_rate_limit['min'] >= 0
assert 'max' in self._push_sample_rate_limit and self._push_sample_rate_limit['max'] <= float("inf")
window_seconds = cfg.window_seconds
self._decay_factor = 0.01 ** (1 / window_seconds)
self._push_lock = LockContext(type_=LockContextType.THREAD_LOCK)
self._sample_lock = LockContext(type_=LockContextType.THREAD_LOCK)
self._history_push_count = 0
self._history_sample_count = 0
self._end_flag = False
self._count_decay_thread = Thread(target=self._count_decay, name='count_decay')
self._count_decay_thread.daemon = True
self._count_decay_thread.start()
def _count_decay(self) -> None:
while not self._end_flag:
time.sleep(1)
with self._push_lock:
self._history_push_count *= self._decay_factor
with self._sample_lock:
self._history_sample_count *= self._decay_factor
def can_push(self, push_size: int) -> Tuple[bool, str]:
if abs(self._history_sample_count) < 1e-5:
return True, "Can push because `self._history_sample_count` < 1e-5"
rate = (self._history_push_count + push_size) / self._history_sample_count
if rate > self._push_sample_rate_limit['max']:
return False, "push({}+{}) / sample({}) > limit_max({})".format(
self._history_push_count, push_size, self._history_sample_count, self._push_sample_rate_limit['max']
)
return True, "Can push."
def can_sample(self, sample_size: int) -> Tuple[bool, str]:
rate = self._history_push_count / (self._history_sample_count + sample_size)
if rate < self._push_sample_rate_limit['min']:
return False, "push({}) / sample({}+{}) < limit_min({})".format(
self._history_push_count, self._history_sample_count, sample_size, self._push_sample_rate_limit['min']
)
return True, "Can sample."
def close(self) -> None:
self._end_flag = True
@property
def history_push_count(self) -> int:
return self._history_push_count
@history_push_count.setter
def history_push_count(self, count) -> None:
with self._push_lock:
self._history_push_count = count
@property
def history_sample_count(self) -> int:
return self._history_sample_count
@history_sample_count.setter
def history_sample_count(self, count) -> None:
with self._sample_lock:
self._history_sample_count = count | PypiClean |
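
# Hedged usage sketch (illustrative; not part of the original module). The
# ThruputController above only needs a config object exposing
# `push_sample_rate_limit` and `window_seconds`, so a SimpleNamespace (or
# EasyDict) is enough to exercise the push/sample rate-limiting logic. The
# numbers below are made up for the example.
if __name__ == '__main__':
    from types import SimpleNamespace

    cfg = SimpleNamespace(
        push_sample_rate_limit={'min': 0.5, 'max': 2.0},
        window_seconds=30,
    )
    controller = ThruputController(cfg)

    # Pretend 100 transitions have been pushed and 10 sampled so far.
    controller.history_push_count = 100
    controller.history_sample_count = 10

    print(controller.can_push(50))    # (False, ...): (100 + 50) / 10 exceeds the max ratio of 2.0
    print(controller.can_sample(20))  # (True, ...): 100 / (10 + 20) stays above the min ratio of 0.5
    controller.close()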
/Mesa_Adapted-0.8.7.3-py3-none-any.whl/mesa/visualization/ModularVisualization.py | import os
import tornado.autoreload
import tornado.ioloop
import tornado.web
import tornado.websocket
import tornado.escape
import tornado.gen
import webbrowser
from mesa.visualization.UserParam import UserSettableParameter
# Suppress several pylint warnings for this file.
# Attributes being defined outside of init is a Tornado feature.
# pylint: disable=attribute-defined-outside-init
class VisualizationElement:
"""
Defines an element of the visualization.
Attributes:
package_includes: A list of external JavaScript files to include that
are part of the Mesa packages.
local_includes: A list of JavaScript files that are local to the
directory that the server is being run in.
js_code: A JavaScript code string to instantiate the element.
Methods:
render: Takes a model object, and produces JSON data which can be sent
to the client.
"""
package_includes = []
local_includes = []
js_code = ""
render_args = {}
def __init__(self):
pass
def render(self, model):
""" Build visualization data from a model object.
Args:
model: A model object
Returns:
A JSON-ready object.
"""
return "<b>VisualizationElement goes here</b>."
# =============================================================================
# Actual Tornado code starts here:
class PageHandler(tornado.web.RequestHandler):
""" Handler for the HTML template which holds the visualization. """
def get(self):
elements = self.application.visualization_elements
for i, element in enumerate(elements):
element.index = i
self.render(
"modular_template.html",
port=self.application.port,
model_name=self.application.model_name,
description=self.application.description,
package_includes=self.application.package_includes,
local_includes=self.application.local_includes,
scripts=self.application.js_code,
)
class SocketHandler(tornado.websocket.WebSocketHandler):
""" Handler for websocket. """
def open(self):
if self.application.verbose:
print("Socket opened!")
self.write_message(
{"type": "model_params", "params": self.application.user_params}
)
def check_origin(self, origin):
return True
@property
def viz_state_message(self):
return {"type": "viz_state", "data": self.application.render_model()}
def on_message(self, message):
""" Receiving a message from the websocket, parse, and act accordingly.
"""
if self.application.verbose:
print(message)
msg = tornado.escape.json_decode(message)
if msg["type"] == "get_step":
if not self.application.model.running:
self.write_message({"type": "end"})
else:
self.application.model.step()
self.write_message(self.viz_state_message)
elif msg["type"] == "reset":
self.application.reset_model()
self.write_message(self.viz_state_message)
elif msg["type"] == "submit_params":
param = msg["param"]
value = msg["value"]
# Is the param editable?
if param in self.application.user_params:
if isinstance(
self.application.model_kwargs[param], UserSettableParameter
):
self.application.model_kwargs[param].value = value
else:
self.application.model_kwargs[param] = value
else:
if self.application.verbose:
print("Unexpected message!")
class ModularServer(tornado.web.Application):
""" Main visualization application. """
verbose = True
port = 8521 # Default port to listen on
max_steps = 100000
# Handlers and other globals:
page_handler = (r"/", PageHandler)
socket_handler = (r"/ws", SocketHandler)
static_handler = (
r"/static/(.*)",
tornado.web.StaticFileHandler,
{"path": os.path.dirname(__file__) + "/templates"},
)
local_handler = (r"/local/(.*)", tornado.web.StaticFileHandler, {"path": ""})
handlers = [page_handler, socket_handler, static_handler, local_handler]
settings = {
"debug": True,
"autoreload": False,
"template_path": os.path.dirname(__file__) + "/templates",
}
EXCLUDE_LIST = ("width", "height")
def __init__(
self, model_cls, visualization_elements, name="Mesa Model", model_params={}
):
""" Create a new visualization server with the given elements. """
# Prep visualization elements:
self.visualization_elements = visualization_elements
self.package_includes = set()
self.local_includes = set()
self.js_code = []
for element in self.visualization_elements:
for include_file in element.package_includes:
self.package_includes.add(include_file)
for include_file in element.local_includes:
self.local_includes.add(include_file)
self.js_code.append(element.js_code)
# Initializing the model
self.model_name = name
self.model_cls = model_cls
self.description = "No description available"
if hasattr(model_cls, "description"):
self.description = model_cls.description
elif model_cls.__doc__ is not None:
self.description = model_cls.__doc__
self.model_kwargs = model_params
self.reset_model()
# Initializing the application itself:
super().__init__(self.handlers, **self.settings)
@property
def user_params(self):
result = {}
for param, val in self.model_kwargs.items():
if isinstance(val, UserSettableParameter):
result[param] = val.json
return result
def reset_model(self):
""" Reinstantiate the model object, using the current parameters. """
model_params = {}
for key, val in self.model_kwargs.items():
if isinstance(val, UserSettableParameter):
if (
val.param_type == "static_text"
): # static_text is never used for setting params
continue
model_params[key] = val.value
else:
model_params[key] = val
self.model = self.model_cls(**model_params)
def render_model(self):
""" Turn the current state of the model into a dictionary of
visualizations
"""
visualization_state = []
for element in self.visualization_elements:
element_state = element.render(self.model)
visualization_state.append(element_state)
return visualization_state
def launch(self, port=None, open_browser=True):
""" Run the app. """
if port is not None:
self.port = port
url = "http://127.0.0.1:{PORT}".format(PORT=self.port)
print("Interface starting at {url}".format(url=url))
self.listen(self.port)
if open_browser:
webbrowser.open(url)
tornado.autoreload.start()
tornado.ioloop.IOLoop.current().start() | PypiClean |
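# --- Illustrative sketch, not part of the original module ---
# Wiring the classes above together. CounterModel and CounterElement are
# stand-ins invented for this example; only the ModularServer constructor and
# launch() signatures come from the module itself, and the import path simply
# mirrors the file's location in the package.
from mesa.visualization.ModularVisualization import ModularServer, VisualizationElement

class CounterModel:
    description = "Counts steps until a limit is reached."
    def __init__(self, limit=10):
        self.limit = limit
        self.steps = 0
        self.running = True
    def step(self):
        self.steps += 1
        self.running = self.steps < self.limit

class CounterElement(VisualizationElement):
    def render(self, model):
        return {"steps": model.steps}

server = ModularServer(CounterModel, [CounterElement()], name="Counter",
                       model_params={"limit": 20})
# server.launch(port=8521, open_browser=False)  # blocks inside the IOLoop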
/Argonaut-0.3.4.tar.gz/Argonaut-0.3.4/argonaut/public/ckeditor/themes/default/theme.js | /*
Copyright (c) 2003-2010, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.html or http://ckeditor.com/license
*/
CKEDITOR.themes.add('default',(function(){function a(b,c){var d,e;e=b.config.sharedSpaces;e=e&&e[c];e=e&&CKEDITOR.document.getById(e);if(e){var f='<span class="cke_shared"><span class="'+b.skinClass+' cke_editor_'+b.name+'">'+'<span class="'+CKEDITOR.env.cssClass+'">'+'<span class="cke_wrapper cke_'+b.lang.dir+'">'+'<span class="cke_editor">'+'<div class="cke_'+c+'">'+'</div></span></span></span></span></span>',g=e.append(CKEDITOR.dom.element.createFromHtml(f,e.getDocument()));if(e.getCustomData('cke_hasshared'))g.hide();else e.setCustomData('cke_hasshared',1);d=g.getChild([0,0,0,0]);b.on('focus',function(){for(var h=0,i,j=e.getChildren();i=j.getItem(h);h++){if(i.type==CKEDITOR.NODE_ELEMENT&&!i.equals(g)&&i.hasClass('cke_shared'))i.hide();}g.show();});b.on('destroy',function(){g.remove();});}return d;};return{build:function(b,c){var d=b.name,e=b.element,f=b.elementMode;if(!e||f==CKEDITOR.ELEMENT_MODE_NONE)return;if(f==CKEDITOR.ELEMENT_MODE_REPLACE)e.hide();var g=b.fire('themeSpace',{space:'top',html:''}).html,h=b.fire('themeSpace',{space:'contents',html:''}).html,i=b.fireOnce('themeSpace',{space:'bottom',html:''}).html,j=h&&b.config.height,k=b.config.tabIndex||b.element.getAttribute('tabindex')||0;if(!h)j='auto';else if(!isNaN(j))j+='px';var l='',m=b.config.width;if(m){if(!isNaN(m))m+='px';l+='width: '+m+';';}var n=g&&a(b,'top'),o=a(b,'bottom');n&&(n.setHtml(g),g='');o&&(o.setHtml(i),i='');var p=CKEDITOR.dom.element.createFromHtml(['<span id="cke_',d,'" onmousedown="return false;" class="',b.skinClass,' cke_editor_',d,'" dir="',b.lang.dir,'" title="',CKEDITOR.env.gecko?' ':'','" lang="',b.langCode,'"'+(CKEDITOR.env.webkit?' tabindex="'+k+'"':'')+' role="application"'+' aria-labelledby="cke_',d,'_arialbl"'+(l?' style="'+l+'"':'')+'>'+'<span id="cke_',d,'_arialbl" class="cke_voice_label">'+b.lang.editor+'</span>'+'<span class="',CKEDITOR.env.cssClass,'" role="presentation"><span class="cke_wrapper cke_',b.lang.dir,'" role="presentation"><table class="cke_editor" border="0" cellspacing="0" cellpadding="0" role="presentation"><tbody><tr',g?'':' style="display:none"',' role="presentation"><td id="cke_top_',d,'" class="cke_top" role="presentation">',g,'</td></tr><tr',h?'':' style="display:none"',' role="presentation"><td id="cke_contents_',d,'" class="cke_contents" style="height:',j,'" role="presentation">',h,'</td></tr><tr',i?'':' style="display:none"',' role="presentation"><td id="cke_bottom_',d,'" class="cke_bottom" role="presentation">',i,'</td></tr></tbody></table><style>.',b.skinClass,'{visibility:hidden;}</style></span></span></span>'].join(''));
p.getChild([1,0,0,0,0]).unselectable();p.getChild([1,0,0,0,2]).unselectable();if(f==CKEDITOR.ELEMENT_MODE_REPLACE)p.insertAfter(e);else e.append(p);b.container=p;p.disableContextMenu();b.fireOnce('themeLoaded');b.fireOnce('uiReady');},buildDialog:function(b){var c=CKEDITOR.tools.getNextNumber(),d=CKEDITOR.dom.element.createFromHtml(['<div class="cke_editor_'+b.name.replace('.','\\.')+'_dialog cke_skin_',b.skinName,'" dir="',b.lang.dir,'" lang="',b.langCode,'" role="dialog" aria-labelledby="%title#"><table class="cke_dialog',' '+CKEDITOR.env.cssClass,' cke_',b.lang.dir,'" style="position:absolute" role="presentation"><tr><td role="presentation"><div class="%body" role="presentation"><div id="%title#" class="%title" role="presentation"></div><a id="%close_button#" class="%close_button" href="javascript:void(0)" title="'+b.lang.common.close+'" role="button"><span class="cke_label">X</span></a>'+'<div id="%tabs#" class="%tabs" role="tablist"></div>'+'<table class="%contents" role="presentation"><tr>'+'<td id="%contents#" class="%contents" role="presentation"></td>'+'</tr></table>'+'<div id="%footer#" class="%footer" role="presentation"></div>'+'</div>'+'<div id="%tl#" class="%tl"></div>'+'<div id="%tc#" class="%tc"></div>'+'<div id="%tr#" class="%tr"></div>'+'<div id="%ml#" class="%ml"></div>'+'<div id="%mr#" class="%mr"></div>'+'<div id="%bl#" class="%bl"></div>'+'<div id="%bc#" class="%bc"></div>'+'<div id="%br#" class="%br"></div>'+'</td></tr>'+'</table>',CKEDITOR.env.ie?'':'<style>.cke_dialog{visibility:hidden;}</style>','</div>'].join('').replace(/#/g,'_'+c).replace(/%/g,'cke_dialog_')),e=d.getChild([0,0,0,0,0]),f=e.getChild(0),g=e.getChild(1);f.unselectable();g.unselectable();return{element:d,parts:{dialog:d.getChild(0),title:f,close:g,tabs:e.getChild(2),contents:e.getChild([3,0,0,0]),footer:e.getChild(4)}};},destroy:function(b){var c=b.container;c.clearCustomData();b.element.clearCustomData();if(c)c.remove();if(b.elementMode==CKEDITOR.ELEMENT_MODE_REPLACE)b.element.show();delete b.element;}};})());CKEDITOR.editor.prototype.getThemeSpace=function(a){var b='cke_'+a,c=this._[b]||(this._[b]=CKEDITOR.document.getById(b+'_'+this.name));return c;};CKEDITOR.editor.prototype.resize=function(a,b,c,d){var e=/^\d+$/;if(e.test(a))a+='px';var f=this.container,g=CKEDITOR.document.getById('cke_contents_'+this.name),h=d?f.getChild(1):f;CKEDITOR.env.webkit&&h.setStyle('display','none');h.setStyle('width',a);if(CKEDITOR.env.webkit){h.$.offsetWidth;h.setStyle('display','');
}var i=c?0:(h.$.offsetHeight||0)-(g.$.clientHeight||0);g.setStyle('height',Math.max(b-i,0)+'px');this.fire('resize');};CKEDITOR.editor.prototype.getResizable=function(){return this.container.getChild(1);}; | PypiClean |
/Lib_HaL9k-0.7.0-py3-none-any.whl/hal9k/track.py |
import virtualbox
from virtualbox.library import LockType, VBoxErrorInvalidObjectState
class TrackException(Exception):
"""Custom exception for the track class."""
class Track:
"""The Track Class."""
def __init__(self, track_name, vbox):
"""Initialize the Track class."""
self.__vbox = vbox
self.__session = virtualbox.Session()
self.__machine = self.__vbox.find_machine(track_name)
def __enter__(self):
"""Work with context managers."""
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
"""Work with context managers."""
def play(self):
"""Start the VM in headless mode."""
try:
progress = self.__machine.launch_vm_process(
self.__session, "headless", ""
)
progress.wait_for_completion()
except VBoxErrorInvalidObjectState:
raise TrackException(
"Could not play track. (Track currently playing.)"
)
def rewind(self):
"""Revert the VM to the PRODUCTION snapshot."""
try:
self.__machine.lock_machine(self.__session, LockType(2))
snapshot = self.__machine.find_snapshot("PRODUCTION")
progress = self.__session.machine.restore_snapshot(snapshot)
progress.wait_for_completion()
self.__session.unlock_machine()
except VBoxErrorInvalidObjectState:
raise TrackException(
"Could not rewind track. (Track currently playing.)"
)
def status(self):
"""Check the VM status."""
machine_state_return_value = [
# -1: Error, 0: Stopped, 1: Running, 2: Rewinding, 3: Busy
-1, # 0: Null (never used by API)
0, # 1: Powered Off
0, # 2: Saved
-1, # 3: Teleported
0, # 4: Aborted
1, # 5: Running
-1, # 6: Paused
-1, # 7: Stuck
-1, # 8: Teleporting
3, # 9: Live Snapshotting
3, # 10: Starting
3, # 11: Stopping
3, # 12: Saving
3, # 13: Restoring
-1, # 14: Teleporting Paused VM
-1, # 15: Teleporting In
1, # 16: Deleting Snapshot Online
-1, # 17: Deleting Snapshot Paused
-1, # 18: Online Snapshotting
2, # 19: Restoring Snapshot
0, # 20: Deleting Snapshot
-1, # 21: Setting Up
0, # 22: Offline Snapshotting
]
return machine_state_return_value[int(self.__machine.state)]
def stop(self):
"""Stop the VM."""
try:
progress = self.__session.console.power_down()
progress.wait_for_completion()
except Exception as exception:
try:
if (
exception.errno == -2147418113
or exception.errno == 2147549183
):
raise TrackException(
"Could not stop track. (Track already stopped.)"
)
print(f"Unknown Error Number: {exception.errno}")
raise TrackException(
"Could not stop track. (Unknown error number.)"
)
except AttributeError:
raise TrackException("Could not stop track. (Unknown error.)") | PypiClean |
/Houndify-2.1.0.tar.gz/Houndify-2.1.0/houndify/sample_wave.py | import houndify
import argparse
import sys
import time
import wave
import json
BUFFER_SIZE = 256
#
# Simplest HoundListener; just print out what we receive.
# You can use these callbacks to interact with your UI.
#
class MyListener(houndify.HoundListener):
def __init__(self):
pass
def onPartialTranscript(self, transcript):
print("Partial transcript: " + transcript)
def onFinalResponse(self, response):
print("Final response:")
print(json.dumps(response, indent=2, sort_keys=True, ensure_ascii=False))
def onError(self, err):
print("Error: " + str(err))
def check_audio_compatibility(audio, fname):
    if audio.getsampwidth() != 2:
        print("{}: wrong sample width (must be 16-bit)".format(fname))
        sys.exit()
    if audio.getframerate() != 8000 and audio.getframerate() != 16000:
        print("{}: unsupported sampling frequency (must be either 8 or 16 kHz)".format(fname))
        sys.exit()
    if audio.getnchannels() != 1:
        print("{}: must be single channel (mono)".format(fname))
        sys.exit()
def send_audio_file(audio_file, client_id, client_key):
audio = wave.open(audio_file)
    check_audio_compatibility(audio, audio_file)
client = houndify.StreamingHoundClient(client_id, client_key, "test_user")
client.setLocation(37.388309, -121.973968)
client.setSampleRate(audio.getframerate())
client.start(MyListener())
while True:
chunk_start = time.time()
samples = audio.readframes(BUFFER_SIZE)
chunk_duration = float(len(samples)) / (audio.getframerate() * audio.getsampwidth())
if len(samples) == 0: break
if client.fill(samples): break
# # Uncomment the line below to simulate real-time request
# time.sleep(chunk_duration - time.time() + chunk_start)
audio.close()
response = client.finish() # returns either final response or error
return response
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('AUDIO_FILE', type=str,
help='Audio .wav file to be sent to the server')
parser.add_argument('--endpoint', '-e', default='https://api.houndify.com/v1/audio',
help="The endpoint the SDK will hit to query Houndify.")
parser.add_argument('--client-id', '-id', required=True,
help="Houndify client ID")
parser.add_argument('--client-key', '-key', required=True,
help="Houndify client Key")
args = parser.parse_args()
response = send_audio_file(args.AUDIO_FILE, args.client_id, args.client_key) | PypiClean |
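# --- Illustrative usage, not part of the original module ---
# The script expects 16-bit mono WAV audio at 8 or 16 kHz (see
# check_audio_compatibility above). The file name and credentials below are
# placeholders:
#
#   python sample_wave.py speech.wav --client-id YOUR_ID --client-key YOUR_KEY
#
# The same request can be issued programmatically; the return value is the
# final JSON response (or error) produced by client.finish():
#
#   response = send_audio_file("speech.wav", "YOUR_ID", "YOUR_KEY")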
/MirrorHerokuX-6.0.3-py3-none-any.whl/bot/helper/mirror_utils/download_utils/aria2_download.py | from bot import aria2, download_dict_lock, STOP_DUPLICATE_MIRROR
from bot.helper.mirror_utils.upload_utils.gdriveTools import GoogleDriveHelper
from bot.helper.ext_utils.bot_utils import *
from .download_helper import DownloadHelper
from bot.helper.mirror_utils.status_utils.aria_download_status import AriaDownloadStatus
from bot.helper.telegram_helper.message_utils import *
import threading
from aria2p import API
from time import sleep
class AriaDownloadHelper(DownloadHelper):
def __init__(self):
super().__init__()
@new_thread
def __onDownloadStarted(self, api, gid):
sleep(1)
LOGGER.info(f"onDownloadStart: {gid}")
dl = getDownloadByGid(gid)
download = api.get_download(gid)
self.name = download.name
sname = download.name
if STOP_DUPLICATE_MIRROR:
if dl.getListener().isTar == True:
sname = sname + ".tar"
if dl.getListener().extract == True:
smsg = None
else:
gdrive = GoogleDriveHelper(None)
smsg, button = gdrive.drive_list(sname)
if smsg:
                dl.getListener().onDownloadError('File is already available in drive. This download has been stopped.\n\n')
sendMarkup("Here are the search results:", dl.getListener().bot, dl.getListener().update, button)
aria2.remove([download])
return
update_all_messages()
def __onDownloadComplete(self, api: API, gid):
LOGGER.info(f"onDownloadComplete: {gid}")
dl = getDownloadByGid(gid)
download = api.get_download(gid)
if download.followed_by_ids:
new_gid = download.followed_by_ids[0]
new_download = api.get_download(new_gid)
with download_dict_lock:
download_dict[dl.uid()] = AriaDownloadStatus(new_gid, dl.getListener())
if new_download.is_torrent:
download_dict[dl.uid()].is_torrent = True
update_all_messages()
LOGGER.info(f'Changed gid from {gid} to {new_gid}')
else:
if dl: threading.Thread(target=dl.getListener().onDownloadComplete).start()
@new_thread
def __onDownloadPause(self, api, gid):
LOGGER.info(f"onDownloadPause: {gid}")
dl = getDownloadByGid(gid)
dl.getListener().onDownloadError('Download stopped by user!')
@new_thread
def __onDownloadStopped(self, api, gid):
LOGGER.info(f"onDownloadStop: {gid}")
dl = getDownloadByGid(gid)
        if dl: dl.getListener().onDownloadError('Your torrent has no seeds. Download stopped automatically.')
@new_thread
def __onDownloadError(self, api, gid):
sleep(0.5) #sleep for split second to ensure proper dl gid update from onDownloadComplete
LOGGER.info(f"onDownloadError: {gid}")
dl = getDownloadByGid(gid)
download = api.get_download(gid)
error = download.error_message
LOGGER.info(f"Download Error: {error}")
if dl: dl.getListener().onDownloadError(error)
def start_listener(self):
aria2.listen_to_notifications(threaded=True, on_download_start=self.__onDownloadStarted,
on_download_error=self.__onDownloadError,
on_download_pause=self.__onDownloadPause,
on_download_stop=self.__onDownloadStopped,
on_download_complete=self.__onDownloadComplete)
def add_download(self, link: str, path, listener, filename):
if is_magnet(link):
download = aria2.add_magnet(link, {'dir': path, 'out': filename})
else:
download = aria2.add_uris([link], {'dir': path, 'out': filename})
if download.error_message: #no need to proceed further at this point
listener.onDownloadError(download.error_message)
return
with download_dict_lock:
download_dict[listener.uid] = AriaDownloadStatus(download.gid,listener)
LOGGER.info(f"Started: {download.gid} DIR:{download.dir} ") | PypiClean |
/BenchExec-3.17.tar.gz/BenchExec-3.17/benchexec/tools/esbmc.py |
import os
from benchexec.tools.sv_benchmarks_util import get_data_model_from_task, ILP32, LP64
import benchexec.tools.template
import benchexec.result as result
class Tool(benchexec.tools.template.BaseTool2):
"""
This class serves as tool adaptor for ESBMC (http://www.esbmc.org/)
"""
def executable(self, tool_locator):
return tool_locator.find_executable("esbmc-wrapper.py")
def working_directory(self, executable):
executableDir = os.path.dirname(executable)
return executableDir
def version(self, executable):
return self._version_from_tool(executable, "-v")
def name(self):
return "ESBMC"
def cmdline(self, executable, options, task, rlimits):
data_model_param = get_data_model_from_task(task, {ILP32: "32", LP64: "64"})
if data_model_param and "--arch" not in options:
options += ["--arch", data_model_param]
return (
[executable]
+ ["-p", task.property_file]
+ options
+ [task.single_input_file]
)
def determine_result(self, run):
status = result.RESULT_UNKNOWN
if run.output.any_line_contains("FALSE_DEREF"):
status = result.RESULT_FALSE_DEREF
elif run.output.any_line_contains("FALSE_FREE"):
status = result.RESULT_FALSE_FREE
elif run.output.any_line_contains("FALSE_MEMTRACK"):
status = result.RESULT_FALSE_MEMTRACK
elif run.output.any_line_contains("FALSE_MEMCLEANUP"):
status = result.RESULT_FALSE_MEMCLEANUP
elif run.output.any_line_contains("FALSE_OVERFLOW"):
status = result.RESULT_FALSE_OVERFLOW
elif run.output.any_line_contains("FALSE_TERMINATION"):
status = result.RESULT_FALSE_TERMINATION
elif run.output.any_line_contains("FALSE"):
status = result.RESULT_FALSE_REACH
elif run.output.any_line_contains("TRUE"):
status = result.RESULT_TRUE_PROP
elif run.output.any_line_contains("DONE"):
status = result.RESULT_DONE
if status == result.RESULT_UNKNOWN:
if run.was_timeout:
status = result.RESULT_TIMEOUT
elif not run.output.any_line_contains("Unknown"):
status = "ERROR"
return status | PypiClean |
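# --- Illustrative note, not part of the original module ---
# For an ILP32 task, cmdline() above assembles a command along the lines of
# (file names and property file are placeholders):
#
#   esbmc-wrapper.py -p unreach-call.prp --arch 32 program.c
#
# determine_result() then maps the wrapper's TRUE / FALSE_* / DONE output
# lines to the corresponding benchexec.result constants.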