/DendroPy_calver-2023.330.2-py3-none-any.whl/dendropy/interop/itol.py
##############################################################################
## DendroPy Phylogenetic Computing Library.
##
## Copyright 2010-2015 Jeet Sukumaran and Mark T. Holder.
## All rights reserved.
##
## See "LICENSE.rst" for terms and conditions of usage.
##
## If you use this work or any portion thereof in published work,
## please cite it as:
##
## Sukumaran, J. and M. T. Holder. 2010. DendroPy: a Python library
## for phylogenetic computing. Bioinformatics 26: 1569-1571.
##
##############################################################################
"""
Wrappers for interacting with ITOL.
"""
import os
import sys
import tempfile
if sys.version_info.major < 3:
from urllib import urlencode
from urllib import urlopen
else:
from urllib.parse import urlencode
from urllib.request import urlopen
from urllib import request
import zipfile
try:
import zlib
COMPRESSION_TYPE = zipfile.ZIP_DEFLATED
except ImportError:
COMPRESSION_TYPE = zipfile.ZIP_STORED
import dendropy
from dendropy.utility import textprocessing
from dendropy.utility import urlio
from dendropy.utility import error
class ItolService(object):
DISPLAY_MODE_NORMAL = 1
DISPLAY_MODE_CIRCULAR = 2
DISPLAY_MODE_UNROOTED = 3
RENDER_PARAM_NAMES = set([
"newick_format",
"display_mode",
"tree_x",
"tree_y",
"vertical_shift_factor",
"horizontal_scale_factor",
"current_font_size",
"current_font_name",
"current_font_style",
"leaf_sorting",
"label_display",
"align_labels",
"label_shift",
"dashed_lines",
"ignore_branch_length",
"slanted_normal",
"arc",
"rotation",
"normal_rotation",
"unrooted_rotation",
"line_width",
"default_branch_color",
"default_label_color",
"inverted",
"circle_size_inverted",
"range_mode",
"include_ranges_legend",
"ranges_legend_title",
"internal_marks",
"internal_scale",
"internalScale1",
"internalScale2",
"internalScale1Color",
"internalScale2Color",
"branchlength_display",
"branchlength_label_size",
"branchlength_label_position",
"branchlength_label_sci",
"branchlength_label_rounding",
"branchlength_label_age",
"metadata_source",
"bootstrap_display",
"bootstrap_type",
"bootstrap_symbol",
"bootstrap_symbol_min",
"bootstrap_symbol_max",
"bootstrap_symbol_position",
"bootstrap_symbol_color",
"bootstrap_slider_min",
"bootstrap_slider_max",
"bootstrap_label_position",
"bootstrap_label_size",
"bootstrap_label_sci",
"bootstrap_label_rounding",
"bootstrap_width_min",
"bootstrap_width_max",
"bootstrap_min_color",
"bootstrap_use_mid_color",
"bootstrap_mid_color",
"bootstrap_max_color",
"datasets_visible",
])
def __init__(self, **kwargs):
"""
Keyword Arguments
-----------------
newick_format: int
Possible value: ID. Only used when export format is set to newick. If specified, internal node IDs will be included in exported Newick trees.
display_mode: int
Possible values: 1,2 or 3 (1=normal, 2=circular, 3=unrooted)
tree_x: int
Tree will be shifted horizontally by this value, positive or negative (value in pixels)
tree_y: int
Tree will be shifted vertically by this value, positive or negative (value in pixels)
vertical_shift_factor: int
Vertical tree scaling factor (positive number)
horizontal_scale_factor: int
Horizontal tree scaling factor (positive number)
current_font_size: int
Main label font size in pixels (integer >= 9)
        current_font_name: str
            Possible values: 'Arial', 'Courier', 'Courier New', 'Verdana', 'Impact', 'Georgia', 'Times New Roman' or 'Monotype Corsiva'
        current_font_style: str
            Possible values: 'normal', 'bold' or 'bold italic'
leaf_sorting: int
Possible values: 1 or 2 (1=normal sorting, 2=no sorting)
label_display: int
Possible values: 0 or 1 (0=hide labels, 1=show labels)
align_labels: int
Possible values: 0 or 1 (0=labels not aligned, 1=labels aligned)
label_shift: int
Amount to shift the leaf labels (positive or negative, in pixels)
dashed_lines: int
Display dashed lines connecting branches to leaf labels. Possible values: 0 or 1 (0=hide lines, 1=display lines)
ignore_branch_length: int
Possible values: 0 or 1
slanted_normal: int
Display tree in slanted (triangular) mode. Note that display_mode
must be 1. Possible values: 0 or 1
arc: int
Angle of the display arc for the circular tree (in degrees, a number between 0 and 360)
rotation: int
Rotation angle for circular tree (in degrees, a number between 0 and 360)
normal_rotation: int
Rotation angle for normal tree (in degrees, a number between 0 and 360)
unrooted_rotation: int
Rotation angle for unrooted tree (in degrees, a number between 0 and 360).
line_width: int
Width of tree lines (in pixels).
default_branch_color: str
Default color of tree's branches (hex, RGB or RGBA notation).
default_label_color: str
Default color of tree's labels (hex, RGB or RGBA notation).
inverted: int
Inverted display (only for circular and normal mode). Possible values: 0 or 1.
circle_size_inverted: int
For inverted circular display only. Internal radius will be
increased by this amount (value in pixels)
range_mode: int
Colored ranges display style. Possible values: 0,1 or 2 (0=off,
            1=cover labels only, 2=cover full clades)
include_ranges_legend: int
Include colored ranges legend. Possible values: 0 or 1.
ranges_legend_title: str
Title of the colored ranges legend.
internal_marks: int
Draw circles to mark the location of internal nodes. Possible
values: 0, 1 or 2 (0=Do not display, 1=Display on nodes with one
child, 2=Display always).
internal_scale: int
Draw internal tree scale. Possible values: 0 or 1.
internalScale1: int
Repeat value for the first set of internal scale lines (branch
length value)
internalScale2: int
Repeat value for the second set of internal scale lines (branch
length value).
internalScale1Color: str
Color for the first set of internal scale lines (hex, RGB or RGBA).
internalScale2Color: str
Color for the second set of internal scale lines (hex, RGB or RGBA).
branchlength_display: int
Display branch length text labels on branches (possible values: 0 or 1).
branchlength_label_size: int
Font size for the branch length labels (in pixels, integer >= 0)
branchlength_label_position: int
Position of the branch length labels along the branch (in percent,
default is 50%).
branchlength_label_sci: int
Display branch length labels in scientific notation (if set to 1).
        branchlength_label_rounding: int
            Round branch length labels to this number of decimals (integer >= 0).
        branchlength_label_age: int
            Display node age instead of raw branch length values (if set to 1).
metadata_source: str
Which metadata source to use for bootstrap display options
(default: 'bootstrap', other possible options depend on the data in
the tree).
bootstrap_display: int
Display metadata values (possible values: 0 or 1).
bootstrap_type: int
Type of metadata display. Possible values: 1, 2, 3 or 4 (1=Symbol,
            2=Text label, 3=Branch color and 4=Branch width).
bootstrap_symbol: int
Symbol used to display metadata values. Possible values: 1, 2, 3 or 4 (1=Circle, 2=Triangle, 3=Square and 4=Star).
bootstrap_symbol_min: int
Minimum size for the metadata symbol (in pixels).
bootstrap_symbol_max: int
Maximum size for the metadata symbol (in pixels).
bootstrap_symbol_position: int
Position of the metadata symbol along the branch (in percent,
default is 50%).
bootstrap_symbol_color: str
Bootstrap symbol color (hex, RGB or RGBA).
bootstrap_slider_min: int
Minimum metadata value to display.
bootstrap_slider_max: int
Maximum metadata value to display.
bootstrap_label_position: int
Position of the metadata text label along the branch (in percent,
default is 50%).
bootstrap_label_size: int
Font size for the metadata text labels (in pixels, integer >= 9).
bootstrap_label_sci: int
Display metadata labels in scientific notation (if set to 1).
bootstrap_label_rounding: int
Round metadata labels to this number of decimals (integer >= 0).
bootstrap_width_min: int
Branch width for minimum metadata value (in pixels), when
bootstrap_type=4.
bootstrap_width_max: int
Branch width for maximum metadata value (in pixels), when
bootstrap_type=4.
        bootstrap_min_color: str
Branch color for minimum metadata value (hex, RGB or RGBA), when
bootstrap_type=3.
bootstrap_use_mid_color: int
Use a three color gradient for metadata branch colors (possible
values: 0 or 1).
bootstrap_mid_color: str
Branch color for midpoint metadata value (hex, RGB or RGBA), when
bootstrap_type=3 and bootstrap_use_mid_color=1.
bootstrap_max_color: str
Branch color for maximum metadata value (hex, RGB or RGBA), when
bootstrap_type=3.
datasets_visible: int
Comma delimited list of datasets to display (starting with 0, e.g.
datasets_visible=0,2,5).
"""
self._base_upload_url = "https://itol.embl.de/batch_uploader.cgi"
self._base_download_url = "https://itol.embl.de/batch_downloader.cgi"
self._headers = {"Content-type": "application/x-www-form-urlencoded",
"Accept": "text/plain"}
self._render_params = {}
        for k, v in kwargs.items():
setattr(self, k, v)
def __setattr__(self, name, value):
if name in ItolService.RENDER_PARAM_NAMES:
if value is None:
try:
del self._render_params[name]
except KeyError:
pass
else:
self._render_params[name] = value
object.__setattr__(self, name, value)
def read_remote(self,
tree_id,
result_format):
"""
Executes query and returns binary result.
"""
params_d = {
"tree": tree_id,
"format": result_format,
}
params_d.update(self._render_params)
params = urlencode(params_d)
req = request.Request(
self._base_download_url,
params.encode("ascii"),
self._headers)
response = urlopen(req)
result = response.read()
return result
def download(self,
dest,
tree_id,
result_format):
"""
Executes query and writes binary result to 'dest'.
Examples
--------
::
from dendropy.interop import itol
itol_service = itol.ItolService()
itol_service.display_mode = 1
itol_service.label_display = 0
itol_service.ignore_branch_length = 1
itol_service.normal_rotation = 90
itol_service.slanted_normal = 1
itol_service.line_width = 13
itol_service.internal_scale = 0
result_format = "pdf" # "png", "svg", etc.
itol_service.download(
dest="x1.{}".format(result_format),
tree_id=709552230231221546743590,
result_format=result_format,)
"""
result = self.read_remote(
tree_id=tree_id,
result_format=result_format,)
if isinstance(dest, str):
dest = open(dest, "wb")
with dest:
dest.write(result)
def upload(self, tree):
"""
Uploads a tree to iTOL.
Examples
--------
::
import dendropy
            from dendropy.interop import itol
tree = dendropy.Tree.get(data="[&R] (A1,(B2,(C3,(D4,E5))));",
schema="newick")
            itol_service = itol.ItolService()
tree_id = itol_service.upload(tree)
print(tree_id)
"""
import requests
with tempfile.TemporaryDirectory() as temp_dirname:
zf_path = os.path.join(temp_dirname, "data.zip")
tree_str = tree.as_string(schema="newick")
zf = zipfile.ZipFile(zf_path, mode="w", compression=COMPRESSION_TYPE)
# zf = zipfile.ZipFile(zf_path, mode="w")
with zf:
zf.writestr("tree.tree", tree_str,)
            with open(zf_path, "rb") as zip_handle:
                files = {"zipFile": zip_handle}
                response = urlio.post_request(
                    url=self._base_upload_url,
                    files=files)
response_text = response.text
if "SUCCESS" not in response_text:
raise error.ExternalServiceError(
service_name="iTOL",
invocation_command=self._base_upload_url,
service_input="",
returncode=-1,
stdout=response_text,
stderr="")
response_lines = [row for row in response_text.split("\n") if row]
warnings = response_lines[0:-1]
tree_id = response_lines[-1].split()[1]
return tree_id
def render_tree(self,
tree,
dest,
result_format,):
"""
Renders a tree using the iTOL web service.
Examples
--------
::
itol_service = ItolService()
result_format = "pdf"
tree = dendropy.Tree.get(data="[&R] (A1,(B2,(C3,(D4,E5))));",
schema="newick")
itol_service.render_tree(
tree=tree,
dest="sample.{}".format(result_format),
result_format=result_format,)
"""
tree_id = self.upload(tree)
self.download(
dest=dest,
tree_id=tree_id,
result_format=result_format,)
        return tree_id
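
# A minimal usage sketch (not part of the original module): render options are set as plain
# attributes on the service object (collected via __setattr__ into the download request).
# The tree below is the toy example from the docstrings above; actually running this
# requires network access to the iTOL servers.
if __name__ == "__main__":
    itol_service = ItolService()
    itol_service.display_mode = ItolService.DISPLAY_MODE_CIRCULAR
    itol_service.label_display = 1
    itol_service.line_width = 5
    demo_tree = dendropy.Tree.get(
            data="[&R] (A1,(B2,(C3,(D4,E5))));",
            schema="newick")
    itol_service.render_tree(
            tree=demo_tree,
            dest="itol_demo.pdf",
            result_format="pdf")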
/ClipExtractor-0.0.2.tar.gz/ClipExtractor-0.0.2/README.md
# ClipExtractor
This is a Python package that makes it easy to extract AI art prompts by using CLIP
models for image captioning. It can recover information about an artwork such as the
portrait, art name, art movement, model, medium, artist, trending, or flavors.
Special thanks to my good friend [@pharmapsychotic](https://twitter.com/pharmapsychotic) for his open-source code;
I converted it into a simple Python package that users can run in only three steps.
## Installation
```
pip install ClipExtractor==0.0.2
# or, in Google Colab:
!pip install ClipExtractor==0.0.2
```
## Tutorial
[Colab Google Drive](https://colab.research.google.com/drive/1BVrL-lk4fnEH2acrJq1zIIgHGWBr419Z?usp=sharing)
You can follow the tutorial in the Colab notebook linked above.
/FuzzyClassificator-1.3.84-py3-none-any.whl/FuzzyRoutines.py
# FuzzyClassificator - this program uses neural networks to solve classification problems,
# and uses fuzzy sets and fuzzy logic to interpreting results.
# Copyright (C) 2017, Timur Gilmullin
# e-mail: [email protected]
# Library contains some routines for work with fuzzy logic operators, fuzzy datasets and fuzzy scales.
import math
import copy
import traceback
from FCLogger import FCLogger
def DiapasonParser(diapason):
"""
Parse input with diapason string and return sorted list of full and unique indexes in that diapason.
Examples:
String "1,5" converted to: [1, 5]
String "1-5" converted to: [1, 2, 3, 4, 5]
String "8-10, 1-5, 6" converted to: [1, 2, 3, 4, 5, 6, 8, 9, 10]
String "11, 11, 12, 12, 1-5, 3-7" converted to: [1, 2, 3, 4, 5, 6, 7, 11, 12]
"""
fullDiapason = []
try:
for element in diapason.split(','):
fullDiapason += [x for x in range(int(element.split('-')[0]), int(element.split('-')[-1]) + 1)]
except Exception:
FCLogger.error('"{}" is not correct diapason string!'.format(diapason))
return []
return sorted(list(set(fullDiapason)))
def IsNumber(value):
"""
Return True if value is float or integer number.
"""
return bool(not isinstance(value, bool) and (isinstance(value, int) or isinstance(value, float)))
def IsCorrectFuzzyNumberValue(value):
"""
All operations in fuzzy logic are executed with numbers in interval [0, 1].
"""
if IsNumber(value):
return (0. <= value) and (value <= 1.)
else:
FCLogger.error('{} not a real number in [0, 1], type = {}'.format(str(value), type(value)))
return False
def FuzzyNOT(fuzzyNumber, alpha=0.5):
"""
Fuzzy logic NOT operator. y = 1 - Fuzzy if alpha = 0.5
"""
result = None # return None if errors
if IsCorrectFuzzyNumberValue(fuzzyNumber) and IsCorrectFuzzyNumberValue(alpha) and alpha > 0:
if (0 <= fuzzyNumber) and (fuzzyNumber <= alpha):
result = fuzzyNumber * (alpha - 1) / alpha + 1
else:
result = (fuzzyNumber - 1) * alpha / (alpha - 1)
return result
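
# Illustrative values (not from the original module), computed from the definition above:
# with the default alpha=0.5 the operator reduces to the classic complement 1 - x, and the
# threshold alpha is always a fixed point of the negation:
#     FuzzyNOT(0.25)             -> 0.75
#     FuzzyNOT(0.25, alpha=0.25) -> 0.25
#     FuzzyNOT(0.5, alpha=0.25)  -> 0.1666...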
def FuzzyNOTParabolic(fuzzyNumber, alpha=0.5, epsilon=0.001):
"""
Parabolic fuzzy NOT operator. 2a - x - y = (2a - 1)(y - x)^2.
"""
result = None # return None if errors
if IsCorrectFuzzyNumberValue(fuzzyNumber) and IsCorrectFuzzyNumberValue(alpha) and IsCorrectFuzzyNumberValue(epsilon) and alpha > 0:
if fuzzyNumber == 0:
result = 1
elif fuzzyNumber == 1:
result = 0
else:
y = 0
while (y <= 1) and abs((2 * alpha - fuzzyNumber - y) - (2 * alpha - 1) * (y - fuzzyNumber) ** 2) >= epsilon / 2:
y += epsilon
result = y
return result
def FuzzyAND(aNumber, bNumber):
"""
Fuzzy AND operator is minimum of two numbers.
"""
if IsNumber(aNumber) and IsNumber(bNumber):
return min(aNumber, bNumber)
else:
return None # return None if errors
def FuzzyOR(aNumber, bNumber):
"""
Fuzzy OR operator is maximum of two numbers.
"""
if IsNumber(aNumber) and IsNumber(bNumber):
return max(aNumber, bNumber)
else:
return None # return None if errors
def TNorm(aFuzzyNumber, bFuzzyNumber, normType='logic'):
"""
T-Norm conjunctive operators.
normType is an operator's name:
'logic' - result of fuzzy logic AND (min operator),
'algebraic' - result of algebraic multiplication operation,
'boundary' - result of boundary multiplication operation,
'drastic' - result of drastic multiplication operation.
"""
result = None # return None if errors
if IsCorrectFuzzyNumberValue(aFuzzyNumber) and IsCorrectFuzzyNumberValue(bFuzzyNumber):
if normType == 'logic':
result = FuzzyAND(aFuzzyNumber, bFuzzyNumber)
if normType == 'algebraic':
result = aFuzzyNumber * bFuzzyNumber
if normType == 'boundary':
result = FuzzyOR(aFuzzyNumber + bFuzzyNumber - 1, 0)
if normType == 'drastic':
if aFuzzyNumber == 1:
result = bFuzzyNumber
elif bFuzzyNumber == 1:
result = aFuzzyNumber
else:
result = 0
return result
def TNormCompose(*fuzzyNumbers, normType='logic'):
"""
T-Norm compose of n numbers.
normType is an operator's name:
'logic' - result of fuzzy logic AND (min operator),
'algebraic' - result of algebraic multiplication operation,
'boundary' - result of boundary multiplication operation,
'drastic' - result of drastic multiplication operation.
"""
result = None # return None if errors
if len(fuzzyNumbers) >= 1:
if IsNumber(fuzzyNumbers[0]):
result = fuzzyNumbers[0]
for f in fuzzyNumbers[1:]:
result = TNorm(result, f, normType)
return result
def SCoNorm(aFuzzyNumber, bFuzzyNumber, normType='logic'):
"""
S-coNorm disjunctive operators.
normType is an operator's name:
'logic' - result of fuzzy logic OR (max operator),
'algebraic' - result of algebraic addition operation,
'boundary' - result of boundary addition operation,
'drastic' - result of drastic addition operation.
"""
result = None # return None if errors
if IsCorrectFuzzyNumberValue(aFuzzyNumber) and IsCorrectFuzzyNumberValue(bFuzzyNumber):
if normType == 'logic':
result = FuzzyOR(aFuzzyNumber, bFuzzyNumber)
if normType == 'algebraic':
result = aFuzzyNumber + bFuzzyNumber - aFuzzyNumber * bFuzzyNumber
if normType == 'boundary':
result = FuzzyAND(aFuzzyNumber + bFuzzyNumber, 1)
if normType == 'drastic':
if aFuzzyNumber == 0:
result = bFuzzyNumber
elif bFuzzyNumber == 0:
result = aFuzzyNumber
else:
result = 1
return result
def SCoNormCompose(*fuzzyNumbers, normType='logic'):
"""
S-coNorm compose of n numbers.
normType is an operator's name:
        'logic' - result of fuzzy logic OR (max operator),
        'algebraic' - result of algebraic addition operation,
        'boundary' - result of boundary addition operation,
        'drastic' - result of drastic addition operation.
"""
result = None # return None if errors
if len(fuzzyNumbers) >= 1:
if IsNumber(fuzzyNumbers[0]):
result = fuzzyNumbers[0]
for f in fuzzyNumbers[1:]:
result = SCoNorm(result, f, normType)
return result
class MFunction():
"""
Routines for work with some default membership functions.
"""
def __init__(self, userFunc, **membershipFunctionParams):
self.accuracy = 1000 # Line of numbers divided by points, affect on accuracy, using in integral calculating
self._functions = {'hyperbolic': self.Hyperbolic,
'bell': self.Bell,
'parabolic': self.Parabolic,
'triangle': self.Triangle,
'trapezium': self.Trapezium,
'exponential': self.Exponential,
'sigmoidal': self.Sigmoidal,
'desirability': self.Desirability} # Factory registrator for all membership functions
self.mju = self._functions[userFunc] # Calculate result of define membership function
if membershipFunctionParams or self.mju.__name__ == 'Desirability':
self._parameters = membershipFunctionParams # parameters for using in membership function
else:
raise Exception("You must specify all membership function's parameters!")
@property
def name(self):
return self.mju.__name__ # membership function method name
def __str__(self):
# return view of function: Function_name(**parameters). Example: Bell(x, {"a": 0.6, "b": 0.66, "c": 0.77}
funcView = '{}({})'.format(self.name, 'y' if self.name == 'Desirability' else 'x, {}'.format(
'{' + ', '.join('"{}": {}'.format(*val) for val in [(k, self._parameters[k])
for k in sorted(self._parameters)]) + '}'))
return funcView
@property
def parameters(self):
return self._parameters # all membership function parameters
@parameters.setter
def parameters(self, value):
if value or self.mju.__name__ == 'Desirability':
self._parameters = value
else:
raise Exception("You must specify all membership function's parameters!")
def Hyperbolic(self, x):
"""
This is hyperbolic membership function with real inputs x and parameters a, b, c.
"""
a, b, c, result = 0, 0, 0, 0
try:
a = self._parameters['a']
b = self._parameters['b']
c = self._parameters['c']
if x <= c:
result = 1
else:
result = 1 / (1 + (a * (x - c)) ** b)
except Exception:
FCLogger.error(traceback.format_exc())
FCLogger.error('Hyperbolic membership function use real inputs x and parameters a, b, c.')
FCLogger.error('Your inputs: mju_hyperbolic({}, {}, {}, {})'.format(x, a, b, c))
return 0
return result
def Bell(self, x):
"""
This is bell membership function with real inputs x and parameters a, b, c.
"""
a, b, c, result = 0, 0, 0, 0
try:
a = self._parameters['a']
b = self._parameters['b']
c = self._parameters['c']
if x < b:
result = self.Parabolic(x)
elif (b <= x) and (x <= c):
result = 1
else:
aOld = self._parameters['a']
bOld = self._parameters['b']
self._parameters['a'] = c
self._parameters['b'] = c + b - a
result = 1 - self.Parabolic(x)
self._parameters['a'] = aOld
self._parameters['b'] = bOld
except Exception:
FCLogger.error(traceback.format_exc())
FCLogger.error('Bell membership function use real inputs x and parameters a, b, c.')
FCLogger.error('Your inputs: mju_bell({}, {}, {}, {})'.format(x, a, b, c))
return 0
return result
def Parabolic(self, x):
"""
This is parabolic membership function with real inputs x and parameters a, b.
"""
a, b, result = 0, 0, 0
try:
a = self._parameters['a']
b = self._parameters['b']
if x <= a:
result = 0
elif (a < x) and (x <= (a + b) / 2):
result = (2 * (x - a) ** 2) / (b - a) ** 2
elif ((a + b) / 2 < x) and (x < b):
result = 1 - (2 * (x - b) ** 2) / (b - a) ** 2
else:
result = 1
except Exception:
FCLogger.error(traceback.format_exc())
FCLogger.error('Parabolic membership function use real inputs x and parameters a, b.')
FCLogger.error('Your inputs: mju_parabolic({}, {}, {})'.format(x, a, b))
return 0
return result
def Triangle(self, x):
"""
This is triangle membership function with real inputs x and parameters a, b, c.
"""
a, b, c, result = 0, 0, 0, 0
try:
a = self._parameters['a']
b = self._parameters['b']
c = self._parameters['c']
if x <= a:
result = 0
elif (a < x) and (x <= c):
result = (x - a) / (c - a)
elif (c < x) and (x < b):
result = (b - x) / (b - c)
else:
result = 0
except Exception:
FCLogger.error(traceback.format_exc())
FCLogger.error('Triangle membership function use real inputs x and parameters a, b, c.')
FCLogger.error('Your inputs: mju_triangle({}, {}, {}, {})'.format(x, a, b, c))
return 0
return result
def Trapezium(self, x):
"""
This is trapezium membership function with real inputs x and parameters a, b, c, d.
"""
a, b, c, d, result = 0, 0, 0, 0, 0
try:
a = self._parameters['a']
b = self._parameters['b']
c = self._parameters['c']
d = self._parameters['d']
if x < a:
result = 0
elif (a < x) and (x < c):
result = (x - a) / (c - a)
elif (c <= x) and (x <= d):
result = 1
elif (d < x) and (x <= b):
result = (b - x) / (b - d)
else:
result = 0
except Exception:
FCLogger.error(traceback.format_exc())
FCLogger.error('Trapezium membership function use real inputs x and parameters a, b, c, d.')
FCLogger.error('Your inputs: mju_trapezium({}, {}, {}, {}, {})'.format(x, a, b, c, d))
return 0
return result
def Exponential(self, x):
"""
This is exponential membership function with real inputs x and parameters a, b.
"""
a, b, result = 0, 0, 0
try:
a = self._parameters['a']
b = self._parameters['b']
if b != 0:
result = math.exp(1) ** (-0.5 * ((x - a) / b) ** 2)
except Exception:
FCLogger.error(traceback.format_exc())
FCLogger.error('Exponential membership function use real inputs x and parameters a, b.')
FCLogger.error('Your inputs: mju_exponential({}, {}, {})'.format(x, a, b))
return 0
return result
def Sigmoidal(self, x):
"""
This is sigmoidal membership function with real inputs x and parameters a, b.
"""
a, b, result = 0, 0, 0
try:
a = self._parameters['a']
b = self._parameters['b']
result = 1 / (1 + math.exp(1) ** (-a * (x - b)))
except Exception:
FCLogger.error(traceback.format_exc())
FCLogger.error('Sigmoidal membership function use real inputs x and parameters a, b.')
FCLogger.error('Your inputs: mju_sigmoidal({}, {}, {})'.format(x, a, b))
return 0
return result
def Desirability(self, y):
"""
This is Harrington's desirability membership function with real input y without any parameters.
"""
try:
result = math.exp(-math.exp(-y))
except Exception:
FCLogger.error(traceback.format_exc())
FCLogger.error("Harrington's desirability membership function use only real input y without any parameters.")
FCLogger.error('Your inputs: mju_desirability({})'.format(y))
return 0
return result
class FuzzySet():
"""
Routines for work with fuzzy sets.
Fuzzy set A = <membershipFunction, supportSet>
"""
def __init__(self, membershipFunction, supportSet=(0., 1.), linguisticName='FuzzySet'):
if isinstance(linguisticName, str):
self._name = linguisticName
else:
raise Exception("Linguistic name of Fuzzy Set must be a string value!")
if isinstance(membershipFunction, MFunction):
self._mFunction = membershipFunction # instance of MembershipFunction class
else:
raise Exception('Not MFunction class instance was given!')
if isinstance(supportSet, tuple) and (len(supportSet) == 2) and (supportSet[0] < supportSet[1]):
self._supportSet = supportSet # support set of given membership function
else:
raise Exception('Support Set must be 2-dim tuple (a, b) with real a, b parameters, a < b!')
self._defuzValue = self._Defuz() # initiating defuzzy value of current fuzzy set
def __str__(self):
# return view of fuzzy set - name = <mju(x|y, params), supportSet>. Example: FuzzySet = <Bell(x, a, b), [0, 1]>
fSetView = '{} = <{}, [{}, {}]>'.format(self._name, self._mFunction, self._supportSet[0], self._supportSet[1])
return fSetView
@property
def name(self):
return self._name
@name.setter
def name(self, value):
if isinstance(value, str):
self._name = value
else:
raise Exception("Linguistic name of Fuzzy Set must be a string value!")
@property
def mFunction(self):
return self._mFunction # current membership
@mFunction.setter
def mFunction(self, value):
if isinstance(value, MFunction):
self._mFunction = value
else:
raise Exception('Not MFunction class instance was given!')
@property
def supportSet(self):
return self._supportSet
@supportSet.setter
def supportSet(self, value):
if isinstance(value, tuple) and (len(value) == 2) and (value[0] < value[1]):
self._supportSet = value # new support set of given membership function
else:
raise Exception('Support Set must be 2-dim tuple (a, b) with real a, b parameters, a < b!')
@property
def defuzValue(self):
return self._defuzValue
def _Defuz(self):
"""
        Defuzzification function: returns a real value in the support set of the given fuzzy set
        using the "center of gravity" method, Defuz(A) = integral(x * mju(x) dx) / integral(mju(x) dx).
        Both integrals run from the left to the right border of the support set and are approximated
        numerically by a simple rectangle (Riemann sum) rule with self.mFunction.accuracy sample points.
"""
left = self._supportSet[0]
right = self._supportSet[1]
step = (right - left) / self._mFunction.accuracy
numeratorIntegral = 0
denominatorIntegral = 0
for iteration in range(self._mFunction.accuracy):
x = left + (iteration + 1) * step
mjuValue = self._mFunction.mju(x)
numeratorIntegral += x * mjuValue
denominatorIntegral += mjuValue
return numeratorIntegral / denominatorIntegral
def Defuz(self):
"""
This function now used for backward compatibility.
"""
return self._defuzValue
class FuzzyScale():
"""
Routines for work with fuzzy scales. Fuzzy scale is an ordered set of linguistic variables.
Fuzzy scale contains named levels and its MF. This object looks like this:
S = [{'name': 'name_1', 'fSet': fuzzySet_1},
{'name': 'name_2', 'fSet': fuzzySet_2}, ...]
where name-key is a linguistic name of fuzzy set,
fSet-key is a user define fuzzy set, an instance of FuzzySet class.
"""
def __init__(self):
self._name = 'DefaultScale' # default scale contains 3 levels, DefaultScale = {Min, Med, High}:
self._levels = [{'name': 'Min',
'fSet': FuzzySet(membershipFunction=MFunction('hyperbolic', **{'a': 7, 'b': 4, 'c': 0}),
supportSet=(0., 1.),
linguisticName='Minimum')},
{'name': 'Med',
'fSet': FuzzySet(membershipFunction=MFunction('bell', **{'a': 0.35, 'b': 0.5, 'c': 0.6}),
supportSet=(0., 1.),
linguisticName='Medium')},
{'name': 'High',
'fSet': FuzzySet(membershipFunction=MFunction('triangle', **{'a': 0.7, 'b': 1, 'c': 1}),
supportSet=(0., 1.),
linguisticName='High')}]
self._levelsNames = self._GetLevelsNames() # dictionary with only levels' names
self._levelsNamesUpper = self._GetLevelsNamesUpper() # dictionary with only level's names in upper cases
def __str__(self):
# return view of fuzzy scale - name = {**levels} and levels interpreter. Example:
# DefaultScale = {Min, Med, High}
# Minimum = <Hyperbolic(x, {"a": 7, "b": 4, "c": 0}), [0.0, 1.0]>
# Medium = <Bell(x, {"a": 0.35, "b": 0.5, "c": 0.6}), [0.0, 1.0]>
# High = <Triangle(x, {"a": 0.7, "b": 1, "c": 1}), [0.0, 1.0]>
allLevelsName = self._levels[0]['name']
allLevels = '\n {}'.format(self._levels[0]['fSet'].__str__())
for level in self._levels[1:]:
allLevelsName += ', {}'.format(level['name'])
allLevels += '\n {}'.format(str(level['fSet']))
scaleView = '{} = {{{}}}{}'.format(self._name, allLevelsName, allLevels)
return scaleView
@property
def name(self):
return self._name
@name.setter
def name(self, value):
if isinstance(value, str):
self._name = value
else:
raise Exception("Name of Fuzzy Scale must be a string value!")
@property
def levels(self):
return self._levels
@levels.setter
def levels(self, value):
if value:
for level in value:
                if isinstance(level, dict) and (len(level) == 2) and ('name' in level) and ('fSet' in level):
if not isinstance(level['name'], str):
raise Exception("Level name - 'name' parameter - must be a string value!")
if not isinstance(level['fSet'], FuzzySet):
raise Exception("Fuzzy set - 'fSet' parameter - must be an instance of FuzzySet class!")
nameCount = 0 # check for unique name:
for otherLevel in value:
if otherLevel['name'] == level['name']:
nameCount += 1
if nameCount > 1:
raise Exception("The scale contains no unique levels! Warning for: {}".format(level['name']))
else:
raise Exception("Level of fuzzy scale must be 2-dim dictionary looks like {'name': 'level_name', 'fSet': FuzzySet_instance}!")
self._levels = value # set up new list of fuzzy levels
self._levelsNames = self._GetLevelsNames() # updating dictionary with only levels' names
self._levelsNamesUpper = self._GetLevelsNamesUpper() # updating dictionary with only level's names in upper cases
else:
raise Exception('Fuzzy scale must contain at least one linguistic variable!')
def _GetLevelsNames(self):
"""
Returns dictionary with only fuzzy levels' names and it's fuzzy set.
Example: {'Min': <fSet_Object>, 'Med': <fSet_Object>, 'High': <fSet_Object>}
"""
return dict([(x['name'], self._levels[lvl]) for lvl, x in enumerate(self._levels)])
def _GetLevelsNamesUpper(self):
"""
Returns dictionary with only fuzzy levels' names in upper cases and it's fuzzy set.
Example: {'MIN': <fSet_Object>, 'MED': <fSet_Object>, 'HIGH': <fSet_Object>}
"""
return dict([(x['name'].upper(), self._levels[lvl]) for lvl, x in enumerate(self._levels)])
def Fuzzy(self, realValue):
"""
        Fuzzification function: returns the level of the fuzzy scale whose membership value MF(realValue) is highest for the given real value.
"""
fuzzyLevel = self._levels[0]
for level in self._levels[1:]:
if fuzzyLevel['fSet'].mFunction.mju(realValue) <= level['fSet'].mFunction.mju(realValue):
fuzzyLevel = level
return fuzzyLevel
def GetLevelByName(self, levelName, exactMatching=True):
"""
Function return fuzzy level as dictionary level = {'name': 'level_name', 'fSet': fuzzySet}
exactMatching is a flag for exact matching search,
if True then levelName must be equal to level['name'],
otherwise - level['name'] in uppercase must contains levelName in uppercase.
"""
if exactMatching:
return self._levelsNames.get(levelName)
else:
return self._levelsNamesUpper.get(levelName.upper())
class UniversalFuzzyScale(FuzzyScale):
"""
    Universal fuzzy scale S_f = {Min, Low, Med, High, Max}. Example view:
FuzzyScale = {Min, Low, Med, High, Max}
Min = <Hyperbolic(x, {"a": 8, "b": 20, "c": 0}), [0.0, 0.23]>
Low = <Bell(x, {"a": 0.17, "b": 0.23, "c": 0.34}), [0.17, 0.4]>
Med = <Bell(x, {"a": 0.34, "b": 0.4, "c": 0.6}), [0.34, 0.66]>
High = <Bell(x, {"a": 0.6, "b": 0.66, "c": 0.77}), [0.6, 0.83]>
Max = <Parabolic(x, {"a": 0.77, "b": 0.95}), [0.77, 1.0]>
"""
def __init__(self):
super().__init__()
self._name = 'FuzzyScale' # default universal fuzzy scale contains 5 levels, FuzzyScale = {Min, Low, Med, High, Max}:
self._levels = [{'name': 'Min',
'fSet': FuzzySet(membershipFunction=MFunction('hyperbolic', **{'a': 8, 'b': 20, 'c': 0}),
supportSet=(0., 0.23),
linguisticName='Min')},
{'name': 'Low',
'fSet': FuzzySet(membershipFunction=MFunction('bell', **{'a': 0.17, 'b': 0.23, 'c': 0.34}),
supportSet=(0.17, 0.4),
linguisticName='Low')},
{'name': 'Med',
'fSet': FuzzySet(membershipFunction=MFunction('bell', **{'a': 0.34, 'b': 0.4, 'c': 0.6}),
supportSet=(0.34, 0.66),
linguisticName='Med')},
{'name': 'High',
'fSet': FuzzySet(membershipFunction=MFunction('bell', **{'a': 0.6, 'b': 0.66, 'c': 0.77}),
supportSet=(0.6, 0.83),
linguisticName='High')},
{'name': 'Max',
'fSet': FuzzySet(membershipFunction=MFunction('parabolic', **{'a': 0.77, 'b': 0.95}),
supportSet=(0.77, 1.),
linguisticName='Max')}]
self._levelsNames = self._GetLevelsNames() # dictionary with only universal fuzzy scale levels' names
self._levelsNamesUpper = self._GetLevelsNamesUpper() # dictionary with only level's names in upper cases
@property
def levels(self):
return self._levels # only readable levels and it's fuzzy set for Universal Fuzzy Scale
@property
def levelsNames(self):
return self._levelsNames # only levels' names of Universal Fuzzy Scale
@property
def levelsNamesUpper(self):
return self._levelsNamesUpper # only levels' names of Universal Fuzzy Scale in upper cases
if __name__ == "__main__":
# Some examples (just run this FuzzyRoutines module):
# --- Usage of some membership functions (uncomment one of them):
# mjuPars = {'a': 7, 'b': 4, 'c': 0} # hyperbolic params example
# funct = MFunction(userFunc='hyperbolic', **mjuPars) # creating instance of hyperbolic function
# mjuPars = {'a': 0, 'b': 0.3, 'c': 0.4} # bell params example
# funct = MFunction(userFunc='bell', **mjuPars) # creating instance of bell function
# mjuPars = {'a': 0, 'b': 1} # parabolic params example
# funct = MFunction(userFunc='parabolic', **mjuPars) # creating instance of parabolic function
# mjuPars = {'a': 0.2, 'b': 0.8, 'c': 0.7} # triangle params example
# funct = MFunction(userFunc='triangle', **mjuPars) # creating instance of triangle function
mjuPars = {'a': 0.1, 'b': 1, 'c': 0.5, 'd': 0.8} # trapezium params example
funct = MFunction(userFunc='trapezium', **mjuPars) # creating instance of trapezium function
# mjuPars = {'a': 0.5, 'b': 0.15} # exponential params example
# funct = MFunction(userFunc='exponential', **mjuPars) # creating instance of exponential function
# mjuPars = {'a': 15, 'b': 0.5} # sigmoidal params example
# funct = MFunction(userFunc='sigmoidal', **mjuPars) # creating instance of sigmoidal function
# funct = MFunction(userFunc='desirability') # creating instance of desirability function without parameters
print('Printing Membership function with parameters: ', funct)
# --- Calculating some function's values in [0, 1]:
xPar = 0
for i in range(0, 11, 1):
xPar = (xPar + i) / 10
res = funct.mju(xPar) # calculate one value of MF with given parameters
print('{} = {:1.4f}'.format(funct, res))
# --- Work with fuzzy set:
fuzzySet = FuzzySet(funct, (0., 1.)) # creating fuzzy set A = <mju_funct, support_set>
print('Printing fuzzy set after init and before changes:', fuzzySet)
print('Defuz({}) = {:1.2f}'.format(fuzzySet.name, fuzzySet.Defuz()))
changedMjuPars = copy.deepcopy(mjuPars) # change parameters of membership function with deepcopy example:
changedMjuPars['a'] = 0
changedMjuPars['b'] = 1
changedSupportSet = (0.5, 1) # change support set
fuzzySet.name = 'Changed fuzzy set'
fuzzySet.mFunction.parameters = changedMjuPars
fuzzySet.supportSet = changedSupportSet
print('New membership function with parameters: ', fuzzySet.mFunction)
print('New support set: ', fuzzySet.supportSet)
print('New value of Defuz({}) = {:1.2f}'.format(fuzzySet.name, fuzzySet.Defuz()))
print('Printing fuzzy set after changes:', fuzzySet)
# --- Work with fuzzy scales:
# Fuzzy scale is an ordered set of linguistic variables that looks like this:
# S = [{'name': 'name_1', 'fSet': fuzzySet_1}, {'name': 'name_2', 'fSet': fuzzySet_2}, ...],
# where name is a linguistic name of fuzzy set,
# fSet is a user define fuzzy set of FuzzySet type.
    scale = FuzzyScale()  # initialize new fuzzy scale with default levels
print('Printing default fuzzy scale in human-readable:', scale)
print('Defuz() of all default levels:')
for item in scale.levels:
print('Defuz({}) = {:1.2f}'.format(item['name'], item['fSet'].Defuz()))
print('Define some new levels:')
minFunct = MFunction('hyperbolic', **{'a': 2, 'b': 20, 'c': 0})
levelMin = FuzzySet(membershipFunction=minFunct, supportSet=(0., 0.5), linguisticName='min')
print('Printing Level 1 in human-readable:', levelMin)
medFunct = MFunction('bell', **{'a': 0.4, 'b': 0.55, 'c': 0.7})
levelMed = FuzzySet(membershipFunction=medFunct, supportSet=(0.25, 0.75), linguisticName='med')
print('Printing Level 2 in human-readable:', levelMed)
maxFunct = MFunction('triangle', **{'a': 0.65, 'b': 1, 'c': 1})
levelMax = FuzzySet(membershipFunction=maxFunct, supportSet=(0.7, 1.), linguisticName='max')
print('Printing Level 3 in human-readable:', levelMax)
scale.name = 'New Scale'
scale.levels = [{'name': levelMin.name, 'fSet': levelMin},
{'name': levelMed.name, 'fSet': levelMed},
{'name': levelMax.name, 'fSet': levelMax}] # add new ordered set of linguistic variables into scale
print('Changed List of levels as objects:', scale.levels)
print('Printing changed fuzzy scale in human-readable:', scale)
print('Defuz() of all New Scale levels:')
for item in scale.levels:
print('Defuz({}) = {:1.2f}'.format(item['name'], item['fSet'].Defuz()))
# --- Work with Universal Fuzzy Scale:
# Universal fuzzy scales S_f = {Min, Low, Med, High, Max} pre-defined in UniversalFuzzyScale() class.
uniFScale = UniversalFuzzyScale()
print('Levels of Universal Fuzzy Scale:', uniFScale.levels)
print('Printing scale:', uniFScale)
print('Defuz() of all Universal Fuzzy Scale levels:')
for item in uniFScale.levels:
print('Defuz({}) = {:1.2f}'.format(item['name'], item['fSet'].Defuz()))
# Use Fuzzy() function to looking for level on Fuzzy Scale:
xPar = 0
for i in range(0, 10, 1):
xPar = (xPar + i) / 10
res = uniFScale.Fuzzy(xPar) # calculate fuzzy level for some real values
print('Fuzzy({:1.1f}, {}) = {}, {}'.format(xPar, uniFScale.name, res['name'], res['fSet']))
# Finding fuzzy level using GetLevelByName() function:
print('Finding level by name with exact matching:')
res = uniFScale.GetLevelByName('Min')
print('GetLevelByName(Min, {}) = {}, {}'.format(uniFScale.name, res['name'] if res else 'None', res['fSet'] if res else 'None'))
res = uniFScale.GetLevelByName('High')
print('GetLevelByName(High, {}) = {}, {}'.format(uniFScale.name, res['name'] if res else 'None', res['fSet'] if res else 'None'))
res = uniFScale.GetLevelByName('max')
print('GetLevelByName(max, {}) = {}, {}'.format(uniFScale.name, res['name'] if res else 'None', res['fSet'] if res else 'None'))
print('Finding level by name without exact matching:')
res = uniFScale.GetLevelByName('mIn', exactMatching=False)
print("GetLevelByName('mIn', {}) = {}, {}".format(uniFScale.name, res['name'] if res else 'None', res['fSet'] if res else 'None'))
res = uniFScale.GetLevelByName('max', exactMatching=False)
print("GetLevelByName('max', {}) = {}, {}".format(uniFScale.name, res['name'] if res else 'None', res['fSet'] if res else 'None'))
res = uniFScale.GetLevelByName('Hig', exactMatching=False)
print("GetLevelByName('Hig', {}) = {}, {}".format(uniFScale.name, res['name'] if res else 'None', res['fSet'] if res else 'None'))
res = uniFScale.GetLevelByName('LOw', exactMatching=False)
print("GetLevelByName('LOw', {}) = {}, {}".format(uniFScale.name, res['name'] if res else 'None', res['fSet'] if res else 'None'))
res = uniFScale.GetLevelByName('eD', exactMatching=False)
print("GetLevelByName('eD', {}) = {}, {}".format(uniFScale.name, res['name'] if res else 'None', res['fSet'] if res else 'None'))
res = uniFScale.GetLevelByName('Highest', exactMatching=False)
print("GetLevelByName('Highest', {}) = {}, {}".format(uniFScale.name, res['name'] if res else 'None', res['fSet'] if res else 'None'))
# --- Work with fuzzy logic operators:
# Checks that number is in [0, 1]:
print('IsCorrectFuzzyNumberValue(0.5) =', IsCorrectFuzzyNumberValue(0.5))
print('IsCorrectFuzzyNumberValue(1.1) =', IsCorrectFuzzyNumberValue(1.1))
# Calculates result of fuzzy NOT, fuzzy NOT with alpha parameter and parabolic fuzzy NOT operations:
print('FNOT(0.25) =', FuzzyNOT(0.25))
print('FNOT(0.25, alpha=0.25) =', FuzzyNOT(0.25, alpha=0.25))
print('FNOT(0.25, alpha=0.75) =', FuzzyNOT(0.25, alpha=0.75))
print('FNOT(0.25, alpha=1) =', FuzzyNOT(0.25, alpha=1))
print('FNOTParabolic(0.25, alpha=0.25) =', FuzzyNOTParabolic(0.25, alpha=0.25))
print('FNOTParabolic(0.25, alpha=0.75) =', FuzzyNOTParabolic(0.25, alpha=0.75))
# Calculates result of fuzzy AND/OR operations:
print('FuzzyAND(0.25, 0.5) =', FuzzyAND(0.25, 0.5))
print('FuzzyOR(0.25, 0.5) =', FuzzyOR(0.25, 0.5))
# Calculates result of T-Norm operations, where T-Norm is one of conjunctive operators - logic, algebraic, boundary, drastic:
print("TNorm(0.25, 0.5, 'logic') =", TNorm(0.25, 0.5, normType='logic'))
print("TNorm(0.25, 0.5, 'algebraic') =", TNorm(0.25, 0.5, normType='algebraic'))
print("TNorm(0.25, 0.5, 'boundary') =", TNorm(0.25, 0.5, normType='boundary'))
print("TNorm(0.25, 0.5, 'drastic') =", TNorm(0.25, 0.5, normType='drastic'))
# Calculates result of S-coNorm operations, where S-coNorm is one of disjunctive operators - logic, algebraic, boundary, drastic:
print("SCoNorm(0.25, 0.5, 'logic') =", SCoNorm(0.25, 0.5, normType='logic'))
print("SCoNorm(0.25, 0.5, 'algebraic') =", SCoNorm(0.25, 0.5, normType='algebraic'))
print("SCoNorm(0.25, 0.5, 'boundary') =", SCoNorm(0.25, 0.5, normType='boundary'))
print("SCoNorm(0.25, 0.5, 'drastic') =", SCoNorm(0.25, 0.5, normType='drastic'))
# Calculates result of T-Norm operations for N numbers, N > 2:
print("TNormCompose(0.25, 0.5, 0.75, 'logic') =", TNormCompose(0.25, 0.5, 0.75, normType='logic'))
print("TNormCompose(0.25, 0.5, 0.75, 'algebraic') =", TNormCompose(0.25, 0.5, 0.75, normType='algebraic'))
print("TNormCompose(0.25, 0.5, 0.75, 'boundary') =", TNormCompose(0.25, 0.5, 0.75, normType='boundary'))
print("TNormCompose(0.25, 0.5, 0.75, 'drastic') =", TNormCompose(0.25, 0.5, 0.75, normType='drastic'))
# Calculates result of S-coNorm operations for N numbers, N > 2:
print("SCoNormCompose(0.25, 0.5, 0.75, 'logic') =", SCoNormCompose(0.25, 0.5, 0.75, normType='logic'))
print("SCoNormCompose(0.25, 0.5, 0.75, 'algebraic') =", SCoNormCompose(0.25, 0.5, 0.75, normType='algebraic'))
print("SCoNormCompose(0.25, 0.5, 0.75, 'boundary') =", SCoNormCompose(0.25, 0.5, 0.75, normType='boundary'))
print("SCoNormCompose(0.25, 0.5, 0.75, 'drastic') =", SCoNormCompose(0.25, 0.5, 0.75, normType='drastic'))
# --- Work with other methods:
print("Converting some strings to range of sorted unique numbers:")
print('String "1,5" converted to:', DiapasonParser("1,5"))
print('String "1-5" converted to:', DiapasonParser("1-5"))
print('String "8-10, 1-5, 6" converted to:', DiapasonParser("8-10, 1-5, 6"))
    print('String "11, 11, 12, 12, 1-5, 3-7" converted to:', DiapasonParser("11, 11, 12, 12, 1-5, 3-7"))
/Hyperstatic-0.2.0-cp38-cp38-win_amd64.whl/hyperstatic/app/general/result/result.py
from hyperstatic.app.general.orm import ResultJointDisplacement,ResultJointReaction,ResultFrameForce,ResultModalPeriod
def get_result_joint_displacement(self,name,loadcase):
"""
Get the result in the database.
params:
name: str, name of joint
loadcase: str, name of loadcase
return: list of float, displacement u1,u2,u3,r1,r2,r3
"""
res=self.session.query(ResultJointDisplacement).filter_by(joint_name=name,loadcase_name=loadcase).first()
if res==None:
return None
else:
scale=self.scale()
return [res.u1/scale['L'],res.u2/scale['L'],res.u3/scale['L'],
res.r1,res.r2,res.r3]
def get_result_joint_reaction(self,name,loadcase):
"""
Get the result in the database.
params:
name: str, name of joint
loadcase: str, name of loadcase
return: list of float, reaction in u1,u2,u3,r1,r2,r3
"""
res=self.session.query(ResultJointReaction).filter_by(joint_name=name,loadcase_name=loadcase).first()
if res==None:
return None
else:
scale=self.scale()
return [res.p1/scale['F'],res.p2/scale['F'],res.p3/scale['F'],
res.m1/scale['F']/scale['L'],res.m2/scale['F']/scale['L'],res.m3/scale['F']/scale['L']]
def get_result_frame_force(self,name,loadcase):
"""
Get the result in the database.
params:
name: str, name of frame
loadcase: str, name of loadcase
return: list of float, forces in both ends.
"""
reses=self.session.query(ResultFrameForce).filter_by(frame_name=name,loadcase_name=loadcase).all()
if len(reses)==0:
return None
else:
scale=self.scale()
forces=[]
for res in reses:
forces.append([res.p01/scale['F'],res.p02/scale['F'],res.p03/scale['F'],
res.m01/scale['F']/scale['L'],res.m02/scale['F']/scale['L'],res.m03/scale['F']/scale['L'],
res.p11/scale['F'],res.p12/scale['F'],res.p13/scale['F'],
res.m11/scale['F']/scale['L'],res.m12/scale['F']/scale['L'],res.m13/scale['F']/scale['L']])
return forces
def get_result_period(self,loadcase,order='all'):
"""
Get the result in the database.
params:
loadcase: str, name of loadcase
order: 'all' or int. order to find.
return: list of period
"""
res=self.session.query(ResultModalPeriod).filter_by(loadcase_name=loadcase)
if order=='all':
return [r.period for r in res.all()]
elif type(order)==int:
        res = res.filter_by(order=order)
        return [r.period for r in res.all()]
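
# Usage sketch (assumptions): these functions refer to ``self.session`` and ``self.scale()``,
# so they are presumably bound as methods of the main hyperstatic model object elsewhere in
# the package. Assuming ``model`` is such an object and that a joint "N1", a frame "F1" and a
# loadcase "DL" exist in it (all of these names are hypothetical):
#
#     disp = model.get_result_joint_displacement("N1", "DL")    # [u1, u2, u3, r1, r2, r3]
#     react = model.get_result_joint_reaction("N1", "DL")       # [p1, p2, p3, m1, m2, m3]
#     forces = model.get_result_frame_force("F1", "DL")         # list of 12-component end-force rows
#     periods = model.get_result_period("MODAL", order='all')   # list of modal periods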
/IDS_XS_TOOLS_UiS-0.3-py3-none-any.whl/ids_xs_tools/ImageFunctions.py
import cv2
from ids_xs_tools.QR_Reader import QR_Scanner, QR_Scanner_visualized
import ids_xs_tools.config as config
from pyueye import ueye
import time
from ids_xs_tools.pyueye_example_utils import ImageData, ImageBuffer
import ids_xs_tools.OpenCV_to_RAPID as OpenCV_to_RAPID
# TODO: Extend the program with threading, this will allow the camera to always stay active
# and could give a live feed of what the camera sees while still maintaining control over robot.
def capture_image(cam, gripper_height):
camera_height = gripper_height + 70 # Camera is placed 70mm above gripper
# TODO: Find a curve that correlates distance from subject and focus value
if camera_height > 300:
nRet = ueye.is_Focus(cam.handle(), ueye.FOC_CMD_SET_MANUAL_FOCUS,
config.focus_overview, ueye.sizeof(config.focus_overview))
else:
nRet = ueye.is_Focus(cam.handle(), ueye.FOC_CMD_SET_MANUAL_FOCUS,
config.focus_closeup, ueye.sizeof(config.focus_closeup))
#nRet = ueye.is_Focus(cam.handle(), ueye.FOC_CMD_SET_ENABLE_AUTOFOCUS_ONCE, None, 0)
# autofocus_status = ueye.INT(0)
# ueye.is_Focus(cam.handle(), ueye.FOC_CMD_GET_AUTOFOCUS_STATUS, autofocus_status, ueye.sizeof(autofocus_status))
img_buffer = ImageBuffer() # Create image buffer
#ueye.is_Focus(cam.handle(), ueye.FOC_CMD_SET_ENABLE_AUTOFOCUS_ONCE, None, 0)
time.sleep(0.5)
# cam.freeze_video(True) # Freeze video captures a single image
nRet = ueye.is_WaitForNextImage(cam.handle(), 1000, img_buffer.mem_ptr, img_buffer.mem_id)
img_data = ImageData(cam.handle(), img_buffer)
array = img_data.as_1d_image()
img_data.unlock()
return array
def findPucks(cam, robot, robtarget_pucks, cam_comp=False):
"""Finds all pucks in the frame of the camera by capturing an image and scanning the image for QR codes.
After the codes have been pinpointed, a series of transformations happen to finally create robtargets
which can be sent to RobotWare."""
temp_puck_list = [] # Temporary puck list
trans, rot = robot.get_gripper_position()
gripper_height = robot.get_gripper_height()
cam_pos = OpenCV_to_RAPID.get_camera_position(trans=trans, rot=rot)
image = capture_image(cam=cam, gripper_height=gripper_height)
# Scan the image and return all QR code positions
puck_list = QR_Scanner(image)
    # Check if the QR codes that were found have already been located previously.
    # If so, remove them from the list of newly found pucks.
    for puck in list(puck_list):
        if any(puck == x for x in robtarget_pucks):
            puck_list.remove(puck)
for puck in puck_list:
OpenCV_to_RAPID.create_robtarget(gripper_height=gripper_height, gripper_rot=rot, cam_pos=cam_pos, puck=puck,
cam_comp=cam_comp)
robtarget_pucks.append(puck)
return robtarget_pucks
def showVideo(cam):
nRet = ueye.is_CaptureVideo(cam.handle(), ueye.IS_DONT_WAIT)
img_buffer = ImageBuffer()
img_data = ImageData(cam.handle(), img_buffer)
while True:
nRet = ueye.is_WaitForNextImage(cam.handle(), 1000, img_buffer.mem_ptr, img_buffer.mem_id)
array = img_data.as_1d_image()
#scanned_img = QR_Scanner_visualized(array)
cv2.imshow("hei", array)
img_data.unlock()
if cv2.waitKey(10) & 0xFF == ord('q'):
break
def is_blurry(img, threshold):
laplacian_variance = cv2.Laplacian(img, cv2.CV_64F).var()
print("Blur", laplacian_variance)
if laplacian_variance > threshold:
return False
else:
        return True
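
# Usage sketch (illustrative, not from the package docs): a typical pick cycle captures an
# image at the current gripper height, optionally checks it for blur, then scans it for
# QR-coded pucks and converts them to robtargets. ``cam`` and ``robot`` are assumed to be an
# initialised uEye camera wrapper and robot interface created elsewhere; the blur threshold
# of 100 is an arbitrary illustrative value.
#
#     image = capture_image(cam=cam, gripper_height=robot.get_gripper_height())
#     if not is_blurry(image, threshold=100):
#         robtarget_pucks = findPucks(cam, robot, robtarget_pucks=[])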
/Buildpan_CLI-1.0-py3-none-any.whl/ci_commands/init.py
import requests
import json
import pathlib
import datetime
import click
from buildpan import github_webhook, bitbucket_webhook, encrypt
from buildpan import setting, yaml_reader
from pyfiglet import Figlet
info = setting.info
# getting env variable
fetch_log = info["FETCH_LOG_URL"]
@click.command()
def init():
'''
For initiating CI-CD operation
\f
'''
f = Figlet(font='slant')
print (f.renderText('Buildpan'))
path=pathlib.Path().resolve()
print("Your current directory is : ", path)
try:
yaml_reader.yaml_reader(path)
project_id = yaml_reader.yaml_reader.project_id
response = requests.get("https://app.buildpan.com/api/v1/projects/detail/"+project_id)
data = response.json()
provider = data['project']["provider"]
enc_key = b'CgOc_6PmZq8fYXriMbXF0Yk27VT2RVyeiiobUd3DzR4='
curtime = datetime.datetime.now()
# github access
if provider == "github":
print("\nConnecting to remote repository...")
name = data["project"]["repo"]["full_name"].split('/')
token = data["project"]["repo"]["access_token"]
username = name[0]
repo_name = name[1]
try:
dictionary ={
"token" : token,
"username" : username,
"repo_name" : repo_name,
}
# Serializing json
json_object = json.dumps(dictionary, indent = 4)
# Writing to sample.json
with open(project_id+'.json',"w") as outfile:
outfile.write(json_object)
#Reading from json file
with open(project_id+'.json') as file:
info = json.load(file)
username = info["username"]
token = info["token"]
repo_name = info["repo_name"]
# encrypting a json file
enc = encrypt.Encryptor()
enc.encrypt_file(enc_key, project_id+'.json')
requests.post(fetch_log + "?" +'project_id='+project_id+'&repo_name='+repo_name+'&Time='+str(curtime)+'&user_name='+username+'&message=initialization performed for github - '+repo_name+'&status=success&operation=init')
github_webhook.github(project_id, path, token, username, repo_name, provider)
except Exception as e:
requests.post(fetch_log + "?" +'project_id='+project_id+'&repo_name='+repo_name+'&Time='+str(curtime)+'&user_name='+username+'&message=initialization performed for github - '+repo_name+' failed.'+str(e)+'&status=failed&operation=init')
# bitbucket access
elif provider == "bitbucket":
print("\nConnecting to remote repository...")
name = data["project"]["repo"]["full_name"].split('/')
token = data["project"]["repo"]["refresh_token"]
username = name[0]
repo_name = name[1]
try:
dictionary ={
"refresh_token" : token,
"username" : username,
"repo_name" : repo_name,
}
# Serializing json
json_object = json.dumps(dictionary, indent = 4)
# Writing to sample.json
with open(project_id+'.json',"w") as outfile:
outfile.write(json_object)
# Reading from json file
with open(project_id+'.json') as file:
info = json.load(file)
username = info["username"]
refresh_token = info["refresh_token"]
repo_name = info["repo_name"]
# encrypting a json file
enc = encrypt.Encryptor()
enc.encrypt_file(enc_key, project_id+'.json')
requests.post(fetch_log + "?" +'project_id='+project_id+'&repo_name='+repo_name+'&Time='+str(curtime)+'&user_name='+username+'&message=initialization performed for bitbucket - '+repo_name+'&status=success&operation=init')
bitbucket_webhook.bitbucket(project_id, path, refresh_token, username, repo_name, provider)
except Exception as e:
requests.post(fetch_log + "?" +'project_id='+project_id+'&repo_name='+repo_name+'&Time='+str(curtime)+'&user_name='+username+'&message=initialization performed for bitbucket - '+repo_name+' failed.'+str(e)+'&status=failed&operation=init')
except Exception as e:
print("config file not found or not in proper format.")
print("Message : " + str(e))
print("Initialization failed")
try:
requests.post(fetch_log + "?" +'project_id='+project_id+'&repo_name= &Time='+str(curtime)+'&user_name= &message=initialization failed.'+str(e)+'&status=failed&operation=init')
except:
            pass
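
# Usage sketch (illustrative): ``init`` is a click command, so besides being exposed through the
# package's command-line entry point it can be invoked programmatically (for example in a test)
# with click's CliRunner. Note that it performs real network calls and webhook registration.
#
#     from click.testing import CliRunner
#     runner = CliRunner()
#     result = runner.invoke(init)
#     print(result.output)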
/BiblioPixel-3.4.46.tar.gz/BiblioPixel-3.4.46/bibliopixel/drivers/SPI/APA102.py
from ... colors import gamma
from .. channel_order import ChannelOrder
from . base import SPIBase
class APA102(SPIBase):
"""Driver for APA102/SK9822 based LED strips on devices like
the Raspberry Pi and BeagleBone
Provides the same parameters as
:py:class:`bibliopixel.drivers.SPI.SPIBase`
"""
def __init__(self, num, gamma=gamma.APA102, **kwargs):
super().__init__(num, gamma=gamma, **kwargs)
# APA102/SK9822 requires latch bytes at the end)
# Many thanks to this article for combined APA102/SK9822 protocol
# https://cpldcpu.com/2016/12/13/sk9822-a-clone-of-the-apa102/
self._start_frame = 4 # start frame is [0, 0, 0, 0]
self._pixel_bytes = self.numLEDs * 4 # 4 byte frames [bright, r, g, b]
self._pixel_stop = self._start_frame + self._pixel_bytes
self._reset_frame = 4 # for SK9822 [0, 0, 0, 0]
self._end_frame = (num // 2) + 1
self._packet = self.maker.bytes(self._start_frame + self._pixel_bytes +
self._reset_frame + self._end_frame)
self.set_device_brightness(0xFF) # required to setup _packet
def set_device_brightness(self, val):
"""
APA102 & SK9822 support on-chip brightness control, allowing greater
color depth.
APA102 superimposes a 440Hz PWM on the 19kHz base PWM to control
brightness. SK9822 uses a base 4.7kHz PWM but controls brightness with a
variable current source.
Because of this SK9822 will have much less flicker at lower levels.
Either way, this option is better and faster than scaling in
BiblioPixel.
"""
# bitshift to scale from 8 bit to 5
self._chipset_brightness = (val >> 3)
self._brightness_list = [0xE0 + self._chipset_brightness] * self.numLEDs
self._packet[self._start_frame:self._pixel_stop:4] = (
self._brightness_list)
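
    # Worked example (illustrative, not from the BiblioPixel docs): set_device_brightness(128)
    # keeps only the top five bits, 128 >> 3 == 16, so every per-LED frame then starts with the
    # header byte 0xE0 + 16 == 0xF0; the default call set_device_brightness(0xFF) yields 0xFF.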
def _compute_packet(self):
self._render()
self._packet[self._start_frame + 1:self._pixel_stop:4] = self._buf[0::3]
self._packet[self._start_frame + 2:self._pixel_stop:4] = self._buf[1::3]
        self._packet[self._start_frame + 3:self._pixel_stop:4] = self._buf[2::3]
/Annalist-0.5.18.tar.gz/Annalist-0.5.18/annalist_root/annalist/views/fields/render_file_upload.py
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Renderer and value mapper for file upload used to upload a resource into
the local data store.
"""
__author__ = "Graham Klyne ([email protected])"
__copyright__ = "Copyright 2014, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
from utils.py3porting import is_string
from django.template import Template, Context
from annalist.views.fields.render_base import RenderBase
from annalist.views.fields.render_fieldvalue import (
RenderFieldValue,
get_field_edit_value,
get_field_view_value
)
# ----------------------------------------------------------------------------
#
# Link URI value mapping
#
# ----------------------------------------------------------------------------
def upload_field_value(data_value):
"""
Construct field value in expected format for remaining processing
"""
if data_value:
if is_string(data_value):
data_value = (
{ 'resource_name': "uploaded.data"
, 'uploaded_file': data_value
})
else:
# Also for empty string case
data_value = (
{ 'resource_name': "uploaded.data"
, 'uploaded_file': ""
})
return data_value
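
# Illustrative behaviour of upload_field_value (values follow directly from the code above;
# they are not taken from the Annalist documentation, and "mydata.csv" is a hypothetical name):
#     upload_field_value("mydata.csv")
#         -> { 'resource_name': "uploaded.data", 'uploaded_file': "mydata.csv" }
#     upload_field_value("")  or  upload_field_value(None)
#         -> { 'resource_name': "uploaded.data", 'uploaded_file': "" }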
class FileUploadValueMapper(RenderBase):
"""
Field rendering class for file upload field.
"""
@classmethod
def resource_name(cls, data_value):
"""
        Extracts the resource name from the value structure, for field display.
"""
return upload_field_value(data_value).get('resource_name', "(@@resource_name not present)")
@classmethod
def uploaded_file(cls, data_value):
"""
Extracts uploaded filename from value structure, for field display.
"""
return upload_field_value(data_value).get('uploaded_file', "")
@classmethod
def encode(cls, data_value):
"""
Extracts the resource name from the value structure, for field display.
"""
return cls.resource_name(upload_field_value(data_value))
@classmethod
def decode(cls, field_value):
"""
Returns textual path value from file upload field value
"""
return field_value or ""
def decode_store(self, field_value, entityvals, property_uri):
"""
Decodes a supplied value and uses it to update the 'uploaded_file'
field of a file upload value.
"""
u = self.decode(field_value)
v = entityvals.get(property_uri, {})
# try:
# v['resource_name'] = u
# except TypeError:
# v = {'resource_name': u} # Ignore non-updatable value
entityvals[property_uri] = upload_field_value(v)
return v
# ----------------------------------------------------------------------------
#
# Import value templates
#
# ----------------------------------------------------------------------------
# NOTE: this is a minimal rendering. Enhancements might include additional information
# from the entity field, especially for the view (e.g. content-type, etc.)
# NOTE: The <a> element supports a `type` attribute
# (cf. https://developer.mozilla.org/en-US/docs/Web/HTML/Element/a)
view_upload = (
"""Uploaded file <a href="%s" target="_blank">%s</a>""")
edit_upload = (
"""<!-- fields.render_file_upload -->
<input type="file" name="{{repeat_prefix}}{{field.description.field_name}}"
placeholder="{{field.description.field_placeholder}}"
value="{{resource_name}}" />
{% if uploaded_file != "" %}
Previously uploaded: {{uploaded_file}}
{% endif %}
""")
# ----------------------------------------------------------------------------
#
# Link URI field renderers
#
# ----------------------------------------------------------------------------
class File_upload_view_renderer(object):
def render(self, context):
"""
Render uploaded file link for viewing.
"""
val = get_field_view_value(context, None)
linkval = FileUploadValueMapper.resource_name(val)
textval = FileUploadValueMapper.uploaded_file(val)
return view_upload%(linkval, textval)
class File_upload_edit_renderer(object):
def __init__(self):
self._template = Template(edit_upload)
return
def render(self, context):
"""
Render file upload field for editing
"""
val = get_field_edit_value(context, None)
resource_name = FileUploadValueMapper.resource_name(val)
uploaded_file = FileUploadValueMapper.uploaded_file(val)
# log.info("@@File_upload_edit_renderer.render: %s %s"%(resource_name, uploaded_file))
with context.push(resource_name=resource_name, uploaded_file=uploaded_file):
result = self._template.render(context)
# log.info("@@File_upload_edit_renderer.render: %s"%(result,))
return result
def get_file_upload_renderer():
"""
Return field renderer object for file upload
"""
return RenderFieldValue("file_upload",
view_renderer=File_upload_view_renderer(),
edit_renderer=File_upload_edit_renderer(),
)
# End. | PypiClean |
/HavNegpy-1.2.tar.gz/HavNegpy-1.2/docs/_build/html/_build/doctrees/nbsphinx/_build/doctrees/nbsphinx/_build/doctrees/nbsphinx/_build/html/hn_module_tutorial.ipynb | # Tutorial for the HN module of HavNegpy package
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import HavNegpy as dd
%matplotlib qt
os.chdir(r'M:\Marshall_Data\mohamed_data\mohamed_data\n44')
def create_dataframe(f):
col_names = ['Freq', 'T', 'Eps1', 'Eps2']
#f = input(str("Enter the filename:"))
df = pd.read_csv(f, sep=r"\s+",index_col=False,usecols = [0,1,2,3],names=col_names,header=None,skiprows=4,encoding='unicode_escape',engine='python')
col1 = ['log f']
for start in range(0, len(df), 63):
name = df['T'][start]
#print(name)
col1.append(name)
df2 = pd.DataFrame()
f1 = df['Freq'][0:63].values
x1 = np.log10((f1))
e = pd.DataFrame(x1)
df2['log f'] = pd.concat([e],axis=1,ignore_index=True)
global Cooling,Heating
for start in range(0, len(df), 63):
f = df['Eps2'][start:start+63].values
ep = np.log10(f)
d = pd.DataFrame(ep)
df2[start] = pd.concat([d],axis=1,ignore_index=True)
df2.columns = col1
'''
a = int(len(col1)/3)
b = 2*a
c = int(len(col1)) - b
Heating1 = df2.iloc[8:,0:a+1]
Cooling = df2.iloc[8:,a+1:b+1]
Heating2 = df2.iloc[8:,b+1:]
heat1_col = col1[0:a+1]
cool_col = col1[a+1:b+1]
heat2_col = col1[b+1:]
Cooling.columns = cool_col
Heating1.columns = heat1_col
Heating2.columns = heat2_col
f2 = df['Freq'][8:59].values
x2 = np.log10((f2))
Cooling['Freq'] = x2
Heating1['Freq'] = x2
Heating2['Freq'] = x2
'''
Cooling = df2.iloc[:,0:25]
Heating = df2.iloc[:,25:]
return df,df2,Cooling,Heating #Heating2
df,df2,cool,heat = create_dataframe('EPS.TXT')
x,y = df2['log f'][9:], heat[40][9:]
plt.figure()
plt.scatter(x,y,label='data for fitting')
plt.xlabel('log f [Hz]')
plt.ylabel('log $\epsilon$"')
plt.legend()
plt.title('Example for HN fitting')
```
image of the plot we are using in this tutorial

```
''' instantiate the HN module from HavNegpy '''
hn = dd.HN()
''' select a range to perform HN fitting '''
''' the select_range function pops up a separate window and allows you two clicks to select the region of interest (ROI) '''
''' In this tutorial, I'll plot the ROI and append it as an image in the next cell '''
x1,y1 = hn.select_range(x,y)
''' view the data from select range'''
plt.scatter(x1,y1,label = 'Data for fitting')
plt.xlabel('log f [Hz]')
plt.ylabel('log $\epsilon$"')
plt.legend()
plt.title('ROI selected from HN module')
```
image of the ROI from HN module
```
''' dump the initial guess parameters using the dump parameters method (it varies for each fit function), which writes the parameters to a json file '''
''' this is required before performing the first fit, as the fitting takes its initial guess from the json file created (a quick way to inspect that file is sketched after this cell) '''
hn.dump_parameters_hn()
''' view the initial guess for the ROI using initial_view method'''
''' I'll append the image in the next cell'''
hn.initial_view_hn(x1,y1)
```
image of the initial guess
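The initial guess values live in the json file created by `dump_parameters_hn`. As an optional check before fitting, that file can be inspected with the standard `json` module. This is only an illustrative sketch: the exact filename is chosen by the library, so we simply look for json files in the working directory.
```
''' optional: inspect the dumped initial guess (illustrative only) '''
import glob, json
for fname in glob.glob('*.json'):
    with open(fname) as f:
        print(fname, json.load(f))
```
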
```
''' perform least squares fitting '''
''' The image of the curve fit is added in the next cell '''
hn.fit(x1,y1)
```
Example of the fit performed using a single HN function.
The procedure is similar for double HN and HN with conductivity.

```
''' create a file to save the fit results using the create_analysis_file method '''
''' before saving fit results, an analysis file has to be created '''
hn.create_analysis_file()
''' save the fit results using the save_fit method of the corresponding fit function '''
''' it takes one argument; read more in the documentation '''
hn.save_fit_hn(1)
```
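The same workflow can be repeated for the other temperature columns of the `heat` dataframe. Below is a minimal sketch under stated assumptions: the column label `50` is only a placeholder for another temperature present in your data, and the integer passed to `save_fit_hn` has the same meaning as in the call above (see the documentation).
```
''' repeat the fit for another temperature (illustrative sketch) '''
x2, y2 = df2['log f'][9:], heat[50][9:]
x2_roi, y2_roi = hn.select_range(x2, y2)
hn.fit(x2_roi, y2_roi)
hn.save_fit_hn(1)
```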
| PypiClean |
/django-chuck-0.2.3.tar.gz/django-chuck/modules/feincms/project/static/scripts/libs/tiny_mce/plugins/table/editor_plugin.js | (function(d){var e=d.each;function c(g,h){var j=h.ownerDocument,f=j.createRange(),k;f.setStartBefore(h);f.setEnd(g.endContainer,g.endOffset);k=j.createElement("body");k.appendChild(f.cloneContents());return k.innerHTML.replace(/<(br|img|object|embed|input|textarea)[^>]*>/gi,"-").replace(/<[^>]+>/g,"").length==0}function a(g,f){return parseInt(g.getAttribute(f)||1)}function b(H,G,K){var g,L,D,o;t();o=G.getParent(K.getStart(),"th,td");if(o){L=F(o);D=I();o=z(L.x,L.y)}function A(N,M){N=N.cloneNode(M);N.removeAttribute("id");return N}function t(){var M=0;g=[];e(["thead","tbody","tfoot"],function(N){var O=G.select("> "+N+" tr",H);e(O,function(P,Q){Q+=M;e(G.select("> td, > th",P),function(W,R){var S,T,U,V;if(g[Q]){while(g[Q][R]){R++}}U=a(W,"rowspan");V=a(W,"colspan");for(T=Q;T<Q+U;T++){if(!g[T]){g[T]=[]}for(S=R;S<R+V;S++){g[T][S]={part:N,real:T==Q&&S==R,elm:W,rowspan:U,colspan:V}}}})});M+=O.length})}function z(M,O){var N;N=g[O];if(N){return N[M]}}function s(O,M,N){if(O){N=parseInt(N);if(N===1){O.removeAttribute(M,1)}else{O.setAttribute(M,N,1)}}}function j(M){return M&&(G.hasClass(M.elm,"mceSelected")||M==o)}function k(){var M=[];e(H.rows,function(N){e(N.cells,function(O){if(G.hasClass(O,"mceSelected")||O==o.elm){M.push(N);return false}})});return M}function r(){var M=G.createRng();M.setStartAfter(H);M.setEndAfter(H);K.setRng(M);G.remove(H)}function f(M){var N;d.walk(M,function(P){var O;if(P.nodeType==3){e(G.getParents(P.parentNode,null,M).reverse(),function(Q){Q=A(Q,false);if(!N){N=O=Q}else{if(O){O.appendChild(Q)}}O=Q});if(O){O.innerHTML=d.isIE?" ":'<br data-mce-bogus="1" />'}return false}},"childNodes");M=A(M,false);s(M,"rowSpan",1);s(M,"colSpan",1);if(N){M.appendChild(N)}else{if(!d.isIE){M.innerHTML='<br data-mce-bogus="1" />'}}return M}function q(){var M=G.createRng();e(G.select("tr",H),function(N){if(N.cells.length==0){G.remove(N)}});if(G.select("tr",H).length==0){M.setStartAfter(H);M.setEndAfter(H);K.setRng(M);G.remove(H);return}e(G.select("thead,tbody,tfoot",H),function(N){if(N.rows.length==0){G.remove(N)}});t();row=g[Math.min(g.length-1,L.y)];if(row){K.select(row[Math.min(row.length-1,L.x)].elm,true);K.collapse(true)}}function u(S,Q,U,R){var P,N,M,O,T;P=g[Q][S].elm.parentNode;for(M=1;M<=U;M++){P=G.getNext(P,"tr");if(P){for(N=S;N>=0;N--){T=g[Q+M][N].elm;if(T.parentNode==P){for(O=1;O<=R;O++){G.insertAfter(f(T),T)}break}}if(N==-1){for(O=1;O<=R;O++){P.insertBefore(f(P.cells[0]),P.cells[0])}}}}}function C(){e(g,function(M,N){e(M,function(P,O){var S,R,T,Q;if(j(P)){P=P.elm;S=a(P,"colspan");R=a(P,"rowspan");if(S>1||R>1){s(P,"rowSpan",1);s(P,"colSpan",1);for(Q=0;Q<S-1;Q++){G.insertAfter(f(P),P)}u(O,N,R-1,S)}}})})}function p(V,S,Y){var P,O,X,W,U,R,T,M,V,N,Q;if(V){pos=F(V);P=pos.x;O=pos.y;X=P+(S-1);W=O+(Y-1)}else{P=L.x;O=L.y;X=D.x;W=D.y}T=z(P,O);M=z(X,W);if(T&&M&&T.part==M.part){C();t();T=z(P,O).elm;s(T,"colSpan",(X-P)+1);s(T,"rowSpan",(W-O)+1);for(R=O;R<=W;R++){for(U=P;U<=X;U++){if(!g[R]||!g[R][U]){continue}V=g[R][U].elm;if(V!=T){N=d.grep(V.childNodes);e(N,function(Z){T.appendChild(Z)});if(N.length){N=d.grep(T.childNodes);Q=0;e(N,function(Z){if(Z.nodeName=="BR"&&G.getAttrib(Z,"data-mce-bogus")&&Q++<N.length-1){T.removeChild(Z)}})}G.remove(V)}}}q()}}function l(Q){var M,S,P,R,T,U,N,V,O;e(g,function(W,X){e(W,function(Z,Y){if(j(Z)){Z=Z.elm;T=Z.parentNode;U=A(T,false);M=X;if(Q){return false}}});if(Q){return 
!M}});for(R=0;R<g[0].length;R++){if(!g[M][R]){continue}S=g[M][R].elm;if(S!=P){if(!Q){O=a(S,"rowspan");if(O>1){s(S,"rowSpan",O+1);continue}}else{if(M>0&&g[M-1][R]){V=g[M-1][R].elm;O=a(V,"rowSpan");if(O>1){s(V,"rowSpan",O+1);continue}}}N=f(S);s(N,"colSpan",S.colSpan);U.appendChild(N);P=S}}if(U.hasChildNodes()){if(!Q){G.insertAfter(U,T)}else{T.parentNode.insertBefore(U,T)}}}function h(N){var O,M;e(g,function(P,Q){e(P,function(S,R){if(j(S)){O=R;if(N){return false}}});if(N){return !O}});e(g,function(S,T){var P,Q,R;if(!S[O]){return}P=S[O].elm;if(P!=M){R=a(P,"colspan");Q=a(P,"rowspan");if(R==1){if(!N){G.insertAfter(f(P),P);u(O,T,Q-1,R)}else{P.parentNode.insertBefore(f(P),P);u(O,T,Q-1,R)}}else{s(P,"colSpan",P.colSpan+1)}M=P}})}function n(){var M=[];e(g,function(N,O){e(N,function(Q,P){if(j(Q)&&d.inArray(M,P)===-1){e(g,function(T){var R=T[P].elm,S;S=a(R,"colSpan");if(S>1){s(R,"colSpan",S-1)}else{G.remove(R)}});M.push(P)}})});q()}function m(){var N;function M(Q){var P,R,O;P=G.getNext(Q,"tr");e(Q.cells,function(S){var T=a(S,"rowSpan");if(T>1){s(S,"rowSpan",T-1);R=F(S);u(R.x,R.y,1,1)}});R=F(Q.cells[0]);e(g[R.y],function(S){var T;S=S.elm;if(S!=O){T=a(S,"rowSpan");if(T<=1){G.remove(S)}else{s(S,"rowSpan",T-1)}O=S}})}N=k();e(N.reverse(),function(O){M(O)});q()}function E(){var M=k();G.remove(M);q();return M}function J(){var M=k();e(M,function(O,N){M[N]=A(O,true)});return M}function B(O,N){var P=k(),M=P[N?0:P.length-1],Q=M.cells.length;e(g,function(S){var R;Q=0;e(S,function(U,T){if(U.real){Q+=U.colspan}if(U.elm.parentNode==M){R=1}});if(R){return false}});if(!N){O.reverse()}e(O,function(T){var S=T.cells.length,R;for(i=0;i<S;i++){R=T.cells[i];s(R,"colSpan",1);s(R,"rowSpan",1)}for(i=S;i<Q;i++){T.appendChild(f(T.cells[S-1]))}for(i=Q;i<S;i++){G.remove(T.cells[i])}if(N){M.parentNode.insertBefore(T,M)}else{G.insertAfter(T,M)}})}function F(M){var N;e(g,function(O,P){e(O,function(R,Q){if(R.elm==M){N={x:Q,y:P};return false}});return !N});return N}function w(M){L=F(M)}function I(){var O,N,M;N=M=0;e(g,function(P,Q){e(P,function(S,R){var U,T;if(j(S)){S=g[Q][R];if(R>N){N=R}if(Q>M){M=Q}if(S.real){U=S.colspan-1;T=S.rowspan-1;if(U){if(R+U>N){N=R+U}}if(T){if(Q+T>M){M=Q+T}}}}})});return{x:N,y:M}}function v(S){var P,O,U,T,N,M,Q,R;D=F(S);if(L&&D){P=Math.min(L.x,D.x);O=Math.min(L.y,D.y);U=Math.max(L.x,D.x);T=Math.max(L.y,D.y);N=U;M=T;for(y=O;y<=M;y++){S=g[y][P];if(!S.real){if(P-(S.colspan-1)<P){P-=S.colspan-1}}}for(x=P;x<=N;x++){S=g[O][x];if(!S.real){if(O-(S.rowspan-1)<O){O-=S.rowspan-1}}}for(y=O;y<=T;y++){for(x=P;x<=U;x++){S=g[y][x];if(S.real){Q=S.colspan-1;R=S.rowspan-1;if(Q){if(x+Q>N){N=x+Q}}if(R){if(y+R>M){M=y+R}}}}}G.removeClass(G.select("td.mceSelected,th.mceSelected"),"mceSelected");for(y=O;y<=M;y++){for(x=P;x<=N;x++){if(g[y][x]){G.addClass(g[y][x].elm,"mceSelected")}}}}}d.extend(this,{deleteTable:r,split:C,merge:p,insertRow:l,insertCol:h,deleteCols:n,deleteRows:m,cutRows:E,copyRows:J,pasteRows:B,getPos:F,setStartCell:w,setEndCell:v})}d.create("tinymce.plugins.TablePlugin",{init:function(g,h){var f,m,j=true;function l(p){var o=g.selection,n=g.dom.getParent(p||o.getNode(),"table");if(n){return new b(n,g.dom,o)}}function 
k(){g.getBody().style.webkitUserSelect="";if(j){g.dom.removeClass(g.dom.select("td.mceSelected,th.mceSelected"),"mceSelected");j=false}}e([["table","table.desc","mceInsertTable",true],["delete_table","table.del","mceTableDelete"],["delete_col","table.delete_col_desc","mceTableDeleteCol"],["delete_row","table.delete_row_desc","mceTableDeleteRow"],["col_after","table.col_after_desc","mceTableInsertColAfter"],["col_before","table.col_before_desc","mceTableInsertColBefore"],["row_after","table.row_after_desc","mceTableInsertRowAfter"],["row_before","table.row_before_desc","mceTableInsertRowBefore"],["row_props","table.row_desc","mceTableRowProps",true],["cell_props","table.cell_desc","mceTableCellProps",true],["split_cells","table.split_cells_desc","mceTableSplitCells",true],["merge_cells","table.merge_cells_desc","mceTableMergeCells",true]],function(n){g.addButton(n[0],{title:n[1],cmd:n[2],ui:n[3]})});if(!d.isIE){g.onClick.add(function(n,o){o=o.target;if(o.nodeName==="TABLE"){n.selection.select(o);n.nodeChanged()}})}g.onPreProcess.add(function(o,p){var n,q,r,t=o.dom,s;n=t.select("table",p.node);q=n.length;while(q--){r=n[q];t.setAttrib(r,"data-mce-style","");if((s=t.getAttrib(r,"width"))){t.setStyle(r,"width",s);t.setAttrib(r,"width","")}if((s=t.getAttrib(r,"height"))){t.setStyle(r,"height",s);t.setAttrib(r,"height","")}}});g.onNodeChange.add(function(q,o,s){var r;s=q.selection.getStart();r=q.dom.getParent(s,"td,th,caption");o.setActive("table",s.nodeName==="TABLE"||!!r);if(r&&r.nodeName==="CAPTION"){r=0}o.setDisabled("delete_table",!r);o.setDisabled("delete_col",!r);o.setDisabled("delete_table",!r);o.setDisabled("delete_row",!r);o.setDisabled("col_after",!r);o.setDisabled("col_before",!r);o.setDisabled("row_after",!r);o.setDisabled("row_before",!r);o.setDisabled("row_props",!r);o.setDisabled("cell_props",!r);o.setDisabled("split_cells",!r);o.setDisabled("merge_cells",!r)});g.onInit.add(function(r){var p,t,q=r.dom,u;f=r.windowManager;r.onMouseDown.add(function(w,z){if(z.button!=2){k();t=q.getParent(z.target,"td,th");p=q.getParent(t,"table")}});q.bind(r.getDoc(),"mouseover",function(C){var A,z,B=C.target;if(t&&(u||B!=t)&&(B.nodeName=="TD"||B.nodeName=="TH")){z=q.getParent(B,"table");if(z==p){if(!u){u=l(z);u.setStartCell(t);r.getBody().style.webkitUserSelect="none"}u.setEndCell(B);j=true}A=r.selection.getSel();try{if(A.removeAllRanges){A.removeAllRanges()}else{A.empty()}}catch(w){}C.preventDefault()}});r.onMouseUp.add(function(F,G){var z,B=F.selection,H,I=B.getSel(),w,C,A,E;if(t){if(u){F.getBody().style.webkitUserSelect=""}function D(J,L){var K=new d.dom.TreeWalker(J,J);do{if(J.nodeType==3&&d.trim(J.nodeValue).length!=0){if(L){z.setStart(J,0)}else{z.setEnd(J,J.nodeValue.length)}return}if(J.nodeName=="BR"){if(L){z.setStartBefore(J)}else{z.setEndBefore(J)}return}}while(J=(L?K.next():K.prev()))}H=q.select("td.mceSelected,th.mceSelected");if(H.length>0){z=q.createRng();C=H[0];E=H[H.length-1];z.setStartBefore(C);z.setEndAfter(C);D(C,1);w=new d.dom.TreeWalker(C,q.getParent(H[0],"table"));do{if(C.nodeName=="TD"||C.nodeName=="TH"){if(!q.hasClass(C,"mceSelected")){break}A=C}}while(C=w.next());D(A);B.setRng(z)}F.nodeChanged();t=u=p=null}});r.onKeyUp.add(function(w,z){k()});r.onKeyDown.add(function(w,z){n(w)});r.onMouseDown.add(function(w,z){if(z.button!=2){n(w)}});function o(D,z,A,F){var B=3,G=D.dom.getParent(z.startContainer,"TABLE"),C,w,E;if(G){C=G.parentNode}w=z.startContainer.nodeType==B&&z.startOffset==0&&z.endOffset==0&&F&&(A.nodeName=="TR"||A==C);E=(A.nodeName=="TD"||A.nodeName=="TH")&&!F;return 
w||E}function n(A){if(!d.isWebKit){return}var z=A.selection.getRng();var C=A.selection.getNode();var B=A.dom.getParent(z.startContainer,"TD,TH");if(!o(A,z,C,B)){return}if(!B){B=C}var w=B.lastChild;while(w.lastChild){w=w.lastChild}z.setEnd(w,w.nodeValue.length);A.selection.setRng(z)}r.plugins.table.fixTableCellSelection=n;if(r&&r.plugins.contextmenu){r.plugins.contextmenu.onContextMenu.add(function(A,w,C){var D,B=r.selection,z=B.getNode()||r.getBody();if(r.dom.getParent(C,"td")||r.dom.getParent(C,"th")||r.dom.select("td.mceSelected,th.mceSelected").length){w.removeAll();if(z.nodeName=="A"&&!r.dom.getAttrib(z,"name")){w.add({title:"advanced.link_desc",icon:"link",cmd:r.plugins.advlink?"mceAdvLink":"mceLink",ui:true});w.add({title:"advanced.unlink_desc",icon:"unlink",cmd:"UnLink"});w.addSeparator()}if(z.nodeName=="IMG"&&z.className.indexOf("mceItem")==-1){w.add({title:"advanced.image_desc",icon:"image",cmd:r.plugins.advimage?"mceAdvImage":"mceImage",ui:true});w.addSeparator()}w.add({title:"table.desc",icon:"table",cmd:"mceInsertTable",value:{action:"insert"}});w.add({title:"table.props_desc",icon:"table_props",cmd:"mceInsertTable"});w.add({title:"table.del",icon:"delete_table",cmd:"mceTableDelete"});w.addSeparator();D=w.addMenu({title:"table.cell"});D.add({title:"table.cell_desc",icon:"cell_props",cmd:"mceTableCellProps"});D.add({title:"table.split_cells_desc",icon:"split_cells",cmd:"mceTableSplitCells"});D.add({title:"table.merge_cells_desc",icon:"merge_cells",cmd:"mceTableMergeCells"});D=w.addMenu({title:"table.row"});D.add({title:"table.row_desc",icon:"row_props",cmd:"mceTableRowProps"});D.add({title:"table.row_before_desc",icon:"row_before",cmd:"mceTableInsertRowBefore"});D.add({title:"table.row_after_desc",icon:"row_after",cmd:"mceTableInsertRowAfter"});D.add({title:"table.delete_row_desc",icon:"delete_row",cmd:"mceTableDeleteRow"});D.addSeparator();D.add({title:"table.cut_row_desc",icon:"cut",cmd:"mceTableCutRow"});D.add({title:"table.copy_row_desc",icon:"copy",cmd:"mceTableCopyRow"});D.add({title:"table.paste_row_before_desc",icon:"paste",cmd:"mceTablePasteRowBefore"}).setDisabled(!m);D.add({title:"table.paste_row_after_desc",icon:"paste",cmd:"mceTablePasteRowAfter"}).setDisabled(!m);D=w.addMenu({title:"table.col"});D.add({title:"table.col_before_desc",icon:"col_before",cmd:"mceTableInsertColBefore"});D.add({title:"table.col_after_desc",icon:"col_after",cmd:"mceTableInsertColAfter"});D.add({title:"table.delete_col_desc",icon:"delete_col",cmd:"mceTableDeleteCol"})}else{w.add({title:"table.desc",icon:"table",cmd:"mceInsertTable"})}})}if(d.isWebKit){function v(C,N){var L=d.VK;var Q=N.keyCode;function O(Y,U,S){var T=Y?"previousSibling":"nextSibling";var Z=C.dom.getParent(U,"tr");var X=Z[T];if(X){z(C,U,X,Y);d.dom.Event.cancel(S);return true}else{var aa=C.dom.getParent(Z,"table");var W=Z.parentNode;var R=W.nodeName.toLowerCase();if(R==="tbody"||R===(Y?"tfoot":"thead")){var V=w(Y,aa,W,"tbody");if(V!==null){return K(Y,V,U,S)}}return M(Y,Z,T,aa,S)}}function w(V,T,U,X){var S=C.dom.select(">"+X,T);var R=S.indexOf(U);if(V&&R===0||!V&&R===S.length-1){return B(V,T)}else{if(R===-1){var W=U.tagName.toLowerCase()==="thead"?0:S.length-1;return S[W]}else{return S[R+(V?-1:1)]}}}function B(U,T){var S=U?"thead":"tfoot";var R=C.dom.select(">"+S,T);return R.length!==0?R[0]:null}function K(V,T,S,U){var R=J(T,V);R&&z(C,S,R,V);d.dom.Event.cancel(U);return true}function M(Y,U,R,X,W){var S=X[R];if(S){F(S);return true}else{var V=C.dom.getParent(X,"td,th");if(V){return O(Y,V,W)}else{var T=J(U,!Y);F(T);return 
d.dom.Event.cancel(W)}}}function J(S,R){var T=S&&S[R?"lastChild":"firstChild"];return T&&T.nodeName==="BR"?C.dom.getParent(T,"td,th"):T}function F(R){C.selection.setCursorLocation(R,0)}function A(){return Q==L.UP||Q==L.DOWN}function D(R){var T=R.selection.getNode();var S=R.dom.getParent(T,"tr");return S!==null}function P(S){var R=0;var T=S;while(T.previousSibling){T=T.previousSibling;R=R+a(T,"colspan")}return R}function E(T,R){var U=0;var S=0;e(T.children,function(V,W){U=U+a(V,"colspan");S=W;if(U>R){return false}});return S}function z(T,W,Y,V){var X=P(T.dom.getParent(W,"td,th"));var S=E(Y,X);var R=Y.childNodes[S];var U=J(R,V);F(U||R)}function H(R){var T=C.selection.getNode();var U=C.dom.getParent(T,"td,th");var S=C.dom.getParent(R,"td,th");return U&&U!==S&&I(U,S)}function I(S,R){return C.dom.getParent(S,"TABLE")===C.dom.getParent(R,"TABLE")}if(A()&&D(C)){var G=C.selection.getNode();setTimeout(function(){if(H(G)){O(!N.shiftKey&&Q===L.UP,G,N)}},0)}}r.onKeyDown.add(v)}if(!d.isIE){function s(){var w;for(w=r.getBody().lastChild;w&&w.nodeType==3&&!w.nodeValue.length;w=w.previousSibling){}if(w&&w.nodeName=="TABLE"){r.dom.add(r.getBody(),"p",null,'<br mce_bogus="1" />')}}if(d.isGecko){r.onKeyDown.add(function(z,B){var w,A,C=z.dom;if(B.keyCode==37||B.keyCode==38){w=z.selection.getRng();A=C.getParent(w.startContainer,"table");if(A&&z.getBody().firstChild==A){if(c(w,A)){w=C.createRng();w.setStartBefore(A);w.setEndBefore(A);z.selection.setRng(w);B.preventDefault()}}}})}r.onKeyUp.add(s);r.onSetContent.add(s);r.onVisualAid.add(s);r.onPreProcess.add(function(w,A){var z=A.node.lastChild;if(z&&z.childNodes.length==1&&z.firstChild.nodeName=="BR"){w.dom.remove(z)}});if(d.isGecko){r.onKeyDown.add(function(z,B){if(B.keyCode===d.VK.ENTER&&B.shiftKey){var A=z.selection.getRng().startContainer;var C=q.getParent(A,"td,th");if(C){var w=z.getDoc().createTextNode("\uFEFF");q.insertAfter(w,A)}}})}s();r.startContent=r.getContent({format:"raw"})}});e({mceTableSplitCells:function(n){n.split()},mceTableMergeCells:function(o){var p,q,n;n=g.dom.getParent(g.selection.getNode(),"th,td");if(n){p=n.rowSpan;q=n.colSpan}if(!g.dom.select("td.mceSelected,th.mceSelected").length){f.open({url:h+"/merge_cells.htm",width:240+parseInt(g.getLang("table.merge_cells_delta_width",0)),height:110+parseInt(g.getLang("table.merge_cells_delta_height",0)),inline:1},{rows:p,cols:q,onaction:function(r){o.merge(n,r.cols,r.rows)},plugin_url:h})}else{o.merge()}},mceTableInsertRowBefore:function(n){n.insertRow(true)},mceTableInsertRowAfter:function(n){n.insertRow()},mceTableInsertColBefore:function(n){n.insertCol(true)},mceTableInsertColAfter:function(n){n.insertCol()},mceTableDeleteCol:function(n){n.deleteCols()},mceTableDeleteRow:function(n){n.deleteRows()},mceTableCutRow:function(n){m=n.cutRows()},mceTableCopyRow:function(n){m=n.copyRows()},mceTablePasteRowBefore:function(n){n.pasteRows(m,true)},mceTablePasteRowAfter:function(n){n.pasteRows(m)},mceTableDelete:function(n){n.deleteTable()}},function(o,n){g.addCommand(n,function(){var 
p=l();if(p){o(p);g.execCommand("mceRepaint");k()}})});e({mceInsertTable:function(n){f.open({url:h+"/table.htm",width:400+parseInt(g.getLang("table.table_delta_width",0)),height:320+parseInt(g.getLang("table.table_delta_height",0)),inline:1},{plugin_url:h,action:n?n.action:0})},mceTableRowProps:function(){f.open({url:h+"/row.htm",width:400+parseInt(g.getLang("table.rowprops_delta_width",0)),height:295+parseInt(g.getLang("table.rowprops_delta_height",0)),inline:1},{plugin_url:h})},mceTableCellProps:function(){f.open({url:h+"/cell.htm",width:400+parseInt(g.getLang("table.cellprops_delta_width",0)),height:295+parseInt(g.getLang("table.cellprops_delta_height",0)),inline:1},{plugin_url:h})}},function(o,n){g.addCommand(n,function(p,q){o(q)})})}});d.PluginManager.add("table",d.plugins.TablePlugin)})(tinymce); | PypiClean |
/CramUnit-0.8.tar.gz/CramUnit-0.8/bin/run_cram_unit.py | import os
import sys
import argparse
import logging
import time
import warnings
import glob
import cram_unit
import cram_unit.crammer as crammer
__version__ = cram_unit.get_version()
log = logging.getLogger()
def _setup_log(level=logging.DEBUG):
handler = logging.StreamHandler(sys.stdout)
str_formatter = '[%(levelname)s] %(asctime)-15s [%(name)s %(funcName)s %(lineno)d] %(message)s'
formatter = logging.Formatter(str_formatter)
handler.setFormatter(formatter)
handler.setLevel(level)
log.addHandler(handler)
log.setLevel(logging.DEBUG)
def _get_parser():
"""Return an instance of ArgumentParser"""
p = argparse.ArgumentParser(version=__version__)
p.add_argument('cram_tests_dir', help="Cram test directory to run.")
p.add_argument("--debug", action="store_true",
help='Turn on debug mode and log to stdout.')
p.add_argument('-x', dest='xunit_file', default="cram_xunit.xml",
help="Name of file to write Xunit.xml output to.")
p.add_argument("--cram_prefix", default=None,
help="Prefix that will be added to the test case name. \
(e.g., test_{PREFIX}_{CRAM_FILE})")
p.add_argument("--verbose", action='store_true',
help="pass verbose option to cram")
return p
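# Example invocation (illustrative only; the paths and prefix are placeholders):
#   python run_cram_unit.py tests/cram -x cram_xunit.xml --cram_prefix mysuite --debug
# This runs every *.t file found in tests/cram and writes the results as Xunit xml.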
def main():
"""Main Point of Entry"""
p = _get_parser()
args = p.parse_args()
xunit_file = args.xunit_file
debug = args.debug
cram_tests_dir = args.cram_tests_dir
cram_prefix = args.cram_prefix
if not os.path.exists(cram_tests_dir):
msg = "Unable to Find directory {c}".format(c=cram_tests_dir)
sys.stderr.write(msg + "\n")
return -1
if debug:
_setup_log(logging.DEBUG)
# Hacky kind of interface for now. Just grab all the *.t files from the dir.
cram_files = [os.path.abspath(f) for f in glob.glob("{d}/*.t".format(d=cram_tests_dir))]
if not cram_files:
msg = "Unable to find any *.t files in {x}".format(x=cram_tests_dir)
warnings.warn(msg + "\n")
return 0
#print cram_files
log.info("Found {n} cram files to test {c}".format(n=len(cram_files), c=cram_files))
started_at = time.time()
state = crammer.run(cram_files, xunit_file, prefix=cram_prefix, debug=debug)
run_time = time.time() - started_at
rcode = 0 if state else -1
log.info("Exiting {f} with rcode {r} in {s:.2f} sec.".format(f=os.path.basename(__file__), r=rcode, s=run_time))
return rcode
if __name__ == '__main__':
sys.exit(main()) | PypiClean |
/DXC-Industrialized-AI-Starter-3.2.0.tar.gz/DXC-Industrialized-AI-Starter-3.2.0/dxc/ai/publish_microservice/publish_new_microservice.py | from git import Repo
from github import Github
import os.path
import git, os, shutil
from dxc.ai.global_variables import globals_file
import pickle
import pandas as pd
from getpass import getpass
def create_git_repo(github_design, pat_key):
gitrepo = Github(pat_key)
user = gitrepo.get_user()
git_repo_create = user.create_repo(github_design["Repository_Name"])
print("Github Repo %s Created" %github_design["Repository_Name"])
def publish_github(github_design, pat_key):
REMOTE_URL = "https://" + str(pat_key) + "@github.com/" + str(github_design["GitHub_Username"]) + '/' + str(github_design["Repository_Name"]) + ".git"
COMMIT_MSG = github_design["Commit_Message"]
BRANCH = github_design["Branch"]
if not os.path.exists(github_design["Repository_Name"]):
os.makedirs(github_design["Repository_Name"])
new_path = os.path.join(github_design["Repository_Name"])
DIR_NAME = new_path
else:
DIR_NAME = github_design["Repository_Name"]
return DIR_NAME, REMOTE_URL, COMMIT_MSG, BRANCH
def publish_app_api(github_design):
print("Please enter your GitHub PAT")
pat_key = getpass()
try:
create_git_repo(github_design, pat_key)
except:
print("Github Repo %s already exists" %github_design["Repository_Name"])
DIR_NAME, REMOTE_URL, COMMIT_MSG, BRANCH = publish_github(github_design, pat_key)
git_operation_clone(DIR_NAME,REMOTE_URL)
if not os.path.exists(github_design["Repository_Name"]):
os.makedirs(github_design["Repository_Name"])
if not globals_file.imported_model_files:
try:
folder_path = str(github_design["Repository_Name"]) + '/' + str(github_design["Github_Model_Folder"]) + "/best_pipeline.py"
if os.path.exists(folder_path):
globals_file.imported_model_files = True
except:
pass
if globals_file.imported_model_files:
encoder_file_path = str(github_design["Repository_Name"]) + '/' + str(github_design["Github_Model_Folder"]) + "/encoder.pkl"
if os.path.exists(encoder_file_path):
encoder_path = "https://github.com/" + str(github_design["GitHub_Username"]) + '/' + str(github_design["Repository_Name"]) + "/blob/" + str(github_design["Branch"]) + "/encoder.pkl?raw=true"
else:
encoder_path = ''
target_encoder_file_path = str(github_design["Repository_Name"]) + '/' + str(github_design["Github_Model_Folder"]) + "/target_encoder.pkl"
if os.path.exists(target_encoder_file_path):
target_encoder_path = "https://github.com/" + str(github_design["GitHub_Username"]) + '/' + str(github_design["Repository_Name"]) + "/blob/" + str(github_design["Branch"]) + "/target_encoder.pkl?raw=true"
else:
target_encoder_path = ''
data_path = "https://raw.githubusercontent.com/" + str(github_design["GitHub_Username"]) + '/' + str(github_design["Repository_Name"]) + '/' + str(github_design["Branch"]) + "/prepared_data.csv"
else:
if globals_file.run_experiment_encoder_used:
encoder_path = "https://github.com/" + str(github_design["GitHub_Username"]) + '/' + str(github_design["Repository_Name"]) + "/blob/" + str(github_design["Branch"]) + "/encoder.pkl?raw=true"
else:
encoder_path = ''
if globals_file.run_experiment_target_encoder_used:
target_encoder_path = "https://github.com/" + str(github_design["GitHub_Username"]) + '/' + str(github_design["Repository_Name"]) + "/blob/" + str(github_design["Branch"]) + "/target_encoder.pkl?raw=true"
else:
target_encoder_path = ''
data_path = "https://raw.githubusercontent.com/" + str(github_design["GitHub_Username"]) + '/' + str(github_design["Repository_Name"]) + '/' + str(github_design["Branch"]) + "/prepared_data.csv"
if globals_file.imported_model_files:
generate_model_req_files(github_design)
else:
generate_req_files(github_design)
generate_app_script(github_design, data_path, encoder_path, target_encoder_path)
git_operation_push(DIR_NAME, BRANCH, COMMIT_MSG)
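# Illustrative sketch of the github_design dictionary expected by publish_app_api
# (all values below are placeholders, not real accounts or repositories):
#   github_design = {
#       "Repository_Name": "my-model-api",
#       "GitHub_Username": "example-user",
#       "Branch": "main",
#       "Commit_Message": "publish model microservice",
#       "Github_Model_Folder": "model_files",  # only used when reusing exported model files
#   }
#   publish_app_api(github_design)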
def git_operation_clone(DIR_NAME,REMOTE_URL):
try:
if os.path.isdir(DIR_NAME):
shutil.rmtree(DIR_NAME)
os.mkdir(DIR_NAME)
repo = git.Repo.init(DIR_NAME)
origin = repo.create_remote('origin', REMOTE_URL)
origin.fetch()
origin.pull(origin.refs[0].remote_head)
except Exception as e:
print(str(e))
def git_operation_push(DIR_NAME, BRANCH, COMMIT_MSG):
try:
repo = Repo(DIR_NAME)
try:
repo.git.checkout('-b', BRANCH)
except:
repo.git.checkout(BRANCH)
repo.git.add('--all')
repo.index.commit(COMMIT_MSG)
origin = repo.remote('origin')
origin.push(BRANCH)
repo.git.add(update=True)
print("Github Push Successful")
except Exception as e:
print(str(e))
def generate_req_files(github_design):
tpot_data = pd.read_csv('/content\data_file.csv')
# Save encoder
if globals_file.run_experiment_encoder_used:
encoder_save_path = str(github_design["Repository_Name"]) + "/encoder.pkl"
pickle.dump(globals_file.run_experiment_encoder, open(encoder_save_path, 'wb'))
print("Data encoder saved in encoder.pkl file")
# Save dataset
data_save_path = str(github_design["Repository_Name"]) + "/prepared_data.csv"
tpot_data.to_csv(data_save_path, index=False)
print('Data saved in prepared_data.csv file')
# Save target encoder
if globals_file.run_experiment_target_encoder_used:
target_encoder_save_path = str(github_design["Repository_Name"]) + "/target_encoder.pkl"
pickle.dump(globals_file.run_experiment_target_encoder, open(target_encoder_save_path, 'wb'))
print("Target encoder saved in target_encoder.pkl file")
def generate_model_req_files(github_design):
data_file_path = str(github_design["Repository_Name"]) + '/' + str(github_design["Github_Model_Folder"]) + "/prepared_data.csv"
tpot_data = pd.read_csv(data_file_path)
# Save encoder
#if globals_file.run_experiment_encoder_used:
encoder_file_path = str(github_design["Repository_Name"]) + '/' + str(github_design["Github_Model_Folder"]) + "/encoder.pkl"
if os.path.exists(encoder_file_path):
with open(encoder_file_path, 'rb') as encoder_f:
encoder_file = pickle.load(encoder_f)
encoder_save_path = str(github_design["Repository_Name"]) + "/encoder.pkl"
pickle.dump(encoder_file, open(encoder_save_path, 'wb'))
print("Data encoder saved in encoder.pkl file")
# Save dataset
data_save_path = str(github_design["Repository_Name"]) + "/prepared_data.csv"
tpot_data.to_csv(data_save_path, index=False)
print('Data saved in prepared_data.csv file')
# Save target encoder
target_encoder_file_path = str(github_design["Repository_Name"]) + '/' + str(github_design["Github_Model_Folder"]) + "/target_encoder.pkl"
if os.path.exists(target_encoder_file_path):
with open(target_encoder_file_path, 'rb') as target_f:
target_encoder = pickle.load(target_f)
target_encoder_save_path = str(github_design["Repository_Name"]) + "/target_encoder.pkl"
pickle.dump(target_encoder, open(target_encoder_save_path, 'wb'))
print("Target encoder saved in target_encoder.pkl file")
def generate_app_script(github_design, data_path='', encoder_path='', target_encoder_path=''):
requirements = """pip-upgrader
Flask
pandas
numpy
requests
sklearn
scikit-learn
gunicorn
"""
requirement_save_path = str(github_design["Repository_Name"]) + "/requirements.txt"
with open(requirement_save_path, 'w') as f:
f.write(requirements)
print('Generated requirements.txt file')
if globals_file.imported_model_files:
pipeline_save_path = str(github_design["Repository_Name"]) + '/' + str(github_design["Github_Model_Folder"]) + "/best_pipeline.py"
with open(pipeline_save_path, "r") as txt_file:
script = txt_file.readlines()
script = open(pipeline_save_path).read()
git_data_save_path = str(github_design["Repository_Name"]) + '/' + str(github_design["Github_Model_Folder"]) + "/prepared_data.csv"
script = script.replace(git_data_save_path, "prepared_data.csv")
script = script.replace("results = exported_pipeline.predict(testing_features)", "")
else:
with open("best_pipeline.py", "r") as txt_file:
script = txt_file.readlines()
script = open("best_pipeline.py").read()
script = script.replace("'/content\\data_file.csv\', sep=\'COLUMN_SEPARATOR\', dtype=np.float64", "'prepared_data.csv'")
script = script.replace("results = exported_pipeline.predict(testing_features)", "")
#code of app.py
app_script = """import pandas as pd
from flask import Flask, jsonify, request
import pickle
# Code from Best Pipeline.py
best_pipeline
# Flask app script
#app
app = Flask(__name__)
#routes
@app.route('/', methods=['POST'])
def predict():
#get data
data = request.get_json(force=True)
#convert data into dataframe
data.update((x, [y]) for x, y in data.items())
data_df = pd.DataFrame.from_dict(data)
#predictions
try:
with open('encoder.pkl', 'rb') as f:
encoder = pickle.load(f)
data_df = encoder.transform(data_df)
except:
print("No encoder exist")
result = exported_pipeline.predict(data_df)
#decode the output
try:
with open('target_encoder.pkl', 'rb') as f:
target_encoder = pickle.load(f)
result = target_encoder.inverse_transform(result)
except:
print("No target encoder exist")
#send back to browser
output = {'results': result[0]}
#return data
return jsonify(results=output)
if __name__ == "__main__":
# app.run(debug = True)
app.run(host ='0.0.0.0', port = 8080, debug = True)
"""
app_script = app_script.replace('best_pipeline', script)
app_save_path = str(github_design["Repository_Name"]) + "/app.py"
with open(app_save_path, 'w') as f:
f.write(app_script)
print('Generated app.py file to build the application')
profile_script = """web: gunicorn app:app"""
procfile_save_path = str(github_design["Repository_Name"]) + "/Procfile"
with open(procfile_save_path, 'w') as f:
f.write(profile_script)
print('Generated procfile to build the application') | PypiClean |
/NeuroUnits-0.1.2.tar.gz/NeuroUnits-0.1.2/src/neurounits/visitors/common/ast_symbol_dependancies.py |
# -------------------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -------------------------------------------------------------------------------
from neurounits.visitors import ASTVisitorBase
import itertools
from itertools import chain
class VisitorFindDirectSymbolDependance_OLD(ASTVisitorBase):
""" Finds symbol dependance on one another, but does
not recurse over assignments. I.e
a = b+2
d = c+a
Then 'b' will not be reported as a dependancy on 'd'
"""
@classmethod
def get_assignment_dependancy_ordering(cls, eqnset):
assert False, 'Move to new version of this class!'
from neurounits.ast.astobjects import AssignedVariable
deps = dict([(ass.lhs, VisitorFindDirectSymbolDependance_OLD().visit(ass.rhs_map)) for ass in eqnset.assignments])
ordered = []
to_order = set(deps.keys())
while to_order:
found = False
for o in sorted(to_order, key=lambda o:o.symbol):
o_deps = [d for d in deps[o] if type(d) == AssignedVariable]
o_deps_unsatisfied = [d for d in o_deps if not d in ordered]
if len(o_deps_unsatisfied) == 0:
ordered.append(o)
to_order.remove(o)
found = True
break
# Prevent recursion:
assert found == True, """Can't find the dependencies for: %s"""%",".join( [o.symbol for o in to_order] )
assert len(ordered) == len(eqnset._eqn_assignment)
return ordered
@classmethod
def get_assignment_dependancy_ordering_recursive(cls, eqnset, ass):
assert False, 'Move to new version of this class!'
from neurounits.ast.astobjects import AssignedVariable
D = VisitorFindDirectSymbolDependance_OLD()
D.visit(eqnset)
def ass_deps(a):
return [t for t in D.dependancies[a] if isinstance(t, AssignedVariable)]
# resolved_deps = set()
required_deps = set(ass_deps(ass))
# required_deps = set()
# Find all the dependancies:
start_dep_len = None
while start_dep_len != len(required_deps):
start_dep_len = len(required_deps)
to_add = set()
for i in required_deps:
for i_dep in ass_deps(i):
if not i_dep in required_deps:
to_add.add(i_dep)
required_deps = required_deps | to_add
op = [o for o in cls.get_assignment_dependancy_ordering(eqnset) if o in required_deps]
return op
def __init__(self):
assert False, 'Move to new version of this class!'
self.dependancies = {}
@classmethod
def build_direct_dependancy_graph(cls, component):
import networkx as nx
assert False, 'Move to new version of this class!'
import neurounits.ast as ast
dep_find = VisitorFindDirectSymbolDependance_OLD()
dep_find.visit(component)
g = nx.DiGraph()
all_objs = set( list(chain(*dep_find.dependancies.values())) + dep_find.dependancies.keys() )
for o in all_objs:
color_lut = {
ast.AssignedVariable: 'green',
ast.StateVariable: 'blue', }
color = color_lut[type(o)] if type(o) in color_lut else 'red'
g.add_node(o, label=repr(o), color=color)
for k,v in dep_find.dependancies.items():
for c in v:
g.add_edge(k,c)
return g
def VisitNineMLComponent(self, o, **kwargs):
assert False, 'Move to new version of this class!'
for a in o.assignments:
self.dependancies[a.lhs] = self.visit(a)
for a in o.timederivatives:
self.dependancies[a.lhs] = self.visit(a)
for rt_graph in o._rt_graphs:
self.dependancies[rt_graph] = []
for tr in o.transitions:
tr_deps =self.visit(tr)
import neurounits.ast as ast
tr_deps = [o for o in tr_deps if not (isinstance(o, ast.SuppliedValue) and o.symbol=='t') ]
self.dependancies[tr.rt_graph].extend ( tr_deps)
def VisitSymbolicConstant(self, o, **kwargs):
return []
def VisitIfThenElse(self, o, **kwargs):
d1 = self.visit(o.predicate, **kwargs)
d2 = self.visit(o.if_true_ast, **kwargs)
d3 = self.visit(o.if_false_ast, **kwargs)
return d1 + d2 + d3
def VisitInEquality(self, o, **kwargs):
d1 = self.visit(o.lesser_than, **kwargs)
d2 = self.visit(o.greater_than, **kwargs)
return d1 + d2
def VisitBoolAnd(self, o, **kwargs):
d1 = self.visit(o.lhs, **kwargs)
d2 = self.visit(o.rhs, **kwargs)
return d1 + d2
def VisitBoolOr(self, o, **kwargs):
d1 = self.visit(o.lhs, **kwargs)
d2 = self.visit(o.rhs, **kwargs)
return d1 + d2
def VisitBoolNot(self, o, **kwargs):
d1 = self.visit(o.lhs, **kwargs)
return d1
# Function Definitions:
def VisitFunctionDefUser(self, o, **kwargs):
raise NotImplementedError()
def VisitFunctionDefBuiltIn(self, o, **kwargs):
raise NotImplementedError()
def VisitFunctionDefParameter(self, o, **kwargs):
raise NotImplementedError()
# Terminals:
def VisitStateVariable(self, o, **kwargs):
return [o]
def VisitParameter(self, o, **kwargs):
return [o]
def VisitConstant(self, o, **kwargs):
return []
def VisitConstantZero(self, o, **kwargs):
return []
def VisitAssignedVariable(self, o, **kwargs):
return [o]
def VisitSuppliedValue(self, o, **kwargs):
return [o]
# AST Objects:
def VisitTimeDerivativeByRegime(self, o, **kwargs):
return self.visit(o.rhs_map)
def VisitRegimeDispatchMap(self, o, **kwargs):
# Visit the RT graph, to work out what it depends on!
#self.visit(o.get_rt_graph())
symbols = []
for rhs in o.rhs_map.values():
symbols.extend(self.visit(rhs, **kwargs))
# The RT graph is only a dependency if there is more than one possibility!
return symbols + ([o.get_rt_graph()] if len(o.rhs_map) > 1 else [] )
def VisitRTGraph(self, o, **kwargs):
return []
def visit_trans(self,o):
action_deps = []
for a in o.actions:
action_deps.extend( self.visit(a, rt_graph=o.rt_graph))
return list(set(action_deps))
def VisitOnConditionTriggerTransition(self, o, **kwargs):
return [o.rt_graph] + self.visit_trans(o) + self.visit(o.trigger)
def VisitOnTransitionEvent(self, o, **kwargs):
return [o.rt_graph] + self.visit_trans(o)
def VisitOnEventStateAssignment(self,o, rt_graph, **kwargs):
# In the case of state assignments, the state variable becomes dependent on the RT-graph
assert o.lhs in self.dependancies
self.dependancies[o.lhs].append(rt_graph)
return [o.lhs] + self.visit(o.rhs)
def VisitEmitEvent(self, o, rt_graph):
return list( chain(*[self.visit(p) for p in o.parameters]) )
def VisitEmitEventParameter(self, o):
return self.visit(o.rhs)
#return list( chain(*[self.visit(p) for p in o.parameters]) )
def VisitOnEventDefParameter(self,o):
return []
def VisitEqnAssignmentByRegime(self, o, **kwargs):
return self.visit(o.rhs_map)
def VisitAddOp(self, o, **kwargs):
return self.visit(o.lhs) + self.visit(o.rhs)
def VisitSubOp(self, o, **kwargs):
return self.visit(o.lhs) + self.visit(o.rhs)
def VisitMulOp(self, o, **kwargs):
return self.visit(o.lhs) + self.visit(o.rhs)
def VisitDivOp(self, o, **kwargs):
return self.visit(o.lhs) + self.visit(o.rhs)
def VisitExpOp(self, o, **kwargs):
return self.visit(o.lhs)
def VisitFunctionDefBuiltInInstantiation(self, o, **kwargs):
return list(itertools.chain(*[self.visit(p) for p in o.parameters.values()]))
def VisitFunctionDefUserInstantiation(self, o, **kwargs):
return list(itertools.chain(*[self.visit(p) for p in o.parameters.values()]))
def VisitFunctionDefInstantiationParameter(self, o, **kwargs):
return self.visit(o.rhs_ast)
def VisitAnalogReducePort(self, o, **kwargs):
return [o] + list(itertools.chain( *[self.visit(a) for a in o.rhses]))
def VisitRandomVariable(self, o):
return list(itertools.chain( *[self.visit(p) for p in o.parameters]))
def VisitRandomVariableParameter(self,o):
return self.visit(o.rhs_ast) | PypiClean |
/ClueDojo-1.4.3-1.tar.gz/ClueDojo-1.4.3-1/src/cluedojo/static/dojox/charting/Theme.js | if(!dojo._hasResource["dojox.charting.Theme"]){
dojo._hasResource["dojox.charting.Theme"]=true;
dojo.provide("dojox.charting.Theme");
dojo.require("dojox.color");
dojo.require("dojox.color.Palette");
(function(){
var _1=dojox.charting;
_1.Theme=function(_2){
_2=_2||{};
var _3=_1.Theme._def;
dojo.forEach(["chart","plotarea","axis","series","marker"],function(n){
this[n]=dojo.delegate(_3[n],_2[n]||{});
},this);
this.markers=dojo.delegate(_1.Theme.Markers,_2.markers||{});
this.colors=[];
this.antiAlias=("antiAlias" in _2)?_2.antiAlias:true;
this.assignColors=("assignColors" in _2)?_2.assignColors:true;
this.assignMarkers=("assignMarkers" in _2)?_2.assignMarkers:true;
_2.colors=_2.colors||_3.colors;
dojo.forEach(_2.colors,function(_4){
this.colors.push(_4);
},this);
this._current={color:0,marker:0};
this._markers=[];
this._buildMarkerArray();
};
_1.Theme.Markers={CIRCLE:"m-3,0 c0,-4 6,-4 6,0 m-6,0 c0,4 6,4 6,0",SQUARE:"m-3,-3 l0,6 6,0 0,-6 z",DIAMOND:"m0,-3 l3,3 -3,3 -3,-3 z",CROSS:"m0,-3 l0,6 m-3,-3 l6,0",X:"m-3,-3 l6,6 m0,-6 l-6,6",TRIANGLE:"m-3,3 l3,-6 3,6 z",TRIANGLE_INVERTED:"m-3,-3 l3,6 3,-6 z"};
_1.Theme._def={chart:{stroke:null,fill:"white"},plotarea:{stroke:null,fill:"white"},axis:{stroke:{color:"#333",width:1},majorTick:{color:"#666",width:1,length:6,position:"center"},minorTick:{color:"#666",width:0.8,length:3,position:"center"},microTick:{color:"#666",width:0.5,length:1,position:"center"},font:"normal normal normal 7pt Tahoma",fontColor:"#333"},series:{outline:{width:0.1,color:"#ccc"},stroke:{width:1.5,color:"#333"},fill:"#ccc",font:"normal normal normal 7pt Tahoma",fontColor:"#000"},marker:{stroke:{width:1},fill:"#333",font:"normal normal normal 7pt Tahoma",fontColor:"#000"},colors:["#54544c","#858e94","#6e767a","#948585","#474747"]};
dojo.extend(_1.Theme,{defineColors:function(_5){
var _6=_5||{};
var c=[],n=_6.num||5;
if(_6.colors){
var l=_6.colors.length;
for(var i=0;i<n;i++){
c.push(_6.colors[i%l]);
}
this.colors=c;
}else{
if(_6.hue){
var s=_6.saturation||100;
var st=_6.low||30;
var _7=_6.high||90;
var l=(_7+st)/2;
this.colors=dojox.color.Palette.generate(dojox.color.fromHsv(_6.hue,s,l),"monochromatic").colors;
}else{
if(_6.generator){
this.colors=dojox.color.Palette.generate(_6.base,_6.generator).colors;
}
}
}
},_buildMarkerArray:function(){
this._markers=[];
for(var p in this.markers){
this._markers.push(this.markers[p]);
}
this._current.marker=0;
},_clone:function(){
return new _1.Theme({chart:this.chart,plotarea:this.plotarea,axis:this.axis,series:this.series,marker:this.marker,antiAlias:this.antiAlias,assignColors:this.assignColors,assignMarkers:this.assigneMarkers,colors:dojo.delegate(this.colors)});
},addMarker:function(_8,_9){
this.markers[_8]=_9;
this._buildMarkerArray();
},setMarkers:function(_a){
this.markers=_a;
this._buildMarkerArray();
},next:function(_b){
if(_b=="marker"){
return this._markers[this._current.marker++%this._markers.length];
}else{
return this.colors[this._current.color++%this.colors.length];
}
},clear:function(){
this._current={color:0,marker:0};
}});
})();
} | PypiClean |
/GenIce2-2.1.7.1.tar.gz/GenIce2-2.1.7.1/genice2/lattices/Struct42.py | from genice2.cell import cellvectors
import genice2.lattices
desc = {"ref": {"SpaceFullerene": 'Sikiric 2010'},
"usage": "No options available.",
"brief": "A space fullerene."
}
class Lattice(genice2.lattices.Lattice):
def __init__(self):
self.pairs = """
158 332
227 319
381 240
50 295
204 56
183 266
359 189
387 263
248 354
221 166
97 307
282 203
157 321
234 197
385 375
198 150
370 282
5 144
23 75
55 62
322 70
237 170
158 24
108 25
114 318
0 324
346 1
222 261
373 317
105 327
326 113
378 360
41 356
218 94
55 285
185 341
91 121
228 194
313 202
344 192
242 237
180 88
356 184
238 35
77 376
119 325
323 336
59 79
94 200
52 368
84 158
238 259
33 172
30 263
168 361
154 40
316 77
310 268
45 107
33 91
135 372
128 113
319 359
89 21
156 152
191 120
289 54
345 24
92 204
42 326
60 355
63 212
124 349
75 358
111 48
60 343
42 179
240 151
38 229
310 247
42 122
147 16
73 327
383 78
205 166
160 140
342 174
213 256
67 29
304 67
221 286
206 168
216 255
345 306
190 369
122 276
125 346
273 275
225 27
12 53
186 234
347 45
68 179
106 273
310 187
160 223
26 328
330 93
320 252
242 374
148 142
47 328
161 293
72 257
260 164
51 63
239 94
128 47
305 243
33 193
44 114
45 114
387 64
350 297
84 246
121 35
150 359
146 231
80 6
61 166
81 110
58 200
30 360
207 195
304 341
165 73
164 301
288 142
198 210
225 184
357 381
52 173
226 194
151 20
0 184
123 25
292 32
262 163
267 119
260 86
293 369
374 98
14 317
161 298
298 354
207 202
93 322
314 362
339 352
255 381
137 270
27 178
37 353
259 172
171 142
229 208
226 204
58 116
124 272
253 15
145 300
76 90
301 109
11 254
176 230
332 61
18 135
95 99
0 178
186 349
169 253
370 228
20 360
259 340
378 312
299 307
262 353
66 102
382 265
58 81
247 17
98 170
132 256
23 199
217 4
49 78
239 76
382 284
232 329
34 146
258 8
206 190
380 208
331 211
378 115
20 102
22 105
64 140
132 124
80 176
314 209
23 127
338 194
51 109
168 354
278 36
384 55
264 235
148 119
34 295
73 104
175 385
78 147
97 322
269 122
86 365
278 218
335 39
338 203
41 163
321 261
292 318
95 369
91 307
196 324
249 287
69 130
87 214
124 197
118 335
290 362
347 317
153 83
356 209
77 157
5 109
335 89
128 63
231 287
304 187
149 159
326 56
296 10
314 37
342 83
130 180
140 8
313 50
115 366
227 48
343 220
215 353
57 348
12 1
125 308
29 223
209 16
330 358
216 189
239 297
311 40
328 276
10 177
300 135
117 68
11 299
207 106
217 59
46 62
7 169
303 56
29 28
42 212
251 35
228 164
66 64
182 249
210 96
162 191
284 344
364 43
289 89
80 18
71 264
234 133
97 193
232 211
251 192
240 14
245 173
277 380
162 262
348 102
69 35
272 235
57 357
116 230
281 3
384 325
351 249
216 378
169 379
178 159
296 283
352 224
245 181
233 384
47 371
343 18
112 104
368 134
64 61
150 246
315 333
38 277
66 319
139 235
233 377
182 21
295 250
117 133
181 6
220 191
299 241
46 142
183 107
308 220
294 272
139 267
207 100
379 177
367 190
276 63
215 177
334 168
215 346
344 311
30 189
125 37
336 312
54 82
52 141
95 152
180 280
294 365
340 311
90 290
233 79
67 140
129 240
265 104
128 222
103 164
170 288
20 318
370 157
305 367
361 99
87 241
188 143
281 359
274 14
373 283
15 257
330 251
118 291
31 183
192 127
201 324
386 99
379 266
106 231
155 290
25 306
195 236
342 22
156 214
86 179
334 36
292 189
92 28
30 66
31 274
355 176
120 6
238 375
334 224
185 269
254 375
139 98
154 127
300 6
65 134
271 150
26 108
355 188
13 90
226 122
72 14
200 352
134 143
289 165
136 88
221 96
255 336
132 277
335 106
264 288
217 295
236 174
263 48
162 80
376 204
279 62
173 200
239 60
28 303
329 337
363 22
85 70
7 149
245 302
59 100
162 350
13 120
339 81
347 131
31 115
58 181
136 73
308 163
57 151
341 268
167 55
363 74
129 257
278 176
256 2
153 146
215 78
171 50
258 306
155 9
267 234
257 45
68 194
129 318
85 180
87 293
7 27
43 17
288 256
296 225
61 210
367 385
341 25
154 105
329 241
46 325
386 143
68 186
285 231
136 363
94 230
186 365
52 252
12 201
213 374
275 285
18 163
71 2
38 171
268 303
247 108
148 197
175 361
309 188
138 387
172 40
92 269
111 336
153 112
1 9
177 184
109 101
15 115
41 19
338 349
281 48
0 266
377 119
263 32
185 26
248 305
136 244
111 57
38 79
13 343
242 377
98 325
315 72
244 126
147 262
301 235
252 143
95 385
308 9
331 87
110 141
34 219
260 338
205 43
283 131
242 294
118 174
222 17
138 258
254 85
81 252
116 320
161 243
386 339
246 96
332 187
69 307
21 174
213 380
237 79
286 84
88 358
93 172
236 219
123 17
161 368
279 250
151 312
149 274
175 248
344 280
51 103
56 261
138 227
346 196
329 121
226 321
53 155
305 337
260 272
146 167
371 326
374 144
129 216
275 208
316 92
376 113
293 337
371 269
371 268
324 19
24 32
118 153
248 156
137 1
296 201
11 175
265 83
333 266
273 291
93 121
3 32
167 291
220 297
47 247
373 270
317 159
229 100
120 362
297 290
71 133
11 211
279 291
53 19
199 40
8 246
82 22
21 82
145 350
112 219
74 104
212 101
36 188
198 102
356 353
363 311
284 74
76 320
71 282
75 97
300 116
327 54
138 332
323 149
351 219
7 283
244 280
23 54
357 3
103 179
302 218
160 96
111 44
137 383
39 112
33 375
316 43
165 126
274 312
347 381
182 126
278 181
277 148
49 209
53 314
327 358
182 195
321 203
315 253
298 339
217 167
76 173
70 211
330 154
304 258
238 232
286 187
299 156
203 101
158 271
137 333
85 193
205 28
130 192
16 372
123 223
309 65
279 208
322 241
271 3
309 302
232 367
13 245
233 275
351 342
337 152
331 361
383 27
145 16
270 178
34 202
348 227
350 230
125 19
155 372
382 289
366 107
41 372
365 144
315 373
298 214
145 362
196 333
218 224
287 89
193 199
117 51
229 313
91 152
319 8
183 72
117 370
345 387
366 44
110 36
201 270
75 130
383 379
345 160
210 24
31 169
83 287
49 225
221 364
147 9
2 144
313 285
355 320
301 282
357 114
86 101
141 243
67 166
10 253
323 366
334 386
309 141
50 62
10 196
29 185
139 5
110 354
88 340
191 37
123 303
310 205
39 195
131 15
12 49
264 197
316 328
369 134
340 251
113 103
284 126
206 243
202 249
364 26
286 108
351 74
294 267
360 44
199 280
255 281
222 77
331 190
60 302
2 349
198 292
90 135
132 237
384 4
364 223
165 39
84 306
254 69
59 273
214 99
5 133
244 82
170 4
157 276
323 131
224 65
228 376
265 105
107 159
100 250
46 213
382 127
259 70
352 368
250 236
377 380
206 65
212 261
171 4
348 271
"""
self.waters = """
0.5 0.75 0.375
0.0 0.0 0.34429
0.125 0.25 0.77137
0.68238 0.0625 0.51069
0.5 0.0 0.84429
0.8125 0.06763 0.76069
0.5 0.44263 0.2548
0.875 0.43337 0.40535
0.0 0.375 0.56101
0.0 0.875 0.31893
0.375 0.25 0.38285
0.375 0.43337 0.09465
0.81663 0.125 0.34465
0.5 0.05738 0.2548
0.0 0.875 0.43899
0.375 0.25 0.43107
0.81663 0.56763 0.30585
0.5 0.05837 0.63962
0.375 0.75 0.27137
0.5 0.93238 0.33233
0.19263 0.75 0.4952
0.0 0.44163 0.94898
0.0 0.125 0.97863
0.81763 0.5625 0.01069
0.5 0.05837 0.55102
0.69163 0.25 0.61038
0.0 0.125 0.63285
0.81663 0.55837 0.38213
0.30837 0.56663 0.63213
0.18337 0.44163 0.61787
0.31763 0.4375 0.51069
0.19263 0.55837 0.42592
0.5 0.125 0.52137
0.0 0.75 0.0689
0.375 0.93337 0.90535
0.5 0.875 0.06101
0.31663 0.56763 0.19415
0.375 0.25 0.31101
0.31663 0.625 0.84465
0.5 0.375 0.93899
0.0 0.875 0.02137
0.5 0.75 0.3189
0.81663 0.43238 0.69415
0.30837 0.93337 0.63213
0.5 0.625 0.47863
0.5 0.94163 0.44898
0.0 0.06763 0.83233
0.81663 0.875 0.65535
0.68238 0.4375 0.51069
0.81663 0.375 0.34465
0.19163 0.93337 0.86787
0.8125 0.94263 0.71791
0.68337 0.93238 0.19415
0.69163 0.05738 0.32409
0.81763 0.4375 0.98932
0.80837 0.93337 0.86787
0.5 0.43238 0.66767
0.80738 0.75 0.4952
0.6875 0.44263 0.21791
0.5 0.375 0.86716
0.1875 0.93238 0.23932
0.375 0.75 0.56893
0.0 0.05837 0.86038
0.81663 0.06763 0.69415
0.31763 0.56663 0.55585
0.19163 0.05738 0.17592
0.19263 0.5625 0.53209
0.125 0.56663 0.59465
0.0 0.625 0.7286
0.5 0.625 0.06101
0.125 0.25 0.06893
0.1875 0.06763 0.76069
0.19263 0.94163 0.42592
0.5 0.25 0.97863
0.30738 0.9375 0.96791
0.69263 0.4375 0.03209
0.8125 0.93238 0.23932
0.30837 0.94263 0.67592
0.0 0.5 0.34429
0.5 0.5 0.84429
0.3125 0.56763 0.26069
0.68337 0.56763 0.19415
0.0 0.375 0.97863
0.81763 0.93337 0.94415
0.80738 0.05837 0.57409
0.18238 0.43337 0.05585
0.6875 0.43238 0.73932
0.875 0.25 0.11716
0.375 0.25 0.02141
0.81763 0.56663 0.94415
0.6875 0.93238 0.26069
0.81763 0.75 0.08233
0.18337 0.625 0.65535
0.81763 0.06663 0.05585
0.0 0.25 0.22863
0.0 0.75 0.125
0.19263 0.05837 0.57409
0.81763 0.43337 0.05585
0.68337 0.06763 0.80585
0.0 0.55837 0.13962
0.31663 0.44163 0.88213
0.625 0.25 0.72863
0.125 0.75 0.52141
0.6875 0.75 0.71311
0.5 0.0625 0.96311
0.81763 0.0625 0.98932
0.625 0.56663 0.90535
0.5 0.75 0.43111
0.81663 0.05837 0.61787
0.6875 0.06763 0.73932
0.5 0.625 0.18107
0.68238 0.5625 0.48932
0.5 0.125 0.93899
0.625 0.75 0.68899
0.5 0.875 0.47863
0.31763 0.43337 0.44415
0.8125 0.56763 0.23932
0.0 0.875 0.7286
0.81763 0.25 0.91767
0.875 0.75 0.81101
0.5 0.25 0.27137
0.69263 0.94163 0.07409
0.0 0.375 0.68107
0.5 0.25 0.625
0.3125 0.55738 0.78209
0.30837 0.05738 0.32409
0.30738 0.5625 0.96791
0.69263 0.75 0.0048
0.69163 0.94263 0.67592
0.19263 0.0625 0.46791
0.5 0.5625 0.03689
0.625 0.25 0.43107
0.31663 0.43238 0.80585
0.0 0.94263 0.7548
0.0 0.93238 0.16767
0.625 0.75 0.27137
0.30738 0.25 0.9952
0.0 0.875 0.36716
0.68238 0.56663 0.55585
0.6875 0.94263 0.78209
0.19263 0.44163 0.57409
0.5 0.875 0.18107
0.19163 0.94263 0.82409
0.0 0.75 0.18111
0.875 0.25 0.77137
0.8125 0.44263 0.28209
0.625 0.93337 0.90535
0.0 0.625 0.31893
0.125 0.75 0.81101
0.80738 0.55837 0.42592
0.0 0.0625 0.53689
0.0 0.75 0.47863
0.80837 0.75 0.11038
0.69263 0.05837 0.92592
0.81763 0.9375 0.01069
0.81663 0.93238 0.30585
0.68337 0.55837 0.11787
0.18337 0.06763 0.69415
0.68238 0.93337 0.55585
0.68238 0.75 0.41767
0.31763 0.25 0.58233
0.68337 0.125 0.15535
0.1875 0.44263 0.28209
0.30837 0.75 0.30102
0.5 0.75 0.72863
0.5 0.4375 0.96311
0.25 0.75 0.59429
0.68337 0.05837 0.88213
0.31663 0.375 0.15535
0.125 0.43337 0.40535
0.5 0.125 0.81893
0.31663 0.875 0.84465
0.0 0.94163 0.05102
0.6875 0.05738 0.21791
0.0 0.25 0.93111
0.31663 0.55837 0.11787
0.1875 0.56763 0.23932
0.30837 0.43337 0.36787
0.69163 0.75 0.38962
0.8125 0.55738 0.71791
0.30738 0.4375 0.03209
0.5 0.375 0.2286
0.18238 0.56663 0.94415
0.31763 0.75 0.41767
0.5 0.55837 0.36038
0.0 0.375 0.63285
0.0 0.55738 0.7548
0.75 0.75 0.59429
0.19163 0.75 0.19898
0.19263 0.25 0.5048
0.19163 0.06663 0.13213
0.3125 0.25 0.28689
0.5 0.75 0.02137
0.0 0.55837 0.05102
0.1875 0.55738 0.71791
0.30738 0.44163 0.92592
0.30837 0.06663 0.36787
0.1875 0.75 0.78689
0.19263 0.9375 0.53209
0.0 0.625 0.02137
0.8125 0.25 0.21311
0.69163 0.06663 0.36787
0.25 0.75 0.90571
0.375 0.25 0.72863
0.30837 0.55738 0.67592
0.375 0.75 0.61716
0.31663 0.125 0.15535
0.375 0.56663 0.90535
0.0 0.44163 0.86038
0.69163 0.44263 0.32409
0.31763 0.93337 0.55585
0.25 0.25 0.09429
0.69163 0.25 0.69898
0.0 0.25 0.8189
0.80837 0.43337 0.13213
0.18337 0.375 0.34465
0.125 0.25 0.4786
0.5 0.125 0.86716
0.1875 0.25 0.21311
0.30738 0.05837 0.92592
0.1875 0.05738 0.28209
0.125 0.93337 0.59465
0.5 0.06763 0.66767
0.30837 0.25 0.61038
0.125 0.25 0.18899
0.69163 0.43337 0.36787
0.18337 0.43238 0.69415
0.80738 0.5625 0.53209
0.3125 0.75 0.71311
0.19163 0.56663 0.86787
0.0 0.44263 0.2452
0.75 0.75 0.90571
0.375 0.06663 0.09465
0.68337 0.625 0.84465
0.0 0.75 0.77137
0.5 0.875 0.77141
0.18238 0.25 0.91767
0.5 0.375 0.81893
0.30738 0.94163 0.07409
0.0 0.05738 0.2452
0.0 0.9375 0.46311
0.75 0.25 0.09429
0.68337 0.43238 0.80585
0.5 0.0 0.15571
0.18238 0.4375 0.98932
0.5 0.125 0.2286
0.0 0.125 0.56101
0.69163 0.93337 0.63213
0.5 0.625 0.13285
0.125 0.75 0.93107
0.19163 0.25 0.88962
0.5 0.9375 0.03689
0.80837 0.75 0.19898
0.25 0.25 0.40571
0.30738 0.55837 0.07409
0.875 0.25 0.4786
0.19163 0.25 0.80102
0.31763 0.06663 0.44415
0.80738 0.44163 0.57409
0.18238 0.06663 0.05585
0.5 0.55738 0.7452
0.5 0.25 0.68111
0.18337 0.56763 0.30585
0.5 0.375 0.52137
0.3125 0.94263 0.78209
0.69263 0.9375 0.96791
0.30837 0.75 0.38962
0.8125 0.75 0.78689
0.69163 0.56663 0.63213
0.0 0.5 0.65571
0.81663 0.94163 0.38213
0.80738 0.9375 0.53209
0.5 0.625 0.77141
0.68337 0.44163 0.88213
0.0 0.625 0.43899
0.80837 0.56663 0.86787
0.0 0.125 0.68107
0.19163 0.55738 0.82409
0.3125 0.44263 0.21791
0.0 0.25 0.875
0.18238 0.5625 0.01069
0.80738 0.25 0.5048
0.3125 0.06763 0.73932
0.75 0.25 0.40571
0.375 0.75 0.9786
0.875 0.75 0.88285
0.875 0.93337 0.59465
0.875 0.75 0.93107
0.31663 0.06763 0.80585
0.69263 0.5625 0.96791
0.8125 0.05738 0.28209
0.80837 0.25 0.88962
0.31763 0.0625 0.51069
0.80837 0.06663 0.13213
0.6875 0.55738 0.78209
0.31663 0.05837 0.88213
0.625 0.25 0.38285
0.0 0.125 0.27141
0.68337 0.375 0.15535
0.625 0.43337 0.09465
0.6875 0.56763 0.26069
0.5 0.94263 0.7452
0.3125 0.05738 0.21791
0.5 0.44163 0.63962
0.875 0.56663 0.59465
0.5 0.875 0.13285
0.68238 0.25 0.58233
0.69263 0.55837 0.07409
0.18337 0.93238 0.30585
0.31663 0.93238 0.19415
0.625 0.75 0.61716
0.18238 0.9375 0.01069
0.0 0.5625 0.46311
0.125 0.75 0.88285
0.625 0.25 0.31101
0.125 0.06663 0.40535
0.18337 0.875 0.65535
0.80738 0.94163 0.42592
0.31763 0.9375 0.48932
0.0 0.4375 0.53689
0.875 0.75 0.22863
0.30837 0.25 0.69898
0.875 0.25 0.06893
0.68238 0.43337 0.44415
0.5 0.94163 0.36038
0.80837 0.94263 0.82409
0.69163 0.55738 0.67592
0.69263 0.25 0.9952
0.0 0.0 0.65571
0.625 0.06663 0.09465
0.69263 0.0625 0.03209
0.125 0.25 0.11716
0.625 0.75 0.56893
0.18337 0.94163 0.38213
0.19163 0.44263 0.17592
0.69263 0.44163 0.92592
0.80738 0.4375 0.46791
0.68337 0.94163 0.11787
0.3125 0.43238 0.73932
0.80837 0.44263 0.17592
0.30738 0.0625 0.03209
0.81663 0.44163 0.61787
0.0 0.05837 0.94898
0.3125 0.93238 0.26069
0.30738 0.75 0.0048
0.5 0.25 0.5689
0.18337 0.125 0.34465
0.68238 0.06663 0.44415
0.875 0.75 0.52141
0.1875 0.43238 0.76069
0.0 0.375 0.27141
0.18238 0.93337 0.94415
0.875 0.25 0.18899
0.30837 0.44263 0.32409
0.5 0.5 0.15571
0.125 0.75 0.22863
0.5 0.56763 0.33233
0.68238 0.9375 0.48932
0.625 0.25 0.02141
0.0 0.25 0.52137
0.31763 0.5625 0.48932
0.19163 0.43337 0.13213
0.6875 0.25 0.28689
0.18238 0.0625 0.98932
0.18337 0.05837 0.61787
0.8125 0.43238 0.76069
0.5 0.55837 0.44898
0.31663 0.94163 0.11787
0.80837 0.05738 0.17592
0.0 0.94163 0.13962
0.1875 0.94263 0.71791
0.81663 0.625 0.65535
0.69163 0.75 0.30102
0.875 0.06663 0.40535
0.80837 0.25 0.80102
0.18238 0.75 0.08233
0.375 0.75 0.68899
0.80837 0.55738 0.82409
0.19263 0.4375 0.46791
0.18337 0.55837 0.38213
0.0 0.43238 0.83233
0.80738 0.0625 0.46791
0.625 0.75 0.9786
0.0 0.625 0.36716
0.68337 0.875 0.84465
0.19163 0.75 0.11038
0.0 0.56763 0.16767
0.5 0.44163 0.55102
"""
self.coord = "relative"
self.cages = """
12 0.5 1.5 1.0
15 0.0 0.75 0.56207
14 0.23349 0.75 0.34505
14 0.5 -0.01651 0.59505
12 0.0 -0.5205 -0.20726
12 -0.7705 -1.25 -0.95726
12 0.25 -0.25 0.75
12 -0.2705 0.75 0.45726
12 -0.25 -0.25 -0.25
15 0.5 0.25 0.18793
12 1.0 1.5 1.5
15 0.5 1.25 1.06207
12 0.0 -0.25 -0.15352
12 0.2705 -0.75 -0.45726
14 1.0 0.51651 0.90495
14 0.0 0.01651 0.09505
12 -0.2705 1.25 0.54274
14 -0.23349 -0.75 -0.34505
15 0.5 -0.25 0.81207
12 0.5 1.25 0.90352
16 0.5 1.25 0.77355
14 -0.26651 -1.25 -0.84505
12 0.0 1.0 0.5
14 0.5 0.01651 0.40495
12 0.2295 1.25 0.95726
12 0.5 0.75 0.09648
14 0.26651 0.75 0.15495
14 0.5 0.48349 0.40495
16 0.0 -0.25 -0.02355
14 0.26651 1.25 0.84505
14 0.73349 1.25 0.84505
15 0.5 0.75 -0.06207
14 1.0 0.48349 1.09505
12 0.7705 1.25 0.95726
12 0.0 -0.75 -0.40352
14 0.0 0.75 0.625
14 0.5 0.25 0.125
16 0.5 0.25 0.47645
12 0.5 1.0 1.0
15 0.0 -0.75 -0.56207
14 0.76651 1.75 1.34505
16 0.5 -0.25 0.52355
14 0.5 -0.25 0.875
16 0.5 0.75 0.22645
12 0.0 0.75 0.40352
16 0.0 0.75 0.27355
12 0.5 -0.5205 0.70726
12 0.5 0.25 0.34648
16 0.0 0.25 0.02355
14 0.0 0.25 0.375
16 0.0 -0.75 -0.27355
12 0.7705 0.75 0.04274
12 0.5 -0.0205 0.29274
12 0.5 -0.25 0.65352
12 1.2705 1.75 1.45726
12 0.5 0.5205 0.29274
15 0.0 0.25 0.31207
12 0.0 0.5205 0.20726
12 -0.25 0.25 -0.75
12 0.5 1.0205 0.70726
14 0.23349 1.25 0.65495
12 1.0 1.0205 0.79274
12 0.0 0.25 0.15352
12 1.0 -0.0205 1.20726
14 0.0 -0.01651 -0.09505
15 0.0 -0.25 -0.31207
14 0.5 0.51651 0.59505
12 0.25 0.25 0.25
"""
self.bondlen = 3
self.cell = """
13.27885453677605 13.27885453677605 109.13204656869078
"""
self.density = 0.6026844037544047
self.cell = cellvectors(a=13.27885453677605,
b=13.27885453677605,
                                c=109.13204656869078)
/ExcelConverter-0.1.35.tar.gz/ExcelConverter-0.1.35/converter/excel_compiler.py
import networkx as nx
from networkx.classes.digraph import DiGraph
from .excel_node import *
from .excel_util import *
from .excel_lib import *
from .excel_wrapper import ExcelOpxWrapper as ExcelWrapperImpl
from .excel_wrapper import OpxRange
from .tokenizer import ExcelParser, f_token
logger = logging.getLogger(__name__)
# Set the sheet into a node
def add_node_to_graph(g, n):
g.add_node(n)
g.node[n]['sheet'] = n.sheet
if isinstance(n, Cell):
g.node[n]['label'] = n.col + str(n.row)
else:
# strip the sheet
g.node[n]['label'] = n.address()[n.address().find('!') + 1:]
class ExcelCompiler(object):
def __init__(self, filename=None, *args, **kwargs):
# Load file
super(ExcelCompiler, self).__init__()
self.filename = filename
# Set the filename
self.excel = ExcelWrapperImpl(filename=filename)
self.excel.connect()
self.log = logging.getLogger("decode.{0}".format(self.__class__.__name__))
def cell2code(self, cell):
# Generate python code for the given cell
if cell.formula is not None:
exp = shunting_yard(cell.formula or str(cell.value))
if exp is None or len(exp) == 0:
raise Exception("Incorrect expression: [%s], cell: [%s], "
"cell.formula: [%s], cell.value: [%s]"
% (exp, cell, cell.formula, cell.value))
# Correct the ranges found concerning the excel max col and row
for e in exp:
if isinstance(e, RangeNode):
e.token.tvalue = correct_range_single(self.excel, e.token.tvalue, cell.sheet)
ast, root = build_ast(exp)
code = root.emit(ast, context=Context(cell, self.excel))
elif isinstance(cell.value, str) and re.match('[A-Za-z]+', cell.value):
ast = None
code = cell.value
else:
ast = None
code = str('"' + cell.value + '"' if isinstance(cell.value, str) else cell.value)
return code, ast
def gen_graph(self, rng, sheet=None):
return self.gen_graph_multi({sheet: rng})
"""
Example usage:
    gen_graph_multi({'Output1': 'A1:B5', 'Output2': 'A1:E8'})
    gen_graph_multi({'Output1': '[A1:B5, AB4:FE85]', 'Output2': 'A1:E8'})
"""
def gen_graph_multi(self, ranges):
        # When given starting points (e.g., A6 or A3:B7) on particular sheets, generate a
        # Spreadsheet instance that captures the logic and control flow of the equations.
# cursheet = sheet if sheet else self.excel.get_active_sheet()
# self.excel.set_sheet(cursheet)
# no need to output NumberRows and NumberColumns here,
# since seed can be a list of unlinked cells
seeds = None
try:
seeds = Cell.make_cells_multi(self.excel, ranges)
if logger.isEnabledFor(logging.DEBUG):
logger.info("Ranges [%s] expanded into [%s] cell_len", ranges, seeds)
return self.gen_graph_intern(seeds)
except Exception as ex:
raise Exception("Error making cells, ranges: [%s], "
"excel: [%s]" % (ranges, self.excel)) from ex
def gen_graph_intern(self, seeds):
# only keep seeds with formulas or numbers
# Scan seeds for data_types int, float or strings
seeds = [s for s in seeds if s.formula or
isinstance(s.value, (int, float, str))]
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Filtered seeds (keeping formulas, "
"strings, ints, floats): [%s] ", len(seeds))
todo = [s for s in seeds if s.formula]
if logger.isEnabledFor(logging.DEBUG):
logger.info("Todo cells length: [%s]", len(todo))
# map of all cells
cellmap = dict([(x.address(), x) for x in seeds])
# directed graph
graph = nx.DiGraph()
# match the info in cellmap
for c in list(cellmap.values()):
add_node_to_graph(graph, c)
cursheet = self.excel.get_active_sheet()
while todo:
c1 = todo.pop()
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Processing formula cell: [%s]", c1)
# set the current sheet so relative addresses resolve properly
if c1.sheet != cursheet:
cursheet = c1.sheet
self.excel.set_sheet(cursheet)
try:
pystr, ast = self.cell2code(c1) # parse the formula into code
c1.python_expression = pystr # set the code & compile it (will flag problems sooner rather than later)
c1.compile()
except Exception as ex:
raise Exception(
"Error compiling/cell2code cell: [%s], todo length: [%s]" % (c1, len(todo))) from ex
if ast is not None:
# get all the cells/ranges this formula refers to
# No need to correct for '$' as it has been moved to the cell2node() function
deps = [x.tvalue for x in ast.nodes() if isinstance(x, RangeNode)]
deps = uniqueify(deps) # remove dupes
# deps = [x.tvalue.replace('$', '') for x in ast.nodes() if isinstance(x, RangeNode)]
# deps = uniqueify(deps) # remove dupes
for dep in deps:
                    # Always set the active sheet, especially when no dependency sheet is found: in that case
                    # the dependency is on the same sheet as cursheet, but if we had just handled a dependency
                    # to another sheet, failing to reset it would make us use the wrong sheet.
dep_sheet = create_sheet(dep)
dep_sheet = dep_sheet if dep_sheet else cursheet
self.excel.set_sheet(dep_sheet if dep_sheet else cursheet)
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Processing dependency [%s!%s] of formula cell: [%s]", dep_sheet, dep, c1)
# if the dependency is a multi-cell range, create a range object
rng_address, rng = None, None
if is_range(dep):
try:
cells, nrows, ncols, rng_details = Cell.make_cells(self.excel, dep, sheet=dep_sheet,
parent_ast=ast, cellmap=cellmap)
# this will make sure we always have an absolute address
rng_address = create_address(rng_details, sheet=dep_sheet)
# No cells created if not needed (already exists in cell map)
if cells is None or rng_address in cellmap:
# already dealt with this range
# add an edge from the range to the parent
graph.add_edge(cellmap[rng_address], cellmap[c1.address()])
if logger.isEnabledFor(logging.DEBUG):
logger.debug(
"Skipping range dependency [%s] of dependency: [%s], "
"of formula cell: [%s], already present",
rng_address, dep, c1)
continue
# update the original range as the returned one is probably more specific if it wasn't a list
rng = CellRange(rng_details, dep, None, sheet=dep_sheet)
# cells, nrows, ncols = Cell.make_cells(self.excel, {cursheet: dep})
# save the range
                            # Ensure to use the original address to store it, as others need it to find it.
cellmap[rng_address] = rng
# cellmap[rng.address()] = rng
# add an edge from the range to the parent
add_node_to_graph(graph, rng)
graph.add_edge(rng, cellmap[c1.address()])
# cells in the range should point to the range as their parent
target = rng
except Exception as ex:
raise Exception(
"Error making cells, cell: [%s], ast: [%s], dep: [%s], "
"rng address: [%s], rng: [%s], todo_len: [%s], seeds_len: [%s]" %
(c1, ast, dep, rng_address, rng, len(todo), len(seeds))) from ex
else:
# not a range, create the cell object
cells = (Cell.resolve_cell(self.excel, dep, sheet=dep_sheet),)
target = cellmap[c1.address()]
# process each cell
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Adding [%s] row-cells for dependency [%s!%s]", len(cells), dep_sheet, dep)
for c2 in flatten(cells):
                        # Only process cells that are not None
if c2 is not None:
# if we haven't treated this cell already
if c2.address() in cellmap:
if logger.isEnabledFor(LOG_FINE):
logger.log(LOG_FINE, "Skipping [%s] cell for dependency [%s!%s], "
"already present", c2,
dep_sheet, dep)
else:
if c2.formula:
# cell with a formula, needs to be added to the todo list
todo.append(c2)
else:
try:
# constant cell, no need for further processing, just remember to set the code
pystr, ast = self.cell2code(c2)
c2.python_expression = pystr
c2.compile()
except Exception as ex:
raise Exception(
"Error compiling/cell2code dependency cell, cell dep: [%s] cell: [%s], "
"ast: [%s], dep: [%s], "
"todo: [%s], seeds: [%s]" % (c2, c1, ast, dep, todo, seeds)) from ex
# save in the cellmap
if logger.isEnabledFor(LOG_FINE):
logger.log(LOG_FINE, "Adding cell [%s] to cellmap for dependency [%s!%s]", c2,
dep_sheet, dep)
cellmap[c2.address()] = c2
# add to the graph
add_node_to_graph(graph, c2)
# add an edge from the cell to the parent (range or cell)
graph.add_edge(cellmap[c2.address()], target)
if logger.isEnabledFor(logging.INFO):
logger.info("Graph construction done, %s nodes, %s edges, %s cellmap entries",
len(graph.nodes()), len(graph.edges()), len(cellmap))
sp = Spreadsheet(graph, cellmap)
return sp
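# Typical usage (illustrative sketch; the workbook name, sheet name and cell addresses below are
# assumptions, not part of this module):
#   compiler = ExcelCompiler(filename="workbook.xlsx")
#   spreadsheet = compiler.gen_graph("A1:B5", sheet="Sheet1")
#   value = spreadsheet.evaluate("Sheet1!A1")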
def shunting_yard(expression):
"""
Tokenize an excel formula expression into reverse polish notation
Core algorithm taken from wikipedia with varargs extensions from
http://www.kallisti.net.nz/blog/2008/02/extension-to-the-shunting-yard-algorithm-to-allow-variable-numbers-of-arguments-to-functions/
"""
# remove leading =
if expression.startswith('='):
expression = expression[1:]
p = ExcelParser()
p.parse(expression)
# insert tokens for '(' and ')', to make things clearer below
tokens = []
for t in p.tokens.items:
if t.ttype == "function" and t.tsubtype == "start":
t.tsubtype = ""
tokens.append(t)
tokens.append(f_token('(', 'arglist', 'start'))
elif t.ttype == "function" and t.tsubtype == "stop":
tokens.append(f_token(')', 'arglist', 'stop'))
elif t.ttype == "subexpression" and t.tsubtype == "start":
t.tvalue = '('
tokens.append(t)
elif t.ttype == "subexpression" and t.tsubtype == "stop":
t.tvalue = ')'
tokens.append(t)
else:
tokens.append(t)
# http://office.microsoft.com/en-us/excel-help/calculation-operators-and-precedence-HP010078886.aspx
operators = {':': Operator(':', 8, 'left'), '': Operator(' ', 8, 'left'), ',': Operator(',', 8, 'left'),
'u-': Operator('u-', 7, 'left'), '%': Operator('%', 6, 'left'), '^': Operator('^', 5, 'left'),
'*': Operator('*', 4, 'left'), '/': Operator('/', 4, 'left'), '+': Operator('+', 3, 'left'),
'-': Operator('-', 3, 'left'), '&': Operator('&', 2, 'left'), '=': Operator('=', 1, 'left'),
'<': Operator('<', 1, 'left'), '>': Operator('>', 1, 'left'), '<=': Operator('<=', 1, 'left'),
'>=': Operator('>=', 1, 'left'), '<>': Operator('<>', 1, 'left')}
output = collections.deque()
stack = []
were_values = []
arg_count = []
for t in tokens:
if t.ttype == "operand":
output.append(create_node(t))
if were_values:
were_values.pop()
were_values.append(True)
elif t.ttype == "function":
stack.append(t)
arg_count.append(0)
if were_values:
were_values.pop()
were_values.append(True)
were_values.append(False)
elif t.ttype == "argument":
while stack and (stack[-1].tsubtype != "start"):
output.append(create_node(stack.pop()))
if were_values.pop():
arg_count[-1] += 1
were_values.append(False)
if not len(stack):
raise Exception("Mismatched or misplaced parentheses")
elif t.ttype.startswith('operator'):
if len(stack) > 0 and t.ttype.endswith('-prefix') and t.tvalue == "-":
o1 = operators['u-']
else:
o1 = operators[t.tvalue]
while stack and stack[-1].ttype.startswith('operator'):
if stack[-1].ttype.endswith('-prefix') and stack[-1].tvalue == "-":
o2 = operators['u-']
else:
o2 = operators[stack[-1].tvalue]
if ((o1.associativity == "left" and o1.precedence <= o2.precedence)
or
(o1.associativity == "right" and o1.precedence < o2.precedence)):
output.append(create_node(stack.pop()))
else:
break
stack.append(t)
elif t.tsubtype == "start":
stack.append(t)
elif t.tsubtype == "stop":
while stack and stack[-1].tsubtype != "start":
output.append(create_node(stack.pop()))
if not stack:
raise Exception("Mismatched or misplaced parentheses")
stack.pop()
if stack and stack[-1].ttype == "function":
f = create_node(stack.pop())
a = arg_count.pop()
w = were_values.pop()
if w:
a += 1
f.num_args = a
output.append(f)
while stack:
if stack[-1].tsubtype == "start" or stack[-1].tsubtype == "stop":
raise Exception("Mismatched or misplaced parentheses")
output.append(create_node(stack.pop()))
# convert to list
result = [x for x in output]
return result
def build_ast(expression):
"""build an AST from an Excel formula expression in reverse polish notation"""
# use a directed graph to store the tree
graph = DiGraph()
stack = []
for n in expression:
# Since the graph does not maintain the order of adding nodes/edges
# add an extra attribute 'pos' so we can always sort to the correct order
if isinstance(n, OperatorNode):
if n.ttype == "operator-infix":
arg2 = stack.pop()
arg1 = stack.pop()
graph.add_node(arg1, pos=1)
graph.add_node(arg2, pos=2)
graph.add_edge(arg1, n)
graph.add_edge(arg2, n)
else:
arg1 = stack.pop()
graph.add_node(arg1, pos=1)
graph.add_edge(arg1, n)
elif isinstance(n, FunctionNode):
args = [stack.pop() for _ in range(n.num_args)]
args.reverse()
for i, a in enumerate(args):
graph.add_node(a, pos=i)
graph.add_edge(a, n)
# for i in range(n.num_args):
# G.add_edge(stack.pop(),n)
else:
graph.add_node(n, pos=0)
stack.append(n)
if len(stack) < 1:
raise Exception("Error building ast, expression: [%s], graph: [%s]" % (expression, graph))
return graph, stack.pop()
class Context(object):
"""A small context object that nodes in the AST can use to emit code"""
def __init__(self, curcell, excel):
# the current cell for which we are generating code
self.curcell = curcell
# a handle to an excel instance
self.excel = excel
def create_node(t):
"""Simple factory function"""
if t.ttype == "operand":
if t.tsubtype == "range":
return RangeNode(t)
else:
return OperandNode(t)
elif t.ttype == "function":
return FunctionNode(t)
elif t.ttype.startswith("operator"):
return OperatorNode(t)
else:
return ASTNode(t)
class Operator:
"""Small wrapper class to manage operators during shunting yard"""
def __init__(self, value, precedence, associativity):
self.value = value
self.precedence = precedence
self.associativity = associativity
class Spreadsheet(object):
def __init__(self, g, cellmap):
super(Spreadsheet, self).__init__()
self.graph = g
self.cellmap = cellmap
self.__call_stack = []
self.params = None
def contains(self, cell):
return cell in self.cellmap
def plot_graph(self):
import matplotlib.pyplot as plt
pos = nx.spring_layout(self.graph, iterations=2000)
nx.draw_networkx_nodes(self.graph, pos)
nx.draw_networkx_edges(self.graph, pos, arrows=True)
nx.draw_networkx_labels(self.graph, pos)
plt.show()
def set_value(self, cell, val, is_addr=True):
if is_addr is not None:
cell = self.cellmap[cell]
if cell.value != val:
            # We should always reset the successors when the value changes, also when it changes from a
            # None value to a not-None value; otherwise the successors aren't informed and won't reset
            # their values, so the old value would be used when they are evaluated.
self.reset_all(cell, True)
cell.value = val
            # Because we set a value explicitly we mark the cell as not-dirty. Suppose the cell contains a
            # formula, e.g. a reference to another cell: setting the value overrides that behaviour for the
            # moment. If we didn't mark it as not-dirty, reading the value through evaluation would
            # re-evaluate the formula (the reset call in this method made the cell dirty), store the value
            # of the reference and overwrite the value that was set "manually" via this method, as if it
            # had never been set.
            # Note: until the cell is reset again (and so becomes dirty) the set value will be returned;
            # once it is dirty it will be re-evaluated, which results in using the referenced value again.
cell.dirty = False
def reset(self, cell):
self.reset_all(cell, not cell.dirty or isinstance(cell, CellRange))
# self.reset_all(cell, cell.value is not None or isinstance(cell, CellRange))
def reset_all(self, cell, reset_successors):
if logger.isEnabledFor(LOG_FINE):
logger.log(LOG_FINE, "Resetting cell, dirty: [%s], cell: [%s]", cell.dirty, cell)
if cell.reset_inprogress:
return # Already been reset, let's skip it.
# raise Exception("Cell reset in progress: [%s]" % (cell))
try:
cell.reset_inprogress = True
            # Note: we split the reset method because previously the reset was only performed when the
            # cell value wasn't already None. That meant a change from a None value would not reset the
            # successors, so the successors would keep using the old value.
# CellRange instances are skipped as their value isn't used
if isinstance(cell, Cell):
cell.value = None
            # If already dirty, no need to continue; we assume its successors have been made dirty as well.
# The value of CellRange isn't used so we have to skip it
if not cell.dirty or isinstance(cell, CellRange):
if isinstance(cell, Cell):
cell.dirty = True
if reset_successors:
if logger.isEnabledFor(LOG_FINE):
childs = '\n'.join(str(p) for p in self.graph.succ[cell].keys())
logger.log(LOG_FINE, "Resetting children of: [%s]:\n[%s]", cell.address(), childs)
list(map(self.reset, self.graph.successors(cell)))
finally:
cell.reset_inprogress = False
def print_value_tree(self, addr, indent):
cell = self.cellmap[addr]
print(("%s %s = %s" % (" " * indent, addr, cell.value)))
for c in self.graph.predecessors_iter(cell):
self.print_value_tree(c.address(), indent + 1)
# def recalculate(self, addr):
# for c in list(self.cellmap.values()):
# if isinstance(c, CellRange):
# self.evaluate_range(c, is_addr=False)
# else:
# self.evaluate(c, is_addr=False)
# return self.evaluate(c)
# def evaluate_range(self, rng, is_addr=True):
# if is_addr:
# rng = self.cellmap[rng]
#
# # its important that [] gets treated as false here
# if rng.value:
# return rng.value
# elif isinstance(rng.value, str):
# return rng.value
#
# cells, nrows, ncols = rng.celladdress, rng.nrows, rng.ncols
# if 1 == nrows or 1 == ncols:
# data = [self.evaluate(c) for c in cells]
# else:
# data = [[self.evaluate(c) for c in cells[i]] for i in range(len(cells))]
#
# # Ensure to indicate the CellRange instance isn't dirty anymore, else it will get ignored next time such that
# # it's successors aren't reset and return the wrong value (example: B1(Cell)->
# B1:B2(CellRange)-> Vlookup(Cell)
# rng.value = data
# rng.dirty = False
# return data
def evaluate(self, cell, is_addr=True):
if is_addr is not None:
cell = self.cellmap[cell]
if cell.eval_inprogress:
raise Exception("Evaluation of cell already in progress, cell: [%s], call stack:\n%s"
% (cell, self.call_stack_string(self.__call_stack)))
        # We only evaluate cells that have a formula and are dirty; if a cell isn't dirty we return its
        # current value. We deliberately don't check whether the value is None, as None might be a valid
        # result of a formula, and re-evaluating on None could cause infinite or unnecessary loops.
if not cell.formula or not cell.dirty:
return cell.value
# if not cell.formula:
# return cell.value
def eval_cell(address):
# return address
try:
# Note: we have to evaluate the cell and return the value, else an expression like "A3+A4" will fail
# as python will try to "add" strings. This expression is processed by python directly and not through
# a function in the excel_lib
value = self.evaluate(address)
if not value:
try:
if value is None or len(value) == 0 and cell.formula:
# The split will fail in case it's not a valid cell address
split_address(cell.formula[1:])
# In case it concerns a cell reference,
# it should return 0 in case the references cell is None
value = 0
except Exception:
pass
return value
# return self.evaluate(address)
except:
raise self.create_exception("Problem evaluating cell [%s]", address)
def eval_range(rng):
try:
# We return the range and let the "user" (function) decide when to evaluate.
# Example: in case of a (v/h)lookup function we don't want to evaluate the complete search
                # range before feeding it to the lookup function, as the lookup function only uses small
                # parts of the search range, and evaluating a huge range can easily lead to circular references.
return rng
# return self.evaluate_range(rng)
except:
raise self.create_exception("Problem evaluating cell [%s]", rng)
try:
cell.eval_inprogress = True
if logger.isEnabledFor(LOG_FINE):
logger.log(LOG_FINE, "Evaluating: %s, %s", cell.address(), cell.python_expression)
if is_eval_allowed(self, cell):
# indicate it's not dirty anymore before evaluation occurs to overcome infinite recursive loops.
cell.dirty = False
self.__call_stack.append(str(cell))
vv = eval(cell.compiled_expression)
else:
vv = cell.compiled_expression
cell.dirty = False
cell.value = vv
digits = get_zeros(cell)
if digits is not None and not isinstance(cell.value, str) and cell.value is not None:
cell.value = round(cell.value, digits)
except Exception as ex:
if str(ex).startswith("Problem evaluating"):
raise ex
else:
raise Exception(
"Problem evaluating: [%s], call stack: \n%s" % (
cell, self.call_stack_string(self.__call_stack))) from ex
finally:
cell.eval_inprogress = False
self.__call_stack.pop()
if len(self.__call_stack) > 10000:
self.__call_stack = []
logger.error("Call stack has reached maximum. Cell: [%s]", cell)
return cell.value
def create_exception(self, msg, rng):
if rng in self.cellmap:
rng = self.cellmap[rng]
return Exception(msg % rng)
def call_stack_string(self, call_stack):
if len(call_stack) > 0:
result = ''
for x in call_stack:
result += x + '\n'
return result
        return ''
/Cubane-1.0.11.tar.gz/Cubane-1.0.11/cubane/backend/accounts/forms.py
from __future__ import unicode_literals
from django import forms
from django.contrib.auth.models import User, Group
from cubane.forms import BaseModelForm, BaseForm
from cubane.backend.accounts.models import ProxyUser, ProxyGroup, ProxyPermission
class AccountForm(BaseModelForm):
"""
Form for editing staff accounts.
"""
class Meta:
model = ProxyUser
fields = '__all__'
exclude = ['password', 'date_joined', 'is_staff', 'last_login', 'user_permissions']
section_fields = [
':Account Details',
'username',
'first_name',
'last_name',
'email',
'initial_password',
'initial_password_confirm',
':Permissions',
'is_active',
'is_superuser',
'groups'
]
initial_password = forms.CharField(
label='Password',
widget=forms.PasswordInput(attrs={'placeholder': 'Password'}),
help_text='Provide the initial password for this user account.'
)
initial_password_confirm = forms.CharField(
label='Password (confirm)',
widget=forms.PasswordInput(attrs={'placeholder': 'Password (confirm)'}),
help_text='Confirm the password by re-typing the password from above.'
)
def configure(self, request, instance, edit):
super(AccountForm, self).configure(request, instance, edit)
# Only superuser can change superuser field.
if not request.user.is_superuser:
del self.fields['is_superuser']
# password confirmation only available for create
if edit:
del self.fields['initial_password']
del self.fields['initial_password_confirm']
self.update_sections()
# do not present groups if we do not have groups available
if Group.objects.count() == 0:
del self.fields['groups']
self.update_sections()
def clean_username(self):
d = self.cleaned_data
username = d.get('username')
if username:
username = username.lower()
# username must be unique
users = User.objects.filter(username__iexact=username)
if self._edit and self._instance:
users = users.exclude(pk=self._instance.pk)
if users.count() > 0:
raise forms.ValidationError('A user with that username already exists.')
return username
def clean_email(self):
d = self.cleaned_data
email = d.get('email')
if email:
email = email.lower()
# email must be unique
users = User.objects.filter(email__iexact=email)
if self._edit and self._instance:
users = users.exclude(pk=self._instance.pk)
if users.count() > 0:
raise forms.ValidationError('A user with that email address already exists.')
return email
def clean_initial_password_confirm(self):
password = self.cleaned_data.get('initial_password', None)
password_confirm = self.cleaned_data.get('initial_password_confirm', None)
if password and password_confirm:
if password != password_confirm:
raise forms.ValidationError('Password Confirmation does not match Password.')
return password_confirm
class GroupForm(BaseModelForm):
"""
Form for editing account groups.
"""
class Meta:
model = ProxyGroup
fields = '__all__'
class PermissionForm(BaseModelForm):
"""
Form for editing user permissions.
"""
class Meta:
model = ProxyPermission
fields = '__all__'
class ChangePasswordForm(BaseForm):
password = forms.CharField(
label='Password',
widget=forms.PasswordInput(attrs={'placeholder': 'Password'})
)
password_confirm = forms.CharField(
label='Password (confirm)',
widget=forms.PasswordInput(attrs={'placeholder': 'Password (confirm)'})
)
def clean_password_confirm(self):
password = self.cleaned_data.get('password', None)
password_confirm = self.cleaned_data.get('password_confirm', None)
if password and password_confirm:
if password != password_confirm:
raise forms.ValidationError('Password Confirmation does not match Password.')
        return password_confirm
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/materialdjango/static/materialdjango/components/bower_components/prism/components/prism-gherkin.min.js | Prism.languages.gherkin={pystring:{pattern:/("""|''')[\s\S]+?\1/,alias:"string"},comment:{pattern:/((?:^|\r?\n|\r)[ \t]*)#.*/,lookbehind:!0},tag:{pattern:/((?:^|\r?\n|\r)[ \t]*)@\S*/,lookbehind:!0},feature:{pattern:/((?:^|\r?\n|\r)[ \t]*)(?:Ability|Ahoy matey!|Arwedd|Aspekt|Besigheid Behoefte|Business Need|Caracteristica|Característica|Egenskab|Egenskap|Eiginleiki|Feature|Fīča|Fitur|Fonctionnalité|Fonksyonalite|Funcionalidade|Funcionalitat|Functionalitate|Funcţionalitate|Funcționalitate|Functionaliteit|Fungsi|Funkcia|Funkcija|Funkcionalitāte|Funkcionalnost|Funkcja|Funksie|Funktionalität|Funktionalitéit|Funzionalità|Hwaet|Hwæt|Jellemző|Karakteristik|laH|Lastnost|Mak|Mogucnost|Mogućnost|Moznosti|Možnosti|OH HAI|Omadus|Ominaisuus|Osobina|Özellik|perbogh|poQbogh malja'|Potrzeba biznesowa|Požadavek|Požiadavka|Pretty much|Qap|Qu'meH 'ut|Savybė|Tính năng|Trajto|Vermoë|Vlastnosť|Właściwość|Značilnost|Δυνατότητα|Λειτουργία|Могућност|Мөмкинлек|Особина|Свойство|Үзенчәлеклелек|Функционал|Функционалност|Функция|Функціонал|תכונה|خاصية|خصوصیت|صلاحیت|کاروبار کی ضرورت|وِیژگی|रूप लेख|ਖਾਸੀਅਤ|ਨਕਸ਼ ਨੁਹਾਰ|ਮੁਹਾਂਦਰਾ|గుణము|ಹೆಚ್ಚಳ|ความต้องการทางธุรกิจ|ความสามารถ|โครงหลัก|기능|フィーチャ|功能|機能):(?:[^:]+(?:\r?\n|\r|$))*/,lookbehind:!0,inside:{important:{pattern:/(:)[^\r\n]+/,lookbehind:!0},keyword:/[^:\r\n]+:/}},scenario:{pattern:/((?:^|\r?\n|\r)[ \t]*)(?:Abstract Scenario|Abstrakt Scenario|Achtergrond|Aer|Ær|Agtergrond|All y'all|Antecedentes|Antecedents|Atburðarás|Atburðarásir|Awww, look mate|B4|Background|Baggrund|Bakgrund|Bakgrunn|Bakgrunnur|Beispiele|Beispiller|Bối cảnh|Cefndir|Cenario|Cenário|Cenario de Fundo|Cenário de Fundo|Cenarios|Cenários|Contesto|Context|Contexte|Contexto|Conto|Contoh|Contone|Dæmi|Dasar|Dead men tell no tales|Delineacao do Cenario|Delineação do Cenário|Dis is what went down|Dữ liệu|Dyagram senaryo|Dyagram Senaryo|Egzanp|Ejemplos|Eksempler|Ekzemploj|Enghreifftiau|Esbozo do escenario|Escenari|Escenario|Esempi|Esquema de l'escenari|Esquema del escenario|Esquema do Cenario|Esquema do Cenário|Examples|EXAMPLZ|Exempel|Exemple|Exemples|Exemplos|First off|Fono|Forgatókönyv|Forgatókönyv vázlat|Fundo|Geçmiş|ghantoH|Grundlage|Hannergrond|Háttér|Heave to|Istorik|Juhtumid|Keadaan|Khung kịch bản|Khung tình huống|Kịch bản|Koncept|Konsep skenario|Kontèks|Kontekst|Kontekstas|Konteksts|Kontext|Konturo de la scenaro|Latar Belakang|lut|lut chovnatlh|lutmey|Lýsing Atburðarásar|Lýsing Dæma|Menggariskan Senario|MISHUN|MISHUN SRSLY|mo'|Náčrt Scenára|Náčrt Scénáře|Náčrt Scenáru|Oris scenarija|Örnekler|Osnova|Osnova Scenára|Osnova scénáře|Osnutek|Ozadje|Paraugs|Pavyzdžiai|Példák|Piemēri|Plan du scénario|Plan du Scénario|Plan senaryo|Plan Senaryo|Plang vum Szenario|Pozadí|Pozadie|Pozadina|Príklady|Příklady|Primer|Primeri|Primjeri|Przykłady|Raamstsenaarium|Reckon it's like|Rerefons|Scenár|Scénář|Scenarie|Scenarij|Scenarijai|Scenarijaus šablonas|Scenariji|Scenārijs|Scenārijs pēc parauga|Scenarijus|Scenario|Scénario|Scenario Amlinellol|Scenario Outline|Scenario Template|Scenariomal|Scenariomall|Scenarios|Scenariu|Scenariusz|Scenaro|Schema dello scenario|Se ðe|Se the|Se þe|Senario|Senaryo|Senaryo deskripsyon|Senaryo Deskripsyon|Senaryo taslağı|Shiver me timbers|Situācija|Situai|Situasie|Situasie Uiteensetting|Skenario|Skenario konsep|Skica|Structura scenariu|Structură scenariu|Struktura scenarija|Stsenaarium|Swa|Swa hwaer swa|Swa hwær swa|Szablon 
scenariusza|Szenario|Szenariogrundriss|Tapaukset|Tapaus|Tapausaihio|Taust|Tausta|Template Keadaan|Template Senario|Template Situai|The thing of it is|Tình huống|Variantai|Voorbeelde|Voorbeelden|Wharrimean is|Yo\-ho\-ho|You'll wanna|Założenia|Παραδείγματα|Περιγραφή Σεναρίου|Σενάρια|Σενάριο|Υπόβαθρο|Кереш|Контекст|Концепт|Мисаллар|Мисоллар|Основа|Передумова|Позадина|Предистория|Предыстория|Приклади|Пример|Примери|Примеры|Рамка на сценарий|Скица|Структура сценарија|Структура сценария|Структура сценарію|Сценарий|Сценарий структураси|Сценарийның төзелеше|Сценарији|Сценарио|Сценарій|Тарих|Үрнәкләр|דוגמאות|רקע|תבנית תרחיש|תרחיש|الخلفية|الگوی سناریو|امثلة|پس منظر|زمینه|سناریو|سيناريو|سيناريو مخطط|مثالیں|منظر نامے کا خاکہ|منظرنامہ|نمونه ها|उदाहरण|परिदृश्य|परिदृश्य रूपरेखा|पृष्ठभूमि|ਉਦਾਹਰਨਾਂ|ਪਟਕਥਾ|ਪਟਕਥਾ ਢਾਂਚਾ|ਪਟਕਥਾ ਰੂਪ ਰੇਖਾ|ਪਿਛੋਕੜ|ఉదాహరణలు|కథనం|నేపథ్యం|సన్నివేశం|ಉದಾಹರಣೆಗಳು|ಕಥಾಸಾರಾಂಶ|ವಿವರಣೆ|ಹಿನ್ನೆಲೆ|โครงสร้างของเหตุการณ์|ชุดของตัวอย่าง|ชุดของเหตุการณ์|แนวคิด|สรุปเหตุการณ์|เหตุการณ์|배경|시나리오|시나리오 개요|예|サンプル|シナリオ|シナリオアウトライン|シナリオテンプレ|シナリオテンプレート|テンプレ|例|例子|剧本|剧本大纲|劇本|劇本大綱|场景|场景大纲|場景|場景大綱|背景):[^:\r\n]*/,lookbehind:!0,inside:{important:{pattern:/(:)[^\r\n]*/,lookbehind:!0},keyword:/[^:\r\n]+:/}},"table-body":{pattern:/((?:\r?\n|\r)[ \t]*\|.+\|[^\r\n]*)+/,lookbehind:!0,inside:{outline:{pattern:/<[^>]+?>/,alias:"variable"},td:{pattern:/\s*[^\s|][^|]*/,alias:"string"},punctuation:/\|/}},"table-head":{pattern:/(?:\r?\n|\r)[ \t]*\|.+\|[^\r\n]*/,inside:{th:{pattern:/\s*[^\s|][^|]*/,alias:"variable"},punctuation:/\|/}},atrule:{pattern:/((?:\r?\n|\r)[ \t]+)(?:'ach|'a|'ej|7|a|A také|A taktiež|A tiež|A zároveň|Aber|Ac|Adott|Akkor|Ak|Aleshores|Ale|Ali|Allora|Alors|Als|Ama|Amennyiben|Amikor|Ampak|an|AN|Ananging|And y'all|And|Angenommen|Anrhegedig a|An|Apabila|Atès|Atesa|Atunci|Avast!|Aye|A|awer|Bagi|Banjur|Bet|Biết|Blimey!|Buh|But at the end of the day I reckon|But y'all|But|BUT|Cal|Când|Cando|Cand|Ce|Cuando|Če|Ða ðe|Ða|Dadas|Dada|Dados|Dado|DaH ghu' bejlu'|dann|Dann|Dano|Dan|Dar|Dat fiind|Data|Date fiind|Date|Dati fiind|Dati|Daţi fiind|Dați fiind|Dato|DEN|Den youse gotta|Dengan|De|Diberi|Diyelim ki|Donada|Donat|Donitaĵo|Do|Dun|Duota|Ðurh|Eeldades|Ef|Eğer ki|Entao|Então|Entón|Entonces|En|Epi|E|És|Etant donnée|Etant donné|Et|Étant données|Étant donnée|Étant donné|Etant données|Etant donnés|Étant donnés|Fakat|Gangway!|Gdy|Gegeben seien|Gegeben sei|Gegeven|Gegewe|ghu' noblu'|Gitt|Given y'all|Given|Givet|Givun|Ha|Cho|I CAN HAZ|In|Ir|It's just unbelievable|I|Ja|Jeśli|Jeżeli|Kadar|Kada|Kad|Kai|Kaj|Když|Keď|Kemudian|Ketika|Khi|Kiedy|Ko|Kuid|Kui|Kun|Lan|latlh|Le sa a|Let go and haul|Le|Lè sa a|Lè|Logo|Lorsqu'<|Lorsque|mä|Maar|Mais|Mając|Majd|Maka|Manawa|Mas|Ma|Menawa|Men|Mutta|Nalikaning|Nalika|Nanging|Når|När|Nato|Nhưng|Niin|Njuk|O zaman|Og|Och|Oletetaan|Onda|Ond|Oraz|Pak|Pero|Però|Podano|Pokiaľ|Pokud|Potem|Potom|Privzeto|Pryd|qaSDI'|Quando|Quand|Quan|Så|Sed|Se|Siis|Sipoze ke|Sipoze Ke|Sipoze|Si|Şi|Și|Soit|Stel|Tada|Tad|Takrat|Tak|Tapi|Ter|Tetapi|Tha the|Tha|Then y'all|Then|Thì|Thurh|Toda|Too right|ugeholl|Und|Un|Và|vaj|Vendar|Ve|wann|Wanneer|WEN|Wenn|When y'all|When|Wtedy|Wun|Y'know|Yeah nah|Yna|Youse know like when|Youse know when youse got|Y|Za predpokladu|Za předpokladu|Zadani|Zadano|Zadan|Zadate|Zadato|Zakładając|Zaradi|Zatati|Þa þe|Þa|Þá|Þegar|Þurh|Αλλά|Δεδομένου|Και|Όταν|Τότε|А також|Агар|Але|Али|Аммо|А|Әгәр|Әйтик|Әмма|Бирок|Ва|Вә|Дадено|Дано|Допустим|Если|Задате|Задати|Задато|И|І|К тому же|Када|Кад|Когато|Когда|Коли|Ләкин|Лекин|Нәтиҗәдә|Нехай|Но|Онда|Припустимо, 
що|Припустимо|Пусть|Также|Та|Тогда|Тоді|То|Унда|Һәм|Якщо|אבל|אזי|אז|בהינתן|וגם|כאשר|آنگاه|اذاً|اگر|اما|اور|با فرض|بالفرض|بفرض|پھر|تب|ثم|جب|عندما|فرض کیا|لكن|لیکن|متى|هنگامی|و|अगर|और|कदा|किन्तु|चूंकि|जब|तथा|तदा|तब|परन्तु|पर|यदि|ਅਤੇ|ਜਦੋਂ|ਜਿਵੇਂ ਕਿ|ਜੇਕਰ|ਤਦ|ਪਰ|అప్పుడు|ఈ పరిస్థితిలో|కాని|చెప్పబడినది|మరియు|ಆದರೆ|ನಂತರ|ನೀಡಿದ|ಮತ್ತು|ಸ್ಥಿತಿಯನ್ನು|กำหนดให้|ดังนั้น|แต่|เมื่อ|และ|그러면<|그리고<|단<|만약<|만일<|먼저<|조건<|하지만<|かつ<|しかし<|ただし<|ならば<|もし<|並且<|但し<|但是<|假如<|假定<|假設<|假设<|前提<|同时<|同時<|并且<|当<|當<|而且<|那么<|那麼<)(?=[ \t]+)/,lookbehind:!0},string:{pattern:/"(?:\\.|[^"\\\r\n])*"|'(?:\\.|[^'\\\r\n])*'/,inside:{outline:{pattern:/<[^>]+?>/,alias:"variable"}}},outline:{pattern:/<[^>]+?>/,alias:"variable"}}; | PypiClean |
/GenIce2-2.1.7.1.tar.gz/GenIce2-2.1.7.1/genice2/lattices/sIII.py
import genice2.lattices
from genice2.cell import cellvectors
class Lattice(genice2.lattices.Lattice):
def __init__(self):
self.density = 0.8 # default self.density
self.bondlen = 3 # bond threshold
self.coord = "absolute"
self.cell = """
22.3842 22.3842 11.6954
"""
self.waters = """
16.181090496 4.2865743 0.0
11.033619864 5.91614406 8.03181595
18.311170968 10.087215888 2.15019929
12.150367602 10.233832398 3.669782612
8.17359063 19.3847172 3.718084614
13.306511532 0.575497782 4.504249402
1.298059758 4.674044802 0.0
11.767597782 20.269788468 10.351949402
11.1921 0.0 8.77155
7.951763208 17.753804388 5.8477
2.078596812 2.078596812 0.0
7.652486454 12.818088288 0.0
5.252900214 8.407953204 3.62849785
18.844586454 20.758211712 5.8477
17.710155198 21.086140242 0.0
14.432436792 4.630395612 5.8477
17.10824406 0.158480136 2.18411595
14.235903516 11.069434584 2.175461354
18.311170968 10.087215888 9.54520071
12.818088288 7.652486454 0.0
10.087215888 18.311170968 9.54520071
20.758211712 18.844586454 5.8477
17.328952272 8.801243598 0.0
21.086140242 17.710155198 0.0
6.203109504 18.0976257 0.0
15.822495612 19.143863208 0.0
4.073029032 12.296984112 9.54520071
9.077688468 21.808702218 7.191150598
4.674044802 1.298059758 0.0
8.407953204 5.252900214 8.06690215
7.013641386 7.013641386 2.133124006
2.390856402 6.136852272 5.8477
21.425932398 21.425932398 9.517482612
0.575497782 13.306511532 7.191150598
11.314765416 8.148296484 2.175461354
2.114411532 10.616602218 10.351949402
19.600053204 5.939199786 2.21920215
8.84063979 8.84063979 10.355224114
10.233832398 12.150367602 3.669782612
21.425932398 21.425932398 2.177917388
19.340396484 22.261534584 3.672238646
22.261534584 19.340396484 8.023161354
5.3386317 17.0455683 5.8477
4.2865743 16.181090496 0.0
15.370558614 15.370558614 9.562275994
15.265129032 21.279315888 7.99789929
19.143863208 15.822495612 0.0
13.54356021 13.54356021 1.340175886
1.625988288 3.539613546 5.8477
15.866144802 9.894040242 5.8477
19.36569063 14.1915828 2.129615386
21.808702218 9.077688468 4.504249402
2.35146021 20.03273979 4.507524114
8.407953204 5.252900214 3.62849785
11.069434584 14.235903516 2.175461354
15.265129032 21.279315888 3.69750071
5.27595594 22.225719864 9.51128405
5.8534683 5.8534683 0.0
21.808702218 9.077688468 7.191150598
17.753804388 7.951763208 5.8477
9.894040242 15.866144802 5.8477
12.296984112 4.073029032 9.54520071
11.767597782 20.269788468 1.343450598
14.235903516 11.069434584 9.519938646
22.261534584 19.340396484 3.672238646
13.306511532 0.575497782 7.191150598
3.01850937 8.1926172 2.129615386
0.575497782 13.306511532 4.504249402
2.35146021 20.03273979 7.187875886
7.013641386 7.013641386 9.562275994
19.993343598 16.247347728 5.8477
16.46805594 11.350580136 3.66358405
6.136852272 2.390856402 5.8477
3.539613546 1.625988288 5.8477
12.150367602 10.233832398 8.025617388
16.445000214 2.784146796 2.21920215
0.122665416 3.043803516 3.672238646
18.0976257 6.203109504 0.0
0.122665416 3.043803516 8.023161354
19.3847172 8.17359063 7.977315386
1.104884112 7.119070968 3.69750071
13.976246796 17.131299786 8.06690215
5.939199786 19.600053204 9.47619785
20.269788468 11.767597782 10.351949402
16.5307317 16.5307317 0.0
16.46805594 11.350580136 8.03181595
19.600053204 5.939199786 9.47619785
8.801243598 17.328952272 0.0
4.178458614 18.205741386 3.714575994
0.958267602 0.958267602 9.517482612
21.279315888 15.265129032 7.99789929
11.069434584 14.235903516 9.519938646
15.370558614 15.370558614 2.133124006
10.233832398 12.150367602 8.025617388
3.043803516 0.122665416 8.023161354
0.0 11.1921 2.92385
11.033619864 5.91614406 3.66358405
2.114411532 10.616602218 1.343450598
20.269788468 11.767597782 1.343450598
13.582956402 5.055247728 0.0
5.91614406 11.033619864 3.66358405
8.1926172 3.01850937 9.565784614
9.566111712 14.731713546 0.0
0.958267602 0.958267602 2.177917388
20.03273979 2.35146021 4.507524114
0.0 11.1921 8.77155
5.055247728 13.582956402 0.0
3.240336792 6.561704388 0.0
6.9055257 4.988990496 5.8477
17.395209504 15.4786743 5.8477
5.27595594 22.225719864 2.18411595
8.84063979 8.84063979 1.340175886
4.630395612 14.432436792 5.8477
6.518055198 12.490159758 5.8477
2.9994828 14.21060937 3.718084614
5.252900214 8.407953204 8.06690215
12.296984112 4.073029032 2.15019929
1.104884112 7.119070968 7.99789929
15.4786743 17.395209504 5.8477
8.148296484 11.314765416 2.175461354
19.36569063 14.1915828 9.565784614
7.119070968 1.104884112 7.99789929
18.205741386 4.178458614 3.714575994
17.10824406 0.158480136 9.51128405
14.1915828 19.36569063 2.129615386
16.247347728 19.993343598 5.8477
11.1921 0.0 2.92385
10.616602218 2.114411532 10.351949402
14.1915828 19.36569063 9.565784614
20.305603188 20.305603188 0.0
10.087215888 18.311170968 2.15019929
4.988990496 6.9055257 5.8477
14.21060937 2.9994828 3.718084614
8.1926172 3.01850937 2.129615386
2.784146796 16.445000214 2.21920215
19.3847172 8.17359063 3.718084614
5.91614406 11.033619864 8.03181595
5.939199786 19.600053204 2.21920215
16.445000214 2.784146796 9.47619785
6.561704388 3.240336792 0.0
10.616602218 2.114411532 1.343450598
14.731713546 9.566111712 0.0
11.350580136 16.46805594 3.66358405
9.077688468 21.808702218 4.504249402
22.225719864 5.27595594 2.18411595
13.54356021 13.54356021 10.355224114
13.976246796 17.131299786 3.62849785
20.03273979 2.35146021 7.187875886
2.784146796 16.445000214 9.47619785
3.01850937 8.1926172 9.565784614
0.158480136 17.10824406 9.51128405
17.131299786 13.976246796 8.06690215
13.270696812 9.113503188 5.8477
8.148296484 11.314765416 9.519938646
19.340396484 22.261534584 8.023161354
0.158480136 17.10824406 2.18411595
17.131299786 13.976246796 3.62849785
8.17359063 19.3847172 7.977315386
17.0455683 5.3386317 5.8477
22.225719864 5.27595594 9.51128405
4.178458614 18.205741386 7.980824006
4.073029032 12.296984112 2.15019929
18.205741386 4.178458614 7.980824006
12.490159758 6.518055198 5.8477
21.279315888 15.265129032 3.69750071
11.350580136 16.46805594 8.03181595
9.113503188 13.270696812 5.8477
3.043803516 0.122665416 3.672238646
2.9994828 14.21060937 7.977315386
7.119070968 1.104884112 3.69750071
14.21060937 2.9994828 7.977315386
11.314765416 8.148296484 9.519938646
"""
self.cages = """
12 0.2470 0.4339 1.0000
12 1.0000 1.0000 0.5000
12 0.2530 0.9339 0.5000
12 0.5661 0.7530 1.0000
12 0.7470 0.0661 0.5000
12 0.0661 0.7470 0.5000
12 0.7530 0.5661 1.0000
12 0.4339 0.2470 1.0000
12 0.9339 0.2530 0.5000
12 0.5000 0.5000 1.0000
14 0.3164 0.6836 0.2505
14 0.8164 0.8164 0.2495
14 0.6836 0.3164 0.7495
14 0.8164 0.8164 0.7505
14 0.1836 0.1836 0.2495
14 0.3164 0.6836 0.7495
14 0.4670 0.1256 0.5000
14 0.0330 0.6256 0.0000
14 0.1256 0.4670 0.5000
14 0.6836 0.3164 0.2505
14 0.1836 0.1836 0.7505
14 0.9670 0.3744 0.0000
14 0.6256 0.0330 0.0000
14 0.8744 0.5330 0.5000
14 0.5330 0.8744 0.5000
14 0.3744 0.9670 0.0000
15 0.3950 0.3950 0.5000
15 0.6050 0.6050 0.5000
15 0.1050 0.8950 1.0000
15 0.8950 0.1050 1.0000
"""
self.cell = cellvectors(a=22.3842,
b=22.3842,
                                c=11.6954)
/GoogleAppEngineMapReduce-1.9.22.0.tar.gz/GoogleAppEngineMapReduce-1.9.22.0/mapreduce/static/jquery.json-2.2.min.js
(function($){$.toJSON=function(o)
{if(typeof(JSON)=='object'&&JSON.stringify)
return JSON.stringify(o);var type=typeof(o);if(o===null)
return"null";if(type=="undefined")
return undefined;if(type=="number"||type=="boolean")
return o+"";if(type=="string")
return $.quoteString(o);if(type=='object')
{if(typeof o.toJSON=="function")
return $.toJSON(o.toJSON());if(o.constructor===Date)
{var month=o.getUTCMonth()+1;if(month<10)month='0'+month;var day=o.getUTCDate();if(day<10)day='0'+day;var year=o.getUTCFullYear();var hours=o.getUTCHours();if(hours<10)hours='0'+hours;var minutes=o.getUTCMinutes();if(minutes<10)minutes='0'+minutes;var seconds=o.getUTCSeconds();if(seconds<10)seconds='0'+seconds;var milli=o.getUTCMilliseconds();if(milli<100)milli='0'+milli;if(milli<10)milli='0'+milli;return'"'+year+'-'+month+'-'+day+'T'+
hours+':'+minutes+':'+seconds+'.'+milli+'Z"';}
if(o.constructor===Array)
{var ret=[];for(var i=0;i<o.length;i++)
ret.push($.toJSON(o[i])||"null");return"["+ret.join(",")+"]";}
var pairs=[];for(var k in o){var name;var type=typeof k;if(type=="number")
name='"'+k+'"';else if(type=="string")
name=$.quoteString(k);else
continue;if(typeof o[k]=="function")
continue;var val=$.toJSON(o[k]);pairs.push(name+":"+val);}
return"{"+pairs.join(", ")+"}";}};$.evalJSON=function(src)
{if(typeof(JSON)=='object'&&JSON.parse)
return JSON.parse(src);return eval("("+src+")");};$.secureEvalJSON=function(src)
{if(typeof(JSON)=='object'&&JSON.parse)
return JSON.parse(src);var filtered=src;filtered=filtered.replace(/\\["\\\/bfnrtu]/g,'@');filtered=filtered.replace(/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g,']');filtered=filtered.replace(/(?:^|:|,)(?:\s*\[)+/g,'');if(/^[\],:{}\s]*$/.test(filtered))
return eval("("+src+")");else
throw new SyntaxError("Error parsing JSON, source is not valid.");};$.quoteString=function(string)
{if(string.match(_escapeable))
{return'"'+string.replace(_escapeable,function(a)
{var c=_meta[a];if(typeof c==='string')return c;c=a.charCodeAt();return'\\u00'+Math.floor(c/16).toString(16)+(c%16).toString(16);})+'"';}
return'"'+string+'"';};var _escapeable=/["\\\x00-\x1f\x7f-\x9f]/g;var _meta={'\b':'\\b','\t':'\\t','\n':'\\n','\f':'\\f','\r':'\\r','"':'\\"','\\':'\\\\'};})(jQuery); | PypiClean |
/FIRSTBEATLU-0.13.1.tar.gz/FIRSTBEATLU-0.13.1/econml/dynamic/dml/_dml.py
import abc
import numpy as np
from warnings import warn
from sklearn.base import clone
from sklearn.model_selection import GroupKFold
from scipy.stats import norm
from sklearn.linear_model import (ElasticNetCV, LassoCV, LogisticRegressionCV)
from ...sklearn_extensions.linear_model import (StatsModelsLinearRegression, WeightedLassoCVWrapper)
from ...sklearn_extensions.model_selection import WeightedStratifiedKFold
from ...dml.dml import _FirstStageWrapper, _FinalWrapper
from ..._cate_estimator import TreatmentExpansionMixin, LinearModelFinalCateEstimatorMixin
from ..._ortho_learner import _OrthoLearner
from ...utilities import (_deprecate_positional, add_intercept,
broadcast_unit_treatments, check_high_dimensional,
cross_product, deprecated, fit_with_groups,
hstack, inverse_onehot, ndim, reshape,
reshape_treatmentwise_effects, shape, transpose,
get_feature_names_or_default, check_input_arrays,
filter_none_kwargs)
def _get_groups_period_filter(groups, n_periods):
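    # Map each time period to the row indices that belong to it, assuming the rows of each group
    # appear in chronological order. For example, groups=[1, 1, 2, 2] with n_periods=2 yields
    # {0: [0, 2], 1: [1, 3]}.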
group_counts = {}
group_period_filter = {i: [] for i in range(n_periods)}
for i, g in enumerate(groups):
if g not in group_counts:
group_counts[g] = 0
group_period_filter[group_counts[g]].append(i)
group_counts[g] += 1
return group_period_filter
class _DynamicModelNuisance:
"""
Nuisance model fits the model_y and model_t at fit time and at predict time
calculates the residual Y and residual T based on the fitted models and returns
the residuals as two nuisance parameters.
"""
def __init__(self, model_y, model_t, n_periods):
self._model_y = model_y
self._model_t = model_t
self.n_periods = n_periods
def fit(self, Y, T, X=None, W=None, sample_weight=None, groups=None):
"""Fit a series of nuisance models for each period or period pairs."""
assert Y.shape[0] % self.n_periods == 0, \
"Length of training data should be an integer multiple of time periods."
period_filters = _get_groups_period_filter(groups, self.n_periods)
self._model_y_trained = {}
self._model_t_trained = {j: {} for j in np.arange(self.n_periods)}
for t in np.arange(self.n_periods):
self._model_y_trained[t] = clone(self._model_y, safe=False).fit(
self._index_or_None(X, period_filters[t]),
self._index_or_None(
W, period_filters[t]),
Y[period_filters[self.n_periods - 1]])
for j in np.arange(t, self.n_periods):
self._model_t_trained[j][t] = clone(self._model_t, safe=False).fit(
self._index_or_None(X, period_filters[t]),
self._index_or_None(W, period_filters[t]),
T[period_filters[j]])
return self
def predict(self, Y, T, X=None, W=None, sample_weight=None, groups=None):
"""Calculate nuisances for each period or period pairs.
Returns
-------
Y_res : (n, d_y) matrix or vector of length n
Y residuals for each period in panel format.
This shape is required for _OrthoLearner's crossfitting.
T_res : (n, d_t, n_periods) matrix
T residuals for pairs of periods (t, j), where the data is in panel format for t
and in index form for j. For example, the residuals for (t, j) can be retrieved via
T_res[np.arange(n) % n_periods == t, ..., j]. For t < j, the entries of this
matrix are np.nan.
This shape is required for _OrthoLearner's crossfitting.
"""
assert Y.shape[0] % self.n_periods == 0, \
"Length of training data should be an integer multiple of time periods."
period_filters = _get_groups_period_filter(groups, self.n_periods)
Y_res = np.full(Y.shape, np.nan)
T_res = np.full(T.shape + (self.n_periods, ), np.nan)
shape_formatter = self._get_shape_formatter(X, W)
for t in np.arange(self.n_periods):
Y_slice = Y[period_filters[self.n_periods - 1]]
Y_pred = self._model_y_trained[t].predict(
self._index_or_None(X, period_filters[t]),
self._index_or_None(W, period_filters[t]))
Y_res[period_filters[t]] = Y_slice\
- shape_formatter(Y_slice, Y_pred)
for j in np.arange(t, self.n_periods):
T_slice = T[period_filters[j]]
T_pred = self._model_t_trained[j][t].predict(
self._index_or_None(X, period_filters[t]),
self._index_or_None(W, period_filters[t]))
T_res[period_filters[j], ..., t] = T_slice\
- shape_formatter(T_slice, T_pred)
return Y_res, T_res
def score(self, Y, T, X=None, W=None, sample_weight=None, groups=None):
assert Y.shape[0] % self.n_periods == 0, \
"Length of training data should be an integer multiple of time periods."
period_filters = _get_groups_period_filter(groups, self.n_periods)
if hasattr(self._model_y, 'score'):
Y_score = np.full((self.n_periods, ), np.nan)
for t in np.arange(self.n_periods):
Y_score[t] = self._model_y_trained[t].score(
self._index_or_None(X, period_filters[t]),
self._index_or_None(W, period_filters[t]),
Y[period_filters[self.n_periods - 1]])
else:
Y_score = None
if hasattr(self._model_t, 'score'):
T_score = np.full((self.n_periods, self.n_periods), np.nan)
for t in np.arange(self.n_periods):
for j in np.arange(t, self.n_periods):
T_score[j][t] = self._model_t_trained[j][t].score(
self._index_or_None(X, period_filters[t]),
self._index_or_None(W, period_filters[t]),
T[period_filters[j]])
else:
T_score = None
return Y_score, T_score
def _get_shape_formatter(self, X, W):
if (X is None) and (W is None):
return lambda x, x_pred: np.tile(x_pred.reshape(1, -1), (x.shape[0], 1)).reshape(x.shape)
return lambda x, x_pred: x_pred.reshape(x.shape)
def _index_or_None(self, X, filter_idx):
return None if X is None else X[filter_idx]
class _DynamicModelFinal:
"""
Final model at fit time, fits a residual on residual regression with a heterogeneous coefficient
that depends on X, i.e.
.. math ::
Y - E[Y | X, W] = \\theta(X) \\cdot (T - E[T | X, W]) + \\epsilon
and at predict time returns :math:`\\theta(X)`. The score method returns the MSE of this final
residual on residual regression.
Assumes model final is parametric with no intercept.
"""
# TODO: update docs
def __init__(self, model_final, n_periods):
self._model_final = model_final
self.n_periods = n_periods
self._model_final_trained = {k: clone(self._model_final, safe=False) for k in np.arange(n_periods)}
def fit(self, Y, T, X=None, W=None, Z=None, nuisances=None, sample_weight=None, sample_var=None, groups=None):
# NOTE: sample weight, sample var are not passed in
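        # Fit the period-specific final models backwards in time: the period-t model is fit on the
        # outcome residuals after subtracting the estimated effects of all later treatment periods.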
period_filters = _get_groups_period_filter(groups, self.n_periods)
Y_res, T_res = nuisances
self._d_y = Y.shape[1:]
for t in np.arange(self.n_periods - 1, -1, -1):
Y_adj = Y_res[period_filters[t]].copy()
if t < self.n_periods - 1:
Y_adj -= np.sum(
[self._model_final_trained[j].predict_with_res(
X[period_filters[0]] if X is not None else None,
T_res[period_filters[j], ..., t]
) for j in np.arange(t + 1, self.n_periods)], axis=0)
self._model_final_trained[t].fit(
X[period_filters[0]] if X is not None else None, T[period_filters[t]],
T_res[period_filters[t], ..., t], Y_adj)
return self
def predict(self, X=None):
"""
Return shape: m x dy x (p*dt)
"""
d_t_tuple = self._model_final_trained[0]._d_t
d_t = d_t_tuple[0] if d_t_tuple else 1
x_dy_shape = (X.shape[0] if X is not None else 1, ) + \
self._model_final_trained[0]._d_y
preds = np.zeros(
x_dy_shape +
(self.n_periods * d_t, )
)
for t in range(self.n_periods):
preds[..., t * d_t: (t + 1) * d_t] = \
self._model_final_trained[t].predict(X).reshape(
x_dy_shape + (d_t, )
)
return preds
def score(self, Y, T, X=None, W=None, Z=None, nuisances=None, sample_weight=None, sample_var=None, groups=None):
assert Y.shape[0] % self.n_periods == 0, \
"Length of training data should be an integer multiple of time periods."
Y_res, T_res = nuisances
scores = np.full((self.n_periods, ), np.nan)
period_filters = _get_groups_period_filter(groups, self.n_periods)
for t in np.arange(self.n_periods - 1, -1, -1):
Y_adj = Y_res[period_filters[t]].copy()
if t < self.n_periods - 1:
Y_adj -= np.sum(
[self._model_final_trained[j].predict_with_res(
X[period_filters[0]] if X is not None else None,
T_res[period_filters[j], ..., t]
) for j in np.arange(t + 1, self.n_periods)], axis=0)
Y_adj_pred = self._model_final_trained[t].predict_with_res(
X[period_filters[0]] if X is not None else None,
T_res[period_filters[t], ..., t])
if sample_weight is not None:
scores[t] = np.mean(np.average((Y_adj - Y_adj_pred)**2, weights=sample_weight, axis=0))
else:
scores[t] = np.mean((Y_adj - Y_adj_pred) ** 2)
return scores
class _LinearDynamicModelFinal(_DynamicModelFinal):
"""Wrapper for the DynamicModelFinal with StatsModelsLinearRegression final model.
The final model is a linear model with (d_t*n_periods) coefficients.
This model is defined after the coefficients and covariance are calculated.
"""
def __init__(self, model_final, n_periods):
super().__init__(model_final, n_periods)
self.model_final_ = StatsModelsLinearRegression(fit_intercept=False)
def fit(self, Y, T, X=None, W=None, Z=None, nuisances=None, sample_weight=None, sample_var=None, groups=None):
super().fit(Y, T, X=X, W=W, Z=Z, nuisances=nuisances,
sample_weight=sample_weight, sample_var=sample_var, groups=groups)
# Compose final model
cov = self._get_cov(nuisances, X, groups)
coef = self._get_coef_()
self.model_final_._n_out = self._d_y[0] if self._d_y else 0
self.model_final_._param_var = cov / (Y.shape[0] / self.n_periods)
self.model_final_._param = coef.T if self.model_final_._n_out else coef
def _get_coef_(self):
period_coefs = np.array([self._model_final_trained[t]._model.coef_ for t in range(self.n_periods)])
if self._d_y:
return np.array([
np.array([period_coefs[k, i, :] for k in range(self.n_periods)]).flatten()
for i in range(self._d_y[0])
])
return period_coefs.flatten()
def _get_cov(self, nuisances, X, groups):
if self._d_y:
return np.array(
[self._fit_single_output_cov((nuisances[0][:, i], nuisances[1]), X, i, groups)
for i in range(self._d_y[0])]
)
return self._fit_single_output_cov(nuisances, X, -1, groups)
def _fit_single_output_cov(self, nuisances, X, y_index, groups):
""" Calculates the covariance (n_periods*n_treatments)
x (n_periods*n_treatments) matrix for a single outcome.
"""
Y_res, T_res = nuisances
# Calculate auxiliary quantities
period_filters = _get_groups_period_filter(groups, self.n_periods)
# X ⨂ T_res
XT_res = np.array([
[
self._model_final_trained[0]._combine(
X[period_filters[0]] if X is not None else None,
T_res[period_filters[t], ..., j],
fitting=False
)
for j in range(self.n_periods)
]
for t in range(self.n_periods)
])
d_xt = XT_res.shape[-1]
# sum(model_final.predict(X, T_res))
Y_diff = np.array([
np.sum([
self._model_final_trained[j].predict_with_res(
X[period_filters[0]] if X is not None else None,
T_res[period_filters[j], ..., t]
) for j in np.arange(t, self.n_periods)],
axis=0
)
for t in np.arange(self.n_periods)
])
J = np.zeros((self.n_periods * d_xt,
self.n_periods * d_xt))
Sigma = np.zeros((self.n_periods * d_xt,
self.n_periods * d_xt))
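        # J collects the block Jacobian of the moment conditions and Sigma the
        # covariance of the moments; the sandwich product inv(J) @ Sigma @ inv(J).T
        # returned at the end is the covariance of the stacked period coefficients.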
for t in np.arange(self.n_periods):
res_epsilon_t = (Y_res[period_filters[t]] -
(Y_diff[t][:, y_index] if y_index >= 0 else Y_diff[t])
).reshape(-1, 1, 1)
resT_t = XT_res[t][t]
for j in np.arange(self.n_periods):
# Calculating the (t, j) block entry (of size n_treatments x n_treatments) of matrix Sigma
res_epsilon_j = (Y_res[period_filters[j]] -
(Y_diff[j][:, y_index] if y_index >= 0 else Y_diff[j])
).reshape(-1, 1, 1)
resT_j = XT_res[j][j]
cov_resT_tj = resT_t.reshape(-1, d_xt, 1) @ resT_j.reshape(-1, 1, d_xt)
sigma_tj = np.mean((res_epsilon_t * res_epsilon_j) * cov_resT_tj, axis=0)
Sigma[t * d_xt:(t + 1) * d_xt,
j * d_xt:(j + 1) * d_xt] = sigma_tj
if j >= t:
# Calculating the (t, j) block entry (of size n_treatments x n_treatments) of matrix J
m_tj = np.mean(
XT_res[j][t].reshape(-1, d_xt, 1) @ resT_t.reshape(-1, 1, d_xt),
axis=0)
J[t * d_xt:(t + 1) * d_xt,
j * d_xt:(j + 1) * d_xt] = m_tj
return np.linalg.inv(J) @ Sigma @ np.linalg.inv(J).T
class _DynamicFinalWrapper(_FinalWrapper):
def predict_with_res(self, X, T_res):
fts = self._combine(X, T_res, fitting=False)
prediction = self._model.predict(fts)
if self._intercept is not None:
prediction -= self._intercept
return reshape(prediction, (prediction.shape[0],) + self._d_y)
class DynamicDML(LinearModelFinalCateEstimatorMixin, _OrthoLearner):
"""CATE estimator for dynamic treatment effect estimation.
This estimator is an extension of the Double ML approach for treatments assigned sequentially
over time periods.
The estimator is a special case of an :class:`_OrthoLearner` estimator, so it follows the two
stage process, where a set of nuisance functions are estimated in the first stage in a crossfitting
manner and a final stage estimates the CATE model. See the documentation of
:class:`._OrthoLearner` for a description of this two stage process.
Parameters
----------
model_y: estimator or 'auto', optional (default is 'auto')
The estimator for fitting the response to the features. Must implement
`fit` and `predict` methods.
If 'auto' :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV` will be chosen.
model_t: estimator or 'auto', optional (default is 'auto')
The estimator for fitting the treatment to the features.
If estimator, it must implement `fit` and `predict` methods;
If 'auto', :class:`~sklearn.linear_model.LogisticRegressionCV` will be applied for discrete treatment,
and :class:`.WeightedLassoCV`/:class:`.WeightedMultiTaskLassoCV`
will be applied for continuous treatment.
featurizer : :term:`transformer`, optional, default None
Must support fit_transform and transform. Used to create composite features in the final CATE regression.
It is ignored if X is None. The final CATE will be trained on the outcome of featurizer.fit_transform(X).
If featurizer=None, then CATE is trained on X.
fit_cate_intercept : bool, optional, default True
Whether the linear CATE model should have a constant term.
linear_first_stages: bool
Whether the first stage models are linear (in which case we will expand the features passed to
`model_y` accordingly)
discrete_treatment: bool, optional (default is ``False``)
Whether the treatment values should be treated as categorical, rather than continuous, quantities
categories: 'auto' or list, default 'auto'
The categories to use when encoding discrete treatments (or 'auto' to use the unique sorted values).
The first category will be treated as the control treatment.
cv: int, cross-validation generator or an iterable, optional (Default=2)
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`
- An iterable yielding (train, test) splits as arrays of indices.
Iterables should make sure a group belongs to a single split.
For integer/None inputs, :class:`~sklearn.model_selection.GroupKFold` is used
Unless an iterable is used, we call `split(X, T, groups)` to generate the splits.
mc_iters: int, optional (default=None)
The number of times to rerun the first stage models to reduce the variance of the nuisances.
mc_agg: {'mean', 'median'}, optional (default='mean')
How to aggregate the nuisance value for each sample across the `mc_iters` monte carlo iterations of
cross-fitting.
random_state: int, :class:`~numpy.random.mtrand.RandomState` instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If :class:`~numpy.random.mtrand.RandomState` instance, random_state is the random number generator;
If None, the random number generator is the :class:`~numpy.random.mtrand.RandomState` instance used
by :mod:`np.random<numpy.random>`.
Examples
--------
A simple example with default models:
.. testcode::
:hide:
import numpy as np
np.set_printoptions(suppress=True)
.. testcode::
from econml.dynamic.dml import DynamicDML
np.random.seed(123)
n_panels = 100 # number of panels
n_periods = 3 # number of time periods per panel
n = n_panels * n_periods
groups = np.repeat(a=np.arange(n_panels), repeats=n_periods, axis=0)
X = np.random.normal(size=(n, 1))
T = np.random.normal(size=(n, 2))
y = np.random.normal(size=(n, ))
est = DynamicDML()
est.fit(y, T, X=X, W=None, groups=groups, inference="auto")
>>> est.const_marginal_effect(X[:2])
array([[-0.336..., -0.048..., -0.061..., 0.042..., -0.204...,
0.00667271],
[-0.101..., 0.433..., 0.054..., -0.217..., -0.101...,
-0.159...]])
>>> est.effect(X[:2], T0=0, T1=1)
array([-0.601..., -0.091...])
>>> est.effect(X[:2], T0=np.zeros((2, n_periods*T.shape[1])), T1=np.ones((2, n_periods*T.shape[1])))
array([-0.601..., -0.091...])
>>> est.coef_
array([[ 0.112...],
[ 0.231...],
[ 0.055...],
[-0.125...],
[ 0.049...],
[-0.079...]])
>>> est.coef__interval()
(array([[-0.063...],
[-0.009...],
[-0.114...],
[-0.413...],
[-0.117...],
[-0.262...]]), array([[0.289...],
[0.471...],
[0.225...],
[0.163...],
[0.216...],
[0.103...]]))
"""
def __init__(self, *,
model_y='auto', model_t='auto',
featurizer=None,
fit_cate_intercept=True,
linear_first_stages=False,
discrete_treatment=False,
categories='auto',
cv=2,
mc_iters=None,
mc_agg='mean',
random_state=None):
self.fit_cate_intercept = fit_cate_intercept
self.linear_first_stages = linear_first_stages
self.featurizer = clone(featurizer, safe=False)
self.model_y = clone(model_y, safe=False)
self.model_t = clone(model_t, safe=False)
super().__init__(discrete_treatment=discrete_treatment,
discrete_instrument=False,
categories=categories,
cv=GroupKFold(cv) if isinstance(cv, int) else cv,
mc_iters=mc_iters,
mc_agg=mc_agg,
random_state=random_state)
def _gen_featurizer(self):
return clone(self.featurizer, safe=False)
def _gen_model_y(self):
if self.model_y == 'auto':
model_y = WeightedLassoCVWrapper(random_state=self.random_state)
else:
model_y = clone(self.model_y, safe=False)
return _FirstStageWrapper(model_y, True, self._gen_featurizer(),
self.linear_first_stages, self.discrete_treatment)
def _gen_model_t(self):
if self.model_t == 'auto':
if self.discrete_treatment:
model_t = LogisticRegressionCV(cv=WeightedStratifiedKFold(random_state=self.random_state),
random_state=self.random_state)
else:
model_t = WeightedLassoCVWrapper(random_state=self.random_state)
else:
model_t = clone(self.model_t, safe=False)
return _FirstStageWrapper(model_t, False, self._gen_featurizer(),
self.linear_first_stages, self.discrete_treatment)
def _gen_model_final(self):
return StatsModelsLinearRegression(fit_intercept=False)
def _gen_ortho_learner_model_nuisance(self, n_periods):
return _DynamicModelNuisance(
model_t=self._gen_model_t(),
model_y=self._gen_model_y(),
n_periods=n_periods)
def _gen_ortho_learner_model_final(self, n_periods):
wrapped_final_model = _DynamicFinalWrapper(
StatsModelsLinearRegression(fit_intercept=False),
fit_cate_intercept=self.fit_cate_intercept,
featurizer=self.featurizer,
use_weight_trick=False)
return _LinearDynamicModelFinal(wrapped_final_model, n_periods=n_periods)
def _prefit(self, Y, T, *args, groups=None, only_final=False, **kwargs):
u_periods = np.unique(np.unique(groups, return_counts=True)[1])
if len(u_periods) > 1:
raise AttributeError(
"Imbalanced panel. Method currently expects only panels with equal number of periods. Pad your data")
self._n_periods = u_periods[0]
# generate an instance of the final model
self._ortho_learner_model_final = self._gen_ortho_learner_model_final(self._n_periods)
if not only_final:
# generate an instance of the nuisance model
self._ortho_learner_model_nuisance = self._gen_ortho_learner_model_nuisance(self._n_periods)
TreatmentExpansionMixin._prefit(self, Y, T, *args, **kwargs)
def _postfit(self, Y, T, *args, **kwargs):
super()._postfit(Y, T, *args, **kwargs)
# Set _d_t to effective number of treatments
self._d_t = (self._n_periods * self._d_t[0], ) if self._d_t else (self._n_periods, )
def _strata(self, Y, T, X=None, W=None, Z=None,
sample_weight=None, sample_var=None, groups=None,
cache_values=False, only_final=False, check_input=True):
# Required for bootstrap inference
return groups
def fit(self, Y, T, *, X=None, W=None, sample_weight=None, sample_var=None, groups,
cache_values=False, inference='auto'):
"""Estimate the counterfactual model from data, i.e. estimates function :math:`\\theta(\\cdot)`.
The input data must contain groups with the same size corresponding to the number
of time periods the treatments were assigned over.
        The data should preferably be in panel format, with groups clustered together.
If group members do not appear together, the following is assumed:
* the first instance of a group in the dataset is assumed to correspond to the first period of that group
* the second instance of a group in the dataset is assumed to correspond to the
second period of that group
...etc.
        Only the value of the features X at the first period of each unit is used for
        heterogeneity. The value of X in subsequent periods is used as a time-varying control
        but not for heterogeneity.
Parameters
----------
Y: (n, d_y) matrix or vector of length n
Outcomes for each sample (required: n = n_groups * n_periods)
T: (n, d_t) matrix or vector of length n
Treatments for each sample (required: n = n_groups * n_periods)
X: optional(n, d_x) matrix or None (Default=None)
Features for each sample (Required: n = n_groups * n_periods). Only first
period features from each unit are used for heterogeneity, the rest are
used as time-varying controls together with W
W: optional(n, d_w) matrix or None (Default=None)
Controls for each sample (Required: n = n_groups * n_periods)
sample_weight: optional(n,) vector or None (Default=None)
            Weights for each sample
sample_var: optional(n,) vector or None (Default=None)
Sample variance for each sample
groups: (n,) vector, required
All rows corresponding to the same group will be kept together during splitting.
If groups is not None, the `cv` argument passed to this class's initializer
must support a 'groups' argument to its split method.
cache_values: bool, default False
Whether to cache inputs and first stage results, which will allow refitting a different final model
inference: string,:class:`.Inference` instance, or None
Method for performing inference. This estimator supports 'bootstrap'
(or an instance of :class:`.BootstrapInference`) and 'auto'
(or an instance of :class:`.LinearModelFinalInference`).
Returns
-------
self: DynamicDML instance
"""
if sample_weight is not None or sample_var is not None:
warn("This CATE estimator does not yet support sample weights and sample variance. "
"These inputs will be ignored during fitting.",
UserWarning)
return super().fit(Y, T, X=X, W=W,
sample_weight=None, sample_var=None, groups=groups,
cache_values=cache_values,
inference=inference)
def score(self, Y, T, X=None, W=None, sample_weight=None, *, groups):
"""
Score the fitted CATE model on a new data set. Generates nuisance parameters
for the new data set based on the fitted residual nuisance models created at fit time.
It uses the mean prediction of the models fitted by the different crossfit folds.
Then calculates the MSE of the final residual Y on residual T regression.
If model_final does not have a score method, then it raises an :exc:`.AttributeError`
Parameters
----------
Y: (n, d_y) matrix or vector of length n
Outcomes for each sample (required: n = n_groups * n_periods)
T: (n, d_t) matrix or vector of length n
Treatments for each sample (required: n = n_groups * n_periods)
X: optional(n, d_x) matrix or None (Default=None)
Features for each sample (Required: n = n_groups * n_periods)
W: optional(n, d_w) matrix or None (Default=None)
Controls for each sample (Required: n = n_groups * n_periods)
groups: (n,) vector, required
All rows corresponding to the same group will be kept together during splitting.
Returns
-------
score: float
The MSE of the final CATE model on the new data.
"""
if not hasattr(self._ortho_learner_model_final, 'score'):
raise AttributeError("Final model does not have a score method!")
Y, T, X, W, groups = check_input_arrays(Y, T, X, W, groups)
self._check_fitted_dims(X)
X, T = super()._expand_treatments(X, T)
n_iters = len(self._models_nuisance)
n_splits = len(self._models_nuisance[0])
# for each mc iteration
for i, models_nuisances in enumerate(self._models_nuisance):
# for each model under cross fit setting
for j, mdl in enumerate(models_nuisances):
nuisance_temp = mdl.predict(Y, T, **filter_none_kwargs(X=X, W=W, groups=groups))
if not isinstance(nuisance_temp, tuple):
nuisance_temp = (nuisance_temp,)
if i == 0 and j == 0:
nuisances = [np.zeros((n_iters * n_splits,) + nuis.shape) for nuis in nuisance_temp]
for it, nuis in enumerate(nuisance_temp):
nuisances[it][i * n_iters + j] = nuis
for it in range(len(nuisances)):
nuisances[it] = np.mean(nuisances[it], axis=0)
return self._ortho_learner_model_final.score(Y, T, nuisances=nuisances,
**filter_none_kwargs(X=X, W=W,
sample_weight=sample_weight, groups=groups))
def cate_treatment_names(self, treatment_names=None):
"""
Get treatment names for each time period.
If the treatment is discrete, it will return expanded treatment names.
Parameters
----------
treatment_names: list of strings of length T.shape[1] or None
The names of the treatments. If None and the T passed to fit was a dataframe,
it defaults to the column names from the dataframe.
Returns
-------
out_treatment_names: list of strings
Returns (possibly expanded) treatment names.
"""
slice_treatment_names = super().cate_treatment_names(treatment_names)
treatment_names_out = []
for k in range(self._n_periods):
treatment_names_out += [f"({t})$_{k}$" for t in slice_treatment_names]
return treatment_names_out
def cate_feature_names(self, feature_names=None):
"""
Get the output feature names.
Parameters
----------
feature_names: list of strings of length X.shape[1] or None
The names of the input features. If None and X is a dataframe, it defaults to the column names
from the dataframe.
Returns
-------
out_feature_names: list of strings or None
The names of the output features :math:`\\phi(X)`, i.e. the features with respect to which the
final constant marginal CATE model is linear. It is the names of the features that are associated
with each entry of the :meth:`coef_` parameter. Not available when the featurizer is not None and
does not have a method: `get_feature_names(feature_names)`. Otherwise None is returned.
"""
if self._d_x is None:
# Handles the corner case when X=None but featurizer might be not None
return None
if feature_names is None:
feature_names = self._input_names["feature_names"]
if self.original_featurizer is None:
return feature_names
return get_feature_names_or_default(self.original_featurizer, feature_names)
def _expand_treatments(self, X, *Ts):
# Expand treatments for each time period
outTs = []
base_expand_treatments = super()._expand_treatments
for T in Ts:
if ndim(T) == 0:
one_T = base_expand_treatments(X, T)[1]
one_T = one_T.reshape(-1, 1) if ndim(one_T) == 1 else one_T
T = np.tile(one_T, (1, self._n_periods, ))
else:
assert (T.shape[1] == self._n_periods if self.transformer else T.shape[1] == self._d_t[0]), \
f"Expected a list of time period * d_t, instead got a treatment array of shape {T.shape}."
if self.transformer:
T = np.hstack([
base_expand_treatments(
X, T[:, [t]])[1] for t in range(self._n_periods)
])
outTs.append(T)
return (X,) + tuple(outTs)
@property
def bias_part_of_coef(self):
return self.ortho_learner_model_final_._model_final._fit_cate_intercept
@property
def fit_cate_intercept_(self):
return self.ortho_learner_model_final_._model_final._fit_cate_intercept
@property
def original_featurizer(self):
# NOTE: important to use the _ortho_learner_model_final_ attribute instead of the
# attribute so that the trained featurizer will be passed through
return self.ortho_learner_model_final_._model_final_trained[0]._original_featurizer
@property
def featurizer_(self):
# NOTE This is used by the inference methods and has to be the overall featurizer. intended
# for internal use by the library
return self.ortho_learner_model_final_._model_final_trained[0]._featurizer
@property
def model_final_(self):
# NOTE This is used by the inference methods and is more for internal use to the library
# We need to use the _ortho_learner's copy to retain the information from fitting
return self.ortho_learner_model_final_.model_final_
@property
def model_final(self):
return self._gen_model_final()
@model_final.setter
def model_final(self, model):
if model is not None:
raise ValueError("Parameter `model_final` cannot be altered for this estimator!")
@property
def models_y(self):
return [[mdl._model_y for mdl in mdls] for mdls in super().models_nuisance_]
@property
def models_t(self):
return [[mdl._model_t for mdl in mdls] for mdls in super().models_nuisance_]
@property
def nuisance_scores_y(self):
return self.nuisance_scores_[0]
@property
def nuisance_scores_t(self):
return self.nuisance_scores_[1]
@property
def residuals_(self):
"""
A tuple (y_res, T_res, X, W), of the residuals from the first stage estimation
along with the associated X and W. Samples are not guaranteed to be in the same
order as the input order.
"""
if not hasattr(self, '_cached_values'):
raise AttributeError("Estimator is not fitted yet!")
if self._cached_values is None:
raise AttributeError("`fit` was called with `cache_values=False`. "
"Set to `True` to enable residual storage.")
Y_res, T_res = self._cached_values.nuisances
return Y_res, T_res, self._cached_values.X, self._cached_values.W | PypiClean |
/GailBot-0.2a0-py3-none-any.whl/gailbot/core/engines/watson/am.py |
# Standard library imports
from typing import Any, BinaryIO, List, TextIO, Tuple, Dict, Callable, Union
from enum import IntEnum
# Local imports
# Third party imports
from ibm_watson import SpeechToTextV1, ApiException
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from gailbot.core.engines import exception as ERR
from ibm_watson.speech_to_text_v1 import CustomWord
from gailbot.configs import watson_config_loader
from gailbot.core.utils.media import MediaHandler
import logging
logger = logging.getLogger(__name__)
from .codes import WatsonReturnCodes
WATSON_CONFIG = watson_config_loader()
class WatsonAMInterface:
"""
    Responsible for interacting with the IBM Watson STT service, providing
    methods for managing custom acoustic models.
"""
def __init__(self, apikey: str, region: str):
self.apikey = apikey
self.region = region
self.media_h = MediaHandler()
self.connected_to_service = False
# Parse confs
self._regions = WATSON_CONFIG.regions_uris
self._format_to_content_types = WATSON_CONFIG.format_to_content
self._defaults = WATSON_CONFIG.defaults
self.max_size_bytes = WATSON_CONFIG.max_file_size_bytes
        if region not in self._regions:
            raise Exception(f"Region {region} not in {list(self._regions.keys())}")
        if not self._is_api_key_valid(apikey, self._regions[region]):
            raise Exception(f"Apikey {apikey} invalid")
try:
# Create the stt service and run
authenticator = IAMAuthenticator(self.apikey)
self.stt = SpeechToTextV1(authenticator=authenticator)
self.stt.set_service_url(self._regions[self.region])
except:
raise ERR.ConnectionError("ERROR: Failed to connect to the Watson STT")
else:
self.connected_to_service = True
def get_custom_model(self, customization_id: str) -> Union[Dict[str, Any], None]:
"""
Obtain information regarding a specific custom acoustic model.
Args:
customization_id (str): Unique ID of the custom acoustic model.
Returns:
(Union[Dict[str,Any],None]):
Mapping from the following keys to their values:
                customization_id, created, updated, language, dialect,
versions, owner, name, description, base_model_name, status,
progress
None if unsuccessful.
"""
_, resp = self._execute_watson_method(
self.stt.get_acoustic_model, [WatsonReturnCodes.OK], [customization_id]
)
return resp
def get_custom_models(self) -> Dict[str, str]:
"""
Obtain all custom acoustic models
Returns:
(Dict[str,str]):
Mapping from custom acoustic model to the customization id.
"""
custom_models = dict()
response = self._execute_watson_method(
self.stt.list_acoustic_models, [WatsonReturnCodes.OK]
)
if response:
for model in response["customizations"]:
custom_models[model["name"]] = model["customization_id"]
return custom_models
def create_custom_model(
self, name: str, base_model_name: str, description: str
) -> bool:
"""
Create a new custom acoustic model.
Args:
name (str): Name of the model.
base_model_name (str):
Name of the base model. Must be a supported base model.
description (str): Description of the model.
Returns:
(bool): True if successful. False otherwise.
"""
response = self._execute_watson_method(
self.stt.create_acoustic_model,
[WatsonReturnCodes.CREATED],
[name, base_model_name],
{"description": description},
)
return response
def delete_custom_model(self, customization_id: str) -> bool:
"""
Delete an existing custom acoustic model.
Args:
customization_id (str): Unique ID of the custom acoustic model.
Returns:
(bool): True if successful. False otherwise.
"""
response = self._execute_watson_method(
self.stt.delete_acoustic_model, [WatsonReturnCodes.OK], [customization_id]
)
return response
def train_custom_model(self, customization_id: str) -> bool:
"""
Train a custom acoustic model with a previously added resource.
Args:
customization_id (str): Unique ID of the custom acoustic model.
Returns:
(bool): True if successful. False otherwise.
"""
response = self._execute_watson_method(
self.stt.train_acoustic_model, [WatsonReturnCodes.OK], [customization_id]
)
return response
def reset_custom_model(self, customization_id: str) -> bool:
"""
Reset a custom acoustic model to remove all loaded resources.
Args:
customization_id (str): Unique ID of the custom acoustic model.
Returns:
(bool): True if successful. False otherwise.
"""
response = self._execute_watson_method(
self.stt.reset_acoustic_model, [WatsonReturnCodes.OK], [customization_id]
)
return response
def upgrade_custom_model(
self, customization_id: str, custom_language_model_id: str = None
) -> bool:
"""
Upgrade the base model of the custom acoustic model to its latest
version.
Args:
customization_id (str): Unique ID of the custom acoustic model.
custom_language_model_id (str):
                ID of the custom language model (if any) that this acoustic
                model was trained with.
Returns:
(bool): True if successful. False otherwise.
"""
        response = self._execute_watson_method(
            self.stt.upgrade_acoustic_model,
            [WatsonReturnCodes.OK],
            [customization_id],
            {"custom_language_model_id": custom_language_model_id},
        )
return response
def get_custom_audio_resources(
self, customization_id: str
) -> Union[List[Dict], None]:
"""
List information about all audio resources of the specified custom
acoustic model.
Args:
customization_id (str): Unique ID of the custom acoustic model.
Returns:
(Union[List[Dict],None]):
Each internal list contains the following keys:
"duration","name","details","status"
None if unsuccessful.
"""
        response = self._execute_watson_method(
            self.stt.list_audio, [WatsonReturnCodes.OK], [customization_id]
        )
if response:
return response["audio"]
def get_custom_audio_resource(
self, customization_id: str, audio_name: str
) -> Union[Dict, None]:
"""
Obtain information about a specific custom acoustic model.
Args:
customization_id (str): Unique ID of the custom acoustic model.
audio_name (str): Name of the specific audio resource.
Returns:
(Dict): Contains the following keys:
"duration","name","details","status"
None if unsuccessful
"""
response = self._execute_watson_method(
self.stt.get_audio,
[WatsonReturnCodes.OK],
[customization_id, audio_name],
)
if response:
return response
def add_custom_audio_resource(
self,
customization_id: str,
audio_name: str,
audio_resource: BinaryIO,
content_type: str,
) -> bool:
"""
Add an audio resource to the specific custom acoustic model.
Args:
customization_id (str): Unique ID of the custom acoustic model.
audio_name (str): Name of the specific audio resource.
audio_resource (BinaryIO):
Audio resources to be added to the custom model.
content_type (str):
Type of the audio resource. One of:
[application/zip,application/gzip,audio/alaw,audio/basic,
audio/flac,audio/g729,audio/l16,audio/mp3,audio/mpeg,
audio/mulaw,audio/ogg,audio/ogg;codecs=opus,
audio/ogg;codecs=vorbis,audio/wav,audio/webm,
audio/webm;codecs=opus,audio/webm;codecs=vorbis]
Returns:
(bool): True if successful. False otherwise.
"""
response = self._execute_watson_method(
self.stt.add_audio,
[WatsonReturnCodes.CREATED],
[customization_id, audio_name, audio_resource, content_type],
)
return response
def delete_custom_audio_resource(
self, customization_id: str, audio_name: str
) -> bool:
"""
Delete the specified resource from the custom audio model.
Args:
customization_id (str): Unique ID of the custom acoustic model.
audio_name (str): Name of the specific audio resource.
Returns:
(bool): True if successful. False otherwise.
"""
response = self._execute_watson_method(
self.stt.delete_audio,
[WatsonReturnCodes.OK],
[customization_id, audio_name],
)
return response
## PRIVATE ##
def _is_api_key_valid(self, apikey: str, url: str) -> bool:
try:
stt = self._initialize_stt_service(apikey)
stt.set_service_url(url)
stt.list_models()
return True
except:
return False
def _initialize_stt_service(self, apikey: str) -> SpeechToTextV1:
authenticator = IAMAuthenticator(apikey)
stt = SpeechToTextV1(authenticator=authenticator)
return stt
def _execute_watson_method(
self,
method: Callable,
expected_response_codes: List[WatsonReturnCodes],
args: List = [],
kwargs: Dict = {},
) -> Union[bool, Any]:
"""
Execute a watson method only if connected to watson.
Args:
method (Callable): Method to execute.
expected_response_codes (List[WatsonReturnCodes]):
Watson codes that are expected for a successful response.
args (List): Arguments to pass to the method.
kwargs (Dict): Keyword arguments to method
Returns:
(Union[bool,Any]):
result from watson if successful.
False otherwise.
"""
if not self.connected_to_service:
raise ERR.ConnectionError
try:
resp = method(*args, **kwargs)
if any(
[
resp.get_status_code() == expected
for expected in expected_response_codes
]
):
return resp.get_result()
raise ERR.WatsonMethodExecutionError
except ApiException as e:
logger.info(f"Exception raised: {e}", exc_info=e)
return False
except ERR.WatsonMethodExecutionError as e:
logger.info(f"Exception raised: {e}", exc_info=e)
return False | PypiClean |
/DeePyMoD-20.11b0.tar.gz/DeePyMoD-20.11b0/docs/examples/PDE_KdV/PDE_KdV.md | # Example Korteweg de Vries equation
In this notebook we provide a simple example of the DeepMoD algorithm by applying it to the KdV equation.
```python
# General imports
import numpy as np
import torch
import matplotlib.pylab as plt
# DeepMoD functions
from deepymod import DeepMoD
from deepymod.model.func_approx import NN
from deepymod.model.library import Library1D
from deepymod.model.constraint import LeastSquares
from deepymod.model.sparse_estimators import Threshold,PDEFIND
from deepymod.training import train
from deepymod.training.sparsity_scheduler import TrainTestPeriodic
from scipy.io import loadmat
# Settings for reproducibility
np.random.seed(42)
torch.manual_seed(0)
%load_ext autoreload
%autoreload 2
```
Next, we prepare the dataset.
```python
data = np.load('../data/kdv.npy', allow_pickle=True).item()
print('Shape of grid:', data['x'].shape)
```
Shape of grid: (512, 201)
Let's plot it to get an idea of the data:
```python
fig, ax = plt.subplots()
im = ax.contourf(data['x'], data['t'], np.real(data['u']))
ax.set_xlabel('x')
ax.set_ylabel('t')
fig.colorbar(mappable=im)
plt.show()
```

```python
X = np.transpose((data['t'].flatten(), data['x'].flatten()))
y = np.real(data['u']).reshape((data['u'].size, 1))
print(X.shape, y.shape)
```
(102912, 2) (102912, 1)
As we can see, $X$ has 2 dimensions, $\{x, t\}$, while $y$ has only one, $\{u\}$. Always explicitly set the shape (i.e. $N\times 1$, not $N$) or you'll get errors. This dataset is noiseless, so let's add $2.5\%$ noise:
```python
noise_level = 0.025
y_noisy = y + noise_level * np.std(y) * np.random.randn(y[:,0].size, 1)
```
The dataset is also much larger than needed, so let's shuffle it and pick out 1000 samples:
```python
number_of_samples = 1000
idx = np.random.permutation(y.shape[0])
X_train = torch.tensor(X[idx, :][:number_of_samples], dtype=torch.float32, requires_grad=True)
y_train = torch.tensor(y_noisy[idx, :][:number_of_samples], dtype=torch.float32)
```
```python
print(X_train.shape, y_train.shape)
```
torch.Size([1000, 2]) torch.Size([1000, 1])
We now have a dataset which we can use. Let's plot, one final time, the original dataset, the noisy set and the sampled points:
```python
fig, axes = plt.subplots(ncols=3, figsize=(15, 4))
im0 = axes[0].contourf(data['x'], data['t'], np.real(data['u']), cmap='coolwarm')
axes[0].set_xlabel('x')
axes[0].set_ylabel('t')
axes[0].set_title('Ground truth')
im1 = axes[1].contourf(data['x'], data['t'], y_noisy.reshape(data['x'].shape), cmap='coolwarm')
axes[1].set_xlabel('x')
axes[1].set_title('Noisy')
sampled = np.array([y_noisy[index, 0] if index in idx[:number_of_samples] else np.nan for index in np.arange(data['x'].size)])
sampled = np.rot90(sampled.reshape(data['x'].shape)) #array needs to be rotated because of imshow
im2 = axes[2].imshow(sampled, aspect='auto', cmap='coolwarm')
axes[2].set_xlabel('x')
axes[2].set_title('Sampled')
fig.colorbar(im1, ax=axes.ravel().tolist())
plt.show()
```

## Configuring DeepMoD
Configuration of the function approximator: here the first argument is the number of input features, the list gives the sizes of the hidden layers, and the last argument is the number of outputs.
```python
network = NN(2, [50, 50, 50,50], 1)
```
Configuration of the library function: We select the library for a 1D spatial input. Note that the maximum differential order has been pre-determined here out of convenience. So, for poly_order 2 and diff_order 3 the library contains the following 12 terms:
* [$1, u_x, u_{xx}, u_{xxx}, u, u u_{x}, u u_{xx}, u u_{xxx}, u^2, u^2 u_{x}, u^2 u_{xx}, u^2 u_{xxx}$]
```python
library = Library1D(poly_order=2, diff_order=3)
```
Configuration of the sparsity estimator and sparsity scheduler used. In this case we use the most basic threshold-based Lasso estimator and a scheduler that assesses the validation loss after a given patience. If that value is smaller than 1e-5, the algorithm is considered converged.
```python
estimator = Threshold(0.1)
sparsity_scheduler = TrainTestPeriodic(periodicity=50, patience=10, delta=1e-5)
```
Configuration of the constraint: a least-squares fit is used to determine the coefficients of the active library terms.
```python
constraint = LeastSquares()
```
Now we instantiate the model and select the optimizer
```python
model = DeepMoD(network, library, estimator, constraint)
# Defining optimizer
optimizer = torch.optim.Adam(model.parameters(), betas=(0.99, 0.99), amsgrad=True, lr=1e-3)
```
## Run DeepMoD
We can now run DeepMoD using all the options we have set and the training data:
* The directory where the tensorboard file is written (log_dir)
* The ratio of train/test set used (split)
* The maximum number of iterations performed (max_iterations)
* The absolute change in L1 norm considered converged (delta)
* The number of epochs over which the absolute change in L1 norm is calculated (patience)
```python
train(model, X_train, y_train, optimizer,sparsity_scheduler, log_dir='runs/KDV/', split=0.8, max_iterations=100000, delta=1e-4, patience=8)
```
| Iteration | Progress | Time remaining | Loss | MSE | Reg | L1 norm |
25775 25.77% 3510s 7.15e-06 6.44e-06 7.10e-07 2.88e+00 Algorithm converged. Stopping training.
Sparsity masks provide the active and non-active terms in the PDE:
```python
model.sparsity_masks
```
[tensor([False, False, False, True, False, True, False, False, False, False,
False, False])]
estimator_coeffs gives the magnitude of the active terms:
```python
print(model.estimator_coeffs())
```
[array([[ 0. ],
[ 0. ],
[ 0. ],
[-0.9088099],
[ 0. ],
[-1.7334794],
[ 0. ],
[ 0. ],
[ 0. ],
[ 0. ],
[ 0. ],
[ 0. ]], dtype=float32)]
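From these coefficients we can write out the equation DeepMoD has discovered. A minimal sketch (the term names simply restate the 12-term library order listed earlier, and the coefficient values are the ones printed above):
```python
terms = ['1', 'u_x', 'u_xx', 'u_xxx', 'u', 'u u_x', 'u u_xx', 'u u_xxx',
         'u^2', 'u^2 u_x', 'u^2 u_xx', 'u^2 u_xxx']
coeffs = model.estimator_coeffs()[0].flatten()
rhs = ' '.join(f'{c:+.2f} {t}' for c, t in zip(coeffs, terms) if c != 0)
print('u_t =', rhs)
# Expected output (approximately): u_t = -0.91 u_xxx -1.73 u u_x
```
This has the structure of the KdV equation, $u_t = -u_{xxx} - 6 u u_x$, up to the scaling used when the dataset was generated.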
| PypiClean |
/Mecoda_Orange-1.9.0-py3-none-any.whl/mecoda_orange/canAIRio_mobile_extra_info.py | from orangewidget.widget import OWBaseWidget, Output, Input
from orangewidget.settings import Setting
from orangewidget import gui
from orangewidget.utils.widgetpreview import WidgetPreview
import Orange.data
from Orange.data.pandas_compat import table_from_frame, table_to_frame
import requests
import pandas as pd
from datetime import datetime
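# Fetch the recorded track of a CanAIRio mobile station by name and return it as a
# pandas DataFrame with parsed timestamps; an empty DataFrame is returned on failure.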
def get_mobile_track(name):
try:
df = pd.DataFrame(requests.get(
f"http://api.canair.io:8080/tracks/{name}").json()['data'])
df['station'] = name
df['station'] = pd.Categorical(df['station'])
df['timestamp'] = df.timestamp.apply(
lambda x: datetime.fromtimestamp(x))
except:
print(f"Fallo: {name}")
df = pd.DataFrame()
return df
class ExtraInfoWidget(OWBaseWidget):
# Widget's name as displayed in the canvas
name = "Track - Mobile Station"
# Short widget description
description = "Get extra information from mobile station tracks through CanAIRio API"
# An icon resource file path for this widget
# (a path relative to the module where this widget is defined)
icon = "icons/circle-info-solid-rosa.png"
priority = 12
# Basic (convenience) GUI definition:
# a simple 'single column' GUI layout
want_main_area = False
# with a fixed non resizable geometry.
resizing_enabled = False
# We want the current number entered by the user to be saved and restored when saving/loading a workflow.
# We can achieve this by declaring a special property/member in the widget’s class definition like so:
#id_obs = Setting(0, schema_only=True)
obs_table = Setting("", schema_only=True)
# Widget's outputs; here, a single output named "Number", of type int
class Inputs:
data = Input("Data", Orange.data.Table, auto_summary=False)
class Outputs:
extra_data = Output("extra", Orange.data.Table, auto_summary=False)
def __init__(self):
# use the init method from the class OWBaseWidget
super().__init__()
self.dataset = None
# info area
info = gui.widgetBox(self.controlArea, "Info")
self.infoa = gui.widgetLabel(info, 'No observations fetched yet.')
self.infob = gui.widgetLabel(info, '')
gui.separator(self.controlArea)
def info_searching(self):
self.infoa.setText('Searching...')
@Inputs.data
def set_data(self, dataset):
if dataset is not None:
self.dataset = dataset
self.infoa.setText('%d instances in input dataset' % len(dataset))
self.infob.setText("")
else:
self.dataset = None
self.infoa.setText(
'No data on input yet, waiting to get something.')
self.infob.setText('')
self.commit()
def commit(self):
if self.dataset is not None:
df = table_to_frame(self.dataset)
ids = df['name'].to_list()
progress = gui.ProgressBar(self, len(df))
observations = pd.DataFrame()
for id_num in ids:
obs = get_mobile_track(id_num)
observations = pd.concat([observations, obs])
progress.advance()
progress.finish()
self.obs_table = table_from_frame(observations)
self.infoa.setText(
f'{len(self.dataset)} instances in input dataset')
self.infob.setText(
f"{len(self.obs_table)} instances in output dataset")
self.info.set_output_summary(len(observations))
else:
self.infoa.setText(
'No data on input yet, waiting to get something.')
self.infob.setText('')
self.info.set_output_summary(self.info.NoOutput)
self.Outputs.extra_data.send(self.obs_table)
if __name__ == "__main__":
WidgetPreview(ExtraInfoWidget).run() | PypiClean |
/NCloudAISDK-1.0.1.tar.gz/NCloudAISDK-1.0.1/aicloud/cloudapi.py | from aicloud.rest import get, post, put
import json
import os
class AIPlatform(object):
model_upload_path = '/model/file'
model_download_path = '/model/download/modelfile'
create_training_task = '/training/training/form'
create_training_duration = '/training/finish/form/'
def __init__(self, base_url, version='v1', authorization=None, auth_token=None):
self.authorization = authorization
self.auth_token = auth_token
self.url = base_url.rstrip('/') + '/ai/api/' + version
self.train_id_file = '/workspace/trainid.txt'
def _make_token_headers(self, content_type=None):
headers = {
'Authorization': self.authorization,
'auth-token': self.auth_token
}
if content_type:
headers['Content-Type'] = content_type
return headers
def upload_model_file(self, user_id, train_id, nbid, model_file):
"""
upload model file to cloud storage
:param user_id: user id
:param train_id: training id
:param model_file: model file by training
:return:
"""
if os.path.exists(self.train_id_file):
with open(self.train_id_file) as f:
train_id = f.readline()
url = self.url + self.model_upload_path
file = {'multipartFile': model_file}
data = {
'trainingId': train_id,
'userId': user_id,
'nbid': nbid
}
return post(url, headers=self._make_token_headers(), data=data, files=file)
def download_model_file(self, train_id):
"""
download model file from cloud storage
:param train_id: training id
:return:
"""
url = self.url + self.model_download_path
params = {'trainingId': train_id}
return get(url, headers=self._make_token_headers(), params=params)
def save_model_info(self, training_name, log_path, nbid):
"""
:param training_name:
:param nbid:
:param task_url:
:return:
"""
url = self.url + self.create_training_task
data = {
'trainingName': training_name,
'notebookId': nbid,
'logAddr': log_path
}
return post(url, headers=self._make_token_headers(content_type='application/json'), data=json.dumps(data))
def training_duration_info(self):
if os.path.exists(self.train_id_file):
with open(self.train_id_file) as f:
train_id = f.readline()
url = self.url + self.create_training_duration + str(train_id)
return put(url, headers=self._make_token_headers())
class StorageEngine(object):
pass | PypiClean |
/OASYS1-shadow4-0.0.10.tar.gz/OASYS1-shadow4-0.0.10/orangecontrib/shadow4/widgets/sources/ow_bending_magnet.py | import sys
import time
import numpy
from orangewidget import gui
from orangewidget.settings import Setting
from oasys.widgets import gui as oasysgui
from oasys.widgets import congruence
from oasys.util.oasys_util import EmittingStream
from orangecontrib.shadow4.widgets.gui.ow_electron_beam import OWElectronBeam
from orangecontrib.shadow4.util.shadow4_objects import ShadowData
from syned.beamline.beamline import Beamline
from shadow4.beamline.s4_beamline import S4Beamline
from syned.storage_ring.magnetic_structures.bending_magnet import BendingMagnet
from syned.widget.widget_decorator import WidgetDecorator
from shadow4.sources.bending_magnet.s4_bending_magnet import S4BendingMagnet
from shadow4.sources.bending_magnet.s4_bending_magnet_light_source import S4BendingMagnetLightSource
class OWBendingMagnet(OWElectronBeam, WidgetDecorator):
name = "Bending Magnet"
description = "Shadow Source: Bending Magnet"
icon = "icons/bending_magnet.png"
priority = 3
inputs = []
WidgetDecorator.append_syned_input_data(inputs)
outputs = [{"name":"Shadow Data",
"type":ShadowData,
"doc":"",}]
number_of_rays = Setting(5000)
seed = Setting(5676561)
magnetic_field = Setting(-1.26754)
divergence = Setting(69e-3)
emin = Setting(1000.0) # Photon energy scan from energy (in eV)
emax = Setting(1000.1) # Photon energy scan to energy (in eV)
ng_e = Setting(100) # Photon energy scan number of points
ng_j = Setting(100) # Number of points in electron trajectory (per period) for internal calculation only
def __init__(self):
super().__init__()
tab_bas = oasysgui.createTabPage(self.tabs_control_area, "Bending Magnet Setting")
#
box_1 = oasysgui.widgetBox(tab_bas, "Bending Magnet Parameters", addSpace=True, orientation="vertical")
oasysgui.lineEdit(box_1, self, "emin", "Minimum Energy [eV]", tooltip="emin", labelWidth=260, valueType=float, orientation="horizontal")
oasysgui.lineEdit(box_1, self, "emax", "Maximum Energy [eV]", tooltip="emax", labelWidth=260, valueType=float, orientation="horizontal")
oasysgui.lineEdit(box_1, self, "magnetic_field", "Magnetic Field [T]", tooltip="magnetic_field", labelWidth=260, valueType=float, orientation="horizontal")
oasysgui.lineEdit(box_1, self, "divergence", "Horizontal divergence (arc of radius) [rads]", tooltip="divergence", labelWidth=260, valueType=float, orientation="horizontal")
box_2 = oasysgui.widgetBox(tab_bas, "Sampling rays", addSpace=True, orientation="vertical")
oasysgui.lineEdit(box_2, self, "number_of_rays", "Number of Rays", tooltip="Number of Rays", labelWidth=250, valueType=int, orientation="horizontal")
oasysgui.lineEdit(box_2, self, "seed", "Seed", tooltip="Seed (0=clock)", labelWidth=250, valueType=int, orientation="horizontal")
box_3 = oasysgui.widgetBox(tab_bas, "Optional parameters for internal calculation", addSpace=True, orientation="vertical")
oasysgui.lineEdit(box_3, self, "ng_e", "Spectrum number of points", tooltip="ng_e", labelWidth=250, valueType=int, orientation="horizontal")
oasysgui.lineEdit(box_3, self, "ng_j", "Electron trajectory number of points", tooltip="ng_j", labelWidth=250, valueType=int, orientation="horizontal")
gui.rubber(self.controlArea)
gui.rubber(self.mainArea)
def checkFields(self):
self.number_of_rays = congruence.checkPositiveNumber(self.number_of_rays, "Number of rays")
self.seed = congruence.checkPositiveNumber(self.seed, "Seed")
# self.energy = congruence.checkPositiveNumber(self.energy, "Energy")
# self.delta_e = congruence.checkPositiveNumber(self.delta_e, "Delta Energy")
# self.undulator_length = congruence.checkPositiveNumber(self.undulator_length, "Undulator Length")
def get_lightsource(self):
# syned electron beam
electron_beam = self.get_electron_beam()
print("\n\n>>>>>> electron_beam info: ", electron_beam.info())
if self.type_of_properties == 3:
flag_emittance = 0
else:
flag_emittance = 1
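        # The bending radius is derived from the field and electron energy
        # (in magnitude roughly 3.34 * E[GeV] / B[T] metres); the arc length is
        # the horizontal divergence times that radius.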
magnetic_radius = S4BendingMagnet.calculate_magnetic_radius(self.magnetic_field, electron_beam.energy())
length = numpy.abs(self.divergence * magnetic_radius)
print(">>> calculated magnetic_radius = S4BendingMagnet.calculate_magnetic_radius(%f, %f) = %f" %\
(self.magnetic_field, electron_beam.energy(), magnetic_radius))
print(">>> calculated BM length = divergence * magnetic_radius = %f " % length)
bm = S4BendingMagnet(magnetic_radius,self.magnetic_field,length,
emin=self.emin, # Photon energy scan from energy (in eV)
emax=self.emax, # Photon energy scan to energy (in eV)
ng_e=self.ng_e, # Photon energy scan number of points
ng_j=self.ng_j,
# Number of points in electron trajectory (per period) for internal calculation only
flag_emittance=flag_emittance, # when sampling rays: Use emittance (0=No, 1=Yes)
)
# bm = S4BendingMagnet.initialize_from_magnetic_field_divergence_and_electron_energy(
# magnetic_field=self.magnetic_field,
# divergence=self.divergence,
# electron_energy_in_GeV=electron_beam.energy(),
# emin=self.emin,# Photon energy scan from energy (in eV)
# emax=self.emax,# Photon energy scan to energy (in eV)
# ng_e=self.ng_e,# Photon energy scan number of points
# ng_j=self.ng_j,# Number of points in electron trajectory (per period) for internal calculation only
# flag_emittance=flag_emittance,# when sampling rays: Use emittance (0=No, 1=Yes)
# )
print("\n\n>>>>>> BM info: ", bm.info())
# S4UndulatorLightSource
lightsource = S4BendingMagnetLightSource(name='BendingMagnet',
electron_beam=electron_beam,
magnetic_structure=bm,
nrays=self.number_of_rays,
seed=self.seed)
print("\n\n>>>>>> S4BendingMagnetLightSource info: ", lightsource.info())
return lightsource
def run_shadow4(self):
sys.stdout = EmittingStream(textWritten=self._write_stdout)
self._set_plot_quality()
self.progressBarInit()
light_source = self.get_lightsource()
self.progressBarSet(5)
# run shadow4
t00 = time.time()
print(">>>> starting calculation...")
output_beam = light_source.get_beam()
# todo:
# photon_energy, flux, spectral_power = light_source.calculate_spectrum()
t11 = time.time() - t00
print(">>>> time for %d rays: %f s, %f min, " % (self.number_of_rays, t11, t11 / 60))
#
# beam plots
#
self._plot_results(output_beam, None, progressBarValue=80)
#
# script
#
script = light_source.to_python_code()
script += "\n\n# test plot\nfrom srxraylib.plot.gol import plot_scatter"
script += "\nrays = beam.get_rays()"
script += "\nplot_scatter(1e6 * rays[:, 0], 1e6 * rays[:, 2], title='(X,Z) in microns')"
self.shadow4_script.set_code(script)
self.progressBarFinished()
#
# send beam
#
self.send("Shadow Data", ShadowData(beam=output_beam,
number_of_rays=self.number_of_rays,
beamline=S4Beamline(light_source=light_source)))
def receive_syned_data(self, data):
if data is not None:
if isinstance(data, Beamline):
if not data.get_light_source() is None:
if isinstance(data.get_light_source().get_magnetic_structure(), BendingMagnet):
light_source = data.get_light_source()
# self.energy = round(light_source.get_magnetic_structure().resonance_energy(light_source.get_electron_beam().gamma()), 3)
# self.delta_e = 0.0
# self.undulator_length = light_source.get_magnetic_structure().length()
self.populate_fields_from_syned_electron_beam(light_source.get_electron_beam())
else:
raise ValueError("Syned light source not congruent")
else:
raise ValueError("Syned data not correct: light source not present")
else:
raise ValueError("Syned data not correct")
if __name__ == "__main__":
from PyQt5.QtWidgets import QApplication
a = QApplication(sys.argv)
ow = OWBendingMagnet()
ow.show()
a.exec_()
ow.saveSettings() | PypiClean |
/DatabasePipe-2.2.1.tar.gz/DatabasePipe-2.2.1/databasepipe/helper/__init__.py | from databasepipe.helper import plugin
from bn import AttributeDict
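# Fetch every remaining row from a DB-API cursor as a list of dictionaries keyed by
# column name (AttributeDict instances when available, plain dicts otherwise).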
def dict_fetchall(cursor):
rows = cursor.fetchall()
res = []
for row in rows:
if AttributeDict is not None:
dictionary = AttributeDict()
else:
dictionary = {}
for i in range(len(row)):
dictionary[cursor.description[i][0]] = row[i]
res.append(dictionary)
return res
param_style_converters = {
'qmark': '?'
}
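# substitute_param_style() rewrites '%s' placeholders into the style the DB-API
# driver expects: 'format' leaves the SQL untouched, while 'qmark' turns each
# '%s' into '?' and un-escapes literal '%%s' sequences back to '%s'.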
def substitute_param_style(sql, param_style):
if param_style == 'format':
pass
elif param_style == 'qmark':
parts = sql.split('%%s')
res = []
ps = param_style_converters[param_style]
for part in parts:
res.append(part.replace('%s', ps))
sql = '%s'.join(res)
else:
raise Exception('Unsupported param_style %r'%(param_style,))
return sql
def query(
connection,
sql,
values=(),
format='dict',
fetch=True,
substitute=True,
param_style='format',
):
'''\
Execute ``sql`` using a cursor obtained from ``connection``. Any ``%s``
characters in the SQL string are substituted with the values in ``values`` in a
safe way helping you to avoid SQL injection attacks. If you need a literal
``%s`` in the SQL, you write it as ``%%s``. The SQL is translated to use the
correct param_style for the underlying database which you specify as the
``param_style`` argument. This allows you to use ``%s`` for all SQL statements.
To avoid this substitution you can set ``substitute`` to ``False``.
If ``fetch`` is ``True`` (the default), the results from executing the query
are returned. This is usually what you want for ``SELECT`` statements but
probably not what you want for ``INSERT``, ``UPDATE`` or ``DELETE`` statements.
By default the results are returned as a list of dictionaries where the keys in
the dictionaries are the column names and the values are the values from that
row. If you have the BareNecessities package installed the results are returned
as a list of ``bn.AttributeDict`` objects which are like dictionaries but which
also allow attribute access to any keys which are also valid Python names. You
can specify ``format='list'`` to have the results returned in the format used
by DB-API 2.0 cursor (usually a tuple of tuples).
Here's a simple example:
::
>>> rows = query(
... connection,
... """
... SELECT
... name
... , age
... FROM
... person
... WHERE
... name=%s
... """,
... (u'James',)
... )
>>> print rows[0]['name']
u'James'
.. note ::
When specifying values they should always be specified as a
tuple. When using a tuple with a single value in Python you must always have a
trailing comma so that the brackets are treated as a tuple.
'''
if substitute:
sql = substitute_param_style(sql, param_style)
cursor = connection.cursor()
cursor.execute(sql, values)
if fetch:
if format == 'list':
rows = cursor.fetchall()
elif format == 'dict':
rows = dict_fetchall(cursor)
else:
raise Exception('Unrecognised cursor fetch format %s'%(format))
else:
rows = []
cursor.close()
return rows
def update_record(
connection,
table_name,
primary_key_column_name,
primary_key_value,
data_dict,
substitute=True,
param_style='format',
):
'''\
Update a single row in a table identified by a primary key. For more
complex update operations use ``query()`` and write the required SQL by hand.
``connection``
The name of the connection to use to perform the update
``table_name``
The name of the table to update
``primary_key_column_name``
The name of the primary key column
``primary_key_value``
The ID or primary key value of the record to update
``data_dict``
A dictionary where the keys represents the columns to update and the
values represent the new values to use
``substitute``
If ``True`` any ``%s`` characters will be substituted for the
correct param_style characters in the format specified by
``param_style``
``param_style``
The param_style format the underlying database driver expects. Can be
        ``'format'`` for ``%s`` style or ``'qmark'`` for ``?`` style
An example:
::
>>> update_record(
... connection,
... 'person',
... 'person_id',
... 34,
... {'name': u'James'}
... )
'''
cursor = connection.cursor()
columns = []
values = []
for k, v in data_dict.items():
values.append(v)
columns.append(k)
columns_str = ""
for col in columns:
columns_str += '"%s"=%%s, '%(col)
columns_str = columns_str[:-2]
sql = """
UPDATE %s SET %s WHERE "%s"=%%s;
""" % (
table_name,
columns_str,
primary_key_column_name,
)
if substitute:
sql = substitute_param_style(sql, param_style)
values.append(primary_key_value)
cursor.execute(sql, tuple(values))
cursor.close()
return True
def insert_record(
connection,
table_name,
data_dict,
primary_key_column_name=None,
plugin_name=None,
):
"""\
Insert a new record into a table and return its integer primary key
There is a different version of this function for every supported database
and the correct format is specified by ``engine`` or inferred from the
``connection``.
``connection``
The name of the connection to use to perform the update
``table_name``
The name of the table to update
``data_dict``
A dictionary where the keys represents the columns to update and the
values represent the new values to use
``primary_key_column_name``
The name of the primary key column. If ``primary_key_column_name`` is
``None``, it is assumed you are following a naming convention where
the primary key is the table name followed by ``_id``.
``plugin_name``
The type and structure of the underlying database. This affects how the
new ID is generated and returned since different databases handle it
differently. Only the value ``postgresql`` is currently allowed.
An example:
::
>>> print insert_record(
... connection,
... 'person',
        ...     {'name': u'James'},
        ...     'person_id',
        ...     plugin_name='postgresql',
... )
2
"""
if not plugin.available_plugins.has_key(plugin_name):
raise Exception('Plugin %r not found'%plugin_name)
if not plugin.insert_record.has_key(plugin_name):
plugin.insert_record[plugin_name] = \
plugin.available_plugins[plugin_name]['insert_record'].load()
# Use the insert_record_plugins plugin's insert method
return plugin.insert_record[plugin_name](
connection,
table_name,
data_dict,
primary_key_column_name,
)
def page_data(
connection,
sql,
values,
page=None,
number=20,
format='list',
substitute=True,
param_style='format',
):
"""\
Obtain a specific portion of a result set.
"""
if page is not None:
sql += """
OFFSET %s
LIMIT %s
"""
values.append((page-1)*number)
values.append(number)
cursor = connection.cursor()
if substitute:
sql = substitute_param_style(sql, param_style)
cursor.execute(sql, tuple(values))
if format == 'list':
rows = cursor.fetchall()
elif format == 'dict':
rows = dict_fetchall(cursor)
else:
raise Exception('Unrecognised cursor fetch format %s'%(format))
cursor.close()
return rows
def service_extensions(bag, name):
"""\
This function exposes the database helpers as part of the bag and
uses the ``bag[name].connect()`` function to automatically obtain a
database connection from the bag when it is needed.
"""
def bag_insert_record(*k, **p):
args = p.copy()
real_args = ['table_name', 'data_dict', 'primary_key_column_name']
for i in range(len(k)):
args[real_args[i]] = k[i]
for arg in ['connection', 'engine']:
if args.has_key(arg):
raise TypeError(
"The %r argument is assigned automatically, you cannot "
"specify it"%(arg,)
)
args['plugin_name'] = bag.app.config[name].plugin
args['connection'] = bag[name].connect()
return insert_record(**args)
def bag_update_record(*k, **p):
args = p.copy()
real_args = ['table_name', 'primary_key_column_name', 'primary_key_value', 'data_dict']
for i in range(len(k)):
args[real_args[i]] = k[i]
for arg in ['connection', 'param_style']:
if args.has_key(arg):
raise TypeError(
"The %r argument is assigned automatically, you cannot "
"specify it"%(arg,)
)
args['param_style'] = bag.app.config[name].param_style
args['connection'] = bag[name].connect()
return update_record(**args)
def bag_query(*k, **p):
args = p.copy()
real_args = ['sql', 'values', 'format', 'fetch', 'substitute']
for i in range(len(k)):
args[real_args[i]] = k[i]
for arg in ['connection', 'param_style']:
if args.has_key(arg):
raise TypeError(
"The %r argument is assigned automatically, you cannot "
"specify it"%(arg,)
)
args['connection'] = bag[name].connect()
args['param_style'] = bag.app.config[name].param_style
return query(**args)
def bag_page_data(*k, **p):
raise Exception('Not yet implemented')
return query(bag[name].connect(), *k, **p)
bag[name]['insert_record'] = bag_insert_record
bag[name]['update_record'] = bag_update_record
bag[name]['page_data'] = bag_page_data
bag[name]['dict_fetchall'] = dict_fetchall
bag[name]['query'] = bag_query | PypiClean |
/ConfigViper-0.5.tar.gz/ConfigViper-0.5/doc/source/converters.rst |
=======================
Working with Converters
=======================
Converters are deceptively simple.
You may know that JSON plays well with a limited set of value types. ConfigViper
just provides a way to work easily with Python dictionaries. For persistence,
ConfigViper stores your configuration values to a file in JSON format. Why JSON?
Because JSON is a portable format, it is human readable and can be easily edited
with any text editor.
So you'll need converters if you want to store more sophisticated Python types,
like dates, times, decimal values (not only floats), or whatever type you can
convert from Python to a type JSON can handle. A converter is a simple Python
class with two methods: ``to_python`` and ``to_json``. Converters work on a
per-config-path basis. This means that you can have a specialized converter for a
complex type on a specific config-path.
Built-in Converters
-------------------
ConfigViper comes with built-in converters for
date (:class:`~configviper.converters.DateConverter`),
time (:class:`~configviper.converters.TimeConverter`),
datetime (:class:`~configviper.converters.DatetimeConverter`) and
decimal (:class:`~configviper.converters.DecimalConverter`) objects. You can
refer to :doc:`api/intro` for details on these converters. The ``converters``
module also offers instances of these built-in converters as
:attr:`~configviper.converters.DATE_CONVERTER`,
:attr:`~configviper.converters.DATETIME_CONVERTER`,
:attr:`~configviper.converters.DECIMAL_CONVERTER`, and
:attr:`~configviper.converters.TIME_CONVERTER` constants.
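As a minimal sketch of how one of these built-in instances could be attached to a
config-path on stabilization (the ``sys.release_date`` path and its default value
below are made up for this illustration):

.. code-block:: python

    import datetime

    from configviper import ConfigViper
    from configviper import converters

    values = (
        ('sys.release_date', datetime.date(2012, 1, 1), converters.DATE_CONVERTER,),)

    conf = ConfigViper()
    conf.stabilize(values)

    print conf.sys.release_date
    # expected a datetime.date instance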
Building Your Own Converters
----------------------------
Any class can act as a converter. It just needs to provide two methods, one
called ``to_python`` and another called ``to_json``. By the way, a converter
can inherit from :class:`~configviper.converters.Converter`, but it's not a
requirement and your class will not gain anything by doing this (as I said,
converters are deceptively simple).
The ``to_json`` method should convert your Python type to a type JSON can
handle. You can serialize your Python type to a string, for example, or you
can return a dictionary with types JSON can handle. The ``to_python`` method
should convert your JSON value back to your Python type. Let's build a
converter for a ``Person`` class:
.. code-block:: python
class Person(object):
name = ''
age = 0
def __init__(self, **kwargs):
for key, value in kwargs.items():
if hasattr(self, key):
setattr(self, key, value)
def __repr__(self):
return '%s, %d years old' % (self.name, self.age)
For this example we'll convert our ``Person`` instances to Python dictionaries,
where each key holds a value JSON can handle (a string for the name and an integer
for the age attribute):
.. code-block:: python
from configviper import converters
class PersonConverter(converters.Converter):
def to_json(self, value):
# value should be an instance of Person
return { 'name': value.name, 'age': value.age }
def to_python(self, value):
        # value will be deserialized from JSON as a Python dictionary
# just like the returned by to_json method
return Person(**value)
Now you can apply your converter to a config-path on stabilization, like this:
.. code-block:: python
from configviper import ConfigViper
values = (
('sys.owner', Person(name='John Doe', age=37), PersonConverter(),),)
conf = ConfigViper()
conf.stabilize(values)
print conf.sys.owner
# expected "John Doe, 37 years old"
conf.set('sys.owner', Person(name='Alice Foo', age=25))
The resulting configuration file should look like:
.. code-block:: json
{
"sys": {
"owner": {
"name": "Alice Foo",
"age": 25
}
}
}
| PypiClean |
/Distributions_Pack_Testing-1.0.tar.gz/Distributions_Pack_Testing-1.0/Distributions_Pack_Testing/Binomialdistribution.py | import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Binomial(Distribution):
""" Binomial distribution class for calculating and
visualizing a Binomial distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats to be extracted from the data file
p (float) representing the probability of an event occurring
n (int) number of trials
TODO: Fill out all functions below
"""
def __init__(self, prob=.5, size=20):
self.n = size
self.p = prob
Distribution.__init__(self, self.calculate_mean(), self.calculate_stdev())
def calculate_mean(self):
"""Function to calculate the mean from p and n
Args:
None
Returns:
float: mean of the data set
"""
self.mean = self.p * self.n
return self.mean
def calculate_stdev(self):
"""Function to calculate the standard deviation from p and n.
Args:
None
Returns:
float: standard deviation of the data set
"""
self.stdev = math.sqrt(self.n * self.p * (1 - self.p))
return self.stdev
def replace_stats_with_data(self):
"""Function to calculate p and n from the data set
Args:
None
Returns:
float: the p value
float: the n value
"""
self.n = len(self.data)
self.p = 1.0 * sum(self.data) / len(self.data)
self.mean = self.calculate_mean()
self.stdev = self.calculate_stdev()
def plot_bar(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.bar(x=['0', '1'], height=[(1 - self.p) * self.n, self.p * self.n])
plt.title('Bar Chart of Data')
plt.xlabel('outcome')
plt.ylabel('count')
def pdf(self, k):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
a = math.factorial(self.n) / (math.factorial(k) * (math.factorial(self.n - k)))
b = (self.p ** k) * (1 - self.p) ** (self.n - k)
return a * b
def plot_bar_pdf(self):
"""Function to plot the pdf of the binomial distribution
Args:
None
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
x = []
y = []
# calculate the x values to visualize
for i in range(self.n + 1):
x.append(i)
y.append(self.pdf(i))
# make the plots
plt.bar(x, y)
plt.title('Distribution of Outcomes')
plt.ylabel('Probability')
plt.xlabel('Outcome')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Binomial Distributions_Pack_Testing with equal p
Args:
other (Binomial): Binomial instance
Returns:
Binomial: Binomial distribution
"""
try:
assert self.p == other.p, 'p values are not equal'
except AssertionError as error:
raise
result = Binomial()
result.n = self.n + other.n
result.p = self.p
result.calculate_mean()
result.calculate_stdev()
return result
def __repr__(self):
"""Function to output the characteristics of the Binomial instance
Args:
None
Returns:
            string: characteristics of the Binomial
"""
return "mean {}, standard deviation {}, p {}, n {}". \
format(self.mean, self.stdev, self.p, self.n) | PypiClean |
/Auptimizer-2.0.tar.gz/Auptimizer-2.0/src/aup/compression/torch/speedup/compress_modules.py |
import logging
import torch
from .infer_shape import ModuleMasks
_logger = logging.getLogger(__name__)
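# Maps the class name of a supported module to a function that builds a smaller
# replacement module from the inferred masks; entries mapped to no_replace are
# shape-agnostic (pooling, activations, dropout) and can be reused as-is.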
replace_module = {
'BatchNorm2d': lambda module, mask: replace_batchnorm2d(module, mask),
'Conv2d': lambda module, mask: replace_conv2d(module, mask),
'ConvTranspose2d': lambda module, mask: replace_convtranspose2d(module, mask),
'MaxPool2d': lambda module, mask: no_replace(module, mask),
'AvgPool2d': lambda module, mask: no_replace(module, mask),
'AdaptiveAvgPool2d': lambda module, mask: no_replace(module, mask),
'ReLU': lambda module, mask: no_replace(module, mask),
'ReLU6': lambda module, mask: no_replace(module, mask),
'Sigmoid': lambda module, mask: no_replace(module, mask),
'Linear': lambda module, mask: replace_linear(module, mask),
'Dropout': lambda module, mask: no_replace(module, mask),
'Dropout2d': lambda module, mask: no_replace(module, mask),
'Dropout3d': lambda module, mask: no_replace(module, mask)
}
def no_replace(module, mask):
"""
No need to replace
"""
_logger.debug("no need to replace")
return module
def replace_linear(linear, mask):
"""
Parameters
----------
linear : torch.nn.Linear
The linear module to be replace
mask : ModuleMasks
The masks of this module
Returns
-------
torch.nn.Linear
The new linear module
"""
assert isinstance(mask, ModuleMasks)
assert mask.input_mask is not None
assert mask.output_mask is None
assert not mask.param_masks
index = mask.input_mask.mask_index[-1]
in_features = index.size()[0]
_logger.debug("replace linear with new in_features: %d", in_features)
new_linear = torch.nn.Linear(in_features=in_features,
out_features=linear.out_features,
bias=linear.bias is not None)
new_linear.to(linear.weight.device)
new_linear.weight.data = torch.index_select(
linear.weight.data, -1, index.to(linear.weight.device))
if linear.bias is not None:
new_linear.bias.data.copy_(linear.bias.data)
return new_linear
def replace_batchnorm2d(norm, mask):
"""
Parameters
----------
norm : torch.nn.BatchNorm2d
The batchnorm module to be replace
mask : ModuleMasks
The masks of this module
Returns
-------
torch.nn.BatchNorm2d
The new batchnorm module
"""
assert isinstance(mask, ModuleMasks)
assert 'weight' in mask.param_masks and 'bias' in mask.param_masks
index = mask.param_masks['weight'].mask_index[0]
num_features = index.size()[0]
_logger.debug("replace batchnorm2d with num_features: %d", num_features)
new_norm = torch.nn.BatchNorm2d(num_features=num_features,
eps=norm.eps,
momentum=norm.momentum,
affine=norm.affine,
track_running_stats=norm.track_running_stats)
# assign weights
new_norm.weight.data = torch.index_select(norm.weight.data, 0, index)
new_norm.bias.data = torch.index_select(norm.bias.data, 0, index)
if norm.track_running_stats:
new_norm.running_mean.data = torch.index_select(
norm.running_mean.data, 0, index)
new_norm.running_var.data = torch.index_select(
norm.running_var.data, 0, index)
return new_norm
def replace_conv2d(conv, mask):
"""
Parameters
----------
conv : torch.nn.Conv2d
The conv2d module to be replaced
mask : ModuleMasks
The masks of this module
Returns
-------
torch.nn.Conv2d
The new conv2d module
"""
assert isinstance(mask, ModuleMasks)
if mask.input_mask is None:
in_channels = conv.in_channels
else:
in_channels_index = mask.input_mask.mask_index[1]
in_channels = in_channels_index.size()[0]
if mask.output_mask is None:
out_channels = conv.out_channels
else:
out_channels_index = mask.output_mask.mask_index[1]
out_channels = out_channels_index.size()[0]
groups = conv.groups
if conv.in_channels == conv.out_channels == conv.groups:
# remove groups for depthwise layers
assert in_channels == out_channels
groups = in_channels
_logger.debug("replace conv2d %s with in_channels: %d, out_channels: %d",
mask.module_name, in_channels, out_channels)
new_conv = torch.nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=conv.kernel_size,
stride=conv.stride,
padding=conv.padding,
dilation=conv.dilation,
groups=groups,
bias=conv.bias is not None,
padding_mode=conv.padding_mode)
new_conv.to(conv.weight.device)
tmp_weight_data = tmp_bias_data = None
if mask.output_mask is not None:
tmp_weight_data = torch.index_select(
conv.weight.data, 0, out_channels_index)
if conv.bias is not None:
tmp_bias_data = torch.index_select(
conv.bias.data, 0, out_channels_index)
else:
tmp_weight_data = conv.weight.data
# For the convolutional layers that have more than one group
# we need to copy the weight group by group, because the input
    # channel is also divided into several groups and each group
# filter may have different input channel indexes.
input_step = int(conv.in_channels / conv.groups)
in_channels_group = int(in_channels / groups)
filter_step = int(out_channels / groups)
if mask.input_mask is not None and not (in_channels == out_channels == groups):
for groupid in range(conv.groups):
start = groupid * input_step
end = (groupid + 1) * input_step
current_input_index = list(
filter(lambda x: start <= x and x < end, in_channels_index.tolist()))
if not current_input_index:
# there is no kept channel in current group
                # TODO bug here: the number of groups is taken directly from conv.groups; if a whole
                # group is removed, the number of groups in new_conv also needs to change
                raise Exception(
                    "Removing a whole group of filters is not supported yet, except in depth-wise convolutions")
# shift the global index into the group index
current_input_index = [x-start for x in current_input_index]
# if the groups is larger than 1, the input channels of each
# group should be pruned evenly.
assert len(current_input_index) == in_channels_group, \
'Input channels of each group are not pruned evenly'
current_input_index = torch.tensor(current_input_index).to(tmp_weight_data.device) # pylint: disable=not-callable
f_start = groupid * filter_step
f_end = (groupid + 1) * filter_step
new_conv.weight.data[f_start:f_end] = torch.index_select(
tmp_weight_data[f_start:f_end], 1, current_input_index)
else:
new_conv.weight.data.copy_(tmp_weight_data)
if conv.bias is not None:
new_conv.bias.data.copy_(
conv.bias.data if tmp_bias_data is None else tmp_bias_data)
return new_conv
def replace_convtranspose2d(convtrans, mask):
"""
    We need another replace function for
convtranspose2d, because the layout of
the weight is different from traditional
conv layers. The layout of the weight is [N_in, N_out, ksize_1, ksize_2]
Parameters
----------
convtrans : torch.nn.ConvTranspose2d
The conv2d module to be replaced
mask : ModuleMasks
The masks of this module
Returns
-------
torch.nn.ConvTranspose2d
The new conv2d module
"""
assert isinstance(mask, ModuleMasks)
assert isinstance(convtrans, torch.nn.ConvTranspose2d)
if mask.input_mask is None:
in_channels = convtrans.in_channels
else:
in_channels_index = mask.input_mask.mask_index[1]
in_channels = in_channels_index.size(0)
if mask.output_mask is None:
out_channels = convtrans.out_channels
else:
out_channels_index = mask.output_mask.mask_index[1]
out_channels = out_channels_index.size(0)
groups = convtrans.groups
# check if can remove the whole group of filters
if convtrans.in_channels == convtrans.out_channels == convtrans.groups:
# remove groups for depthwise layers
# this needs the group dependency to be fixed before the speedup
assert in_channels == out_channels
groups = in_channels
_logger.debug('Replace convtranspose2d %s with in_channels:%d out_channels:%d',
mask.module_name, in_channels, out_channels)
new_convtrans = torch.nn.ConvTranspose2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=convtrans.kernel_size,
stride=convtrans.stride,
padding=convtrans.padding,
dilation=convtrans.dilation,
groups=groups,
bias=convtrans.bias is not None,
padding_mode=convtrans.padding_mode)
new_convtrans.to(convtrans.weight.device)
tmp_weight_data = None
if mask.input_mask is not None:
# in convtranspose2d we need to select the input channel first
tmp_weight_data = torch.index_select(
convtrans.weight.data, 0, in_channels_index)
else:
tmp_weight_data = convtrans.weight.data
# we need to handle the output channel group by group like the conv layer
out_step = int(convtrans.out_channels / convtrans.groups)
out_channel_group = int(out_channels/groups)
new_in_per_group = int(in_channels/groups)
if mask.output_mask is not None and not(in_channels == out_channels == groups):
for groupid in range(convtrans.groups):
start = groupid * out_step
end = (groupid + 1) * out_step
current_output_index = list(
filter(lambda x: start <= x and x < end, out_channels_index.tolist()))
# we need to shift the index into the group-wise
current_output_index = [x-start for x in current_output_index]
if not current_output_index:
# No kept channel in the current group
raise Exception(
" Donnot support removing the whole group filter except in the depth-wise conv temporarily")
assert len(current_output_index) == out_channel_group, \
'Output channel of each group should be the same after pruning'
current_output_index = torch.tensor(current_output_index).to(tmp_weight_data.device) # pylint: disable=not-callable
new_start = groupid * new_in_per_group
new_end = (groupid + 1) * new_in_per_group
new_convtrans.weight.data[new_start:new_end] = torch.index_select(
tmp_weight_data[new_start:new_end], 1, current_output_index)
else:
new_convtrans.weight.data.copy_(tmp_weight_data)
if convtrans.bias is not None:
if mask.output_mask is not None:
new_convtrans.bias.data[:] = torch.index_select(
convtrans.bias.data, 0, out_channels_index)
else:
new_convtrans.bias.data.copy_(convtrans.bias.data)
return new_convtrans | PypiClean |
/OCAICM-0.0.2.tar.gz/OCAICM-0.0.2/script/gnn_utils.py | from rdkit import Chem
import datetime
import dgl
import numpy as np
import random
import torch
import torch.nn.functional as F
import dgl
from dgl import model_zoo
from dgl.data.chem import one_hot_encoding
from dgl.data.utils import split_dataset
from sklearn.metrics import roc_auc_score
from dgl.data.chem import BaseAtomFeaturizer, BaseBondFeaturizer, ConcatFeaturizer, atom_type_one_hot, atom_degree_one_hot, \
atom_implicit_valence_one_hot, atom_formal_charge, atom_num_radical_electrons, atom_hybridization_one_hot, \
atom_is_aromatic, atom_total_num_H_one_hot, atom_total_degree_one_hot, atom_chiral_tag_one_hot, atom_mass, \
bond_type_one_hot, bond_is_conjugated, bond_is_in_ring, bond_stereo_one_hot
from functools import partial
import torch.nn as nn
import dgl
import torch
from dgl.nn.pytorch.conv import RelGraphConv
from dgl.model_zoo.chem.classifiers import MLPBinaryClassifier
from dgl.nn.pytorch.glob import WeightAndSum
# from dgl import BatchedDGLGraph
from sklearn.metrics import roc_auc_score, confusion_matrix, precision_recall_curve, auc, \
mean_absolute_error, r2_score, matthews_corrcoef
def chirality(atom): # the chirality information defined in the AttentiveFP
try:
return one_hot_encoding(atom.GetProp('_CIPCode'), ['R', 'S']) + \
[atom.HasProp('_ChiralityPossible')]
except:
return [False, False] + [atom.HasProp('_ChiralityPossible')]
AttentiveFPAtomFeaturizer = BaseAtomFeaturizer(
featurizer_funcs={'h': ConcatFeaturizer([
partial(atom_type_one_hot, allowable_set=[
'B', 'C', 'N', 'O', 'F', 'Si', 'P', 'S', 'Cl', 'As', 'Se', 'Br', 'Te', 'I', 'At'],
encode_unknown=True),
partial(atom_degree_one_hot, allowable_set=list(range(6))),
atom_formal_charge, atom_num_radical_electrons,
partial(atom_hybridization_one_hot, encode_unknown=True),
atom_is_aromatic, # A placeholder for aromatic information,
atom_total_num_H_one_hot, chirality
],
)})
AttentiveFPBondFeaturizer = BaseBondFeaturizer(featurizer_funcs={'e': ConcatFeaturizer([bond_type_one_hot,
bond_is_conjugated,
bond_is_in_ring,
partial(bond_stereo_one_hot,
allowable_set=[Chem.rdchem.BondStereo.STEREONONE,
Chem.rdchem.BondStereo.STEREOANY,
Chem.rdchem.BondStereo.STEREOZ,
Chem.rdchem.BondStereo.STEREOE], encode_unknown=True)])})
# AtomFeaturizer.feat_size('h') = 85
class MyAtomFeaturizer(BaseAtomFeaturizer):
def __init__(self, atom_data_field='h'):
super(MyAtomFeaturizer, self).__init__(
featurizer_funcs={atom_data_field: ConcatFeaturizer(
[atom_type_one_hot,
atom_degree_one_hot,
atom_implicit_valence_one_hot,
atom_formal_charge,
atom_num_radical_electrons,
atom_hybridization_one_hot,
atom_is_aromatic,
atom_total_num_H_one_hot,
atom_total_degree_one_hot,
atom_chiral_tag_one_hot,
atom_mass]
)})
def set_random_seed(seed=0):
"""Set random seed.
Parameters
----------
seed : int
Random seed to use
"""
random.seed(seed)
np.random.seed(seed)
    torch.manual_seed(seed)  # set the seed for the CPU random number generator
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)  # set the random seed for the current GPU
# class Meter(object):
# """Track and summarize model performance on a dataset for
# (multi-label) binary classification."""
# def __init__(self):
# self.mask = []
# self.y_pred = []
# self.y_true = []
#
# def update(self, y_pred, y_true, mask):
# """Update for the result of an iteration
#
# Parameters
# ----------
# y_pred : float32 tensor
# Predicted molecule labels with shape (B, T),
# B for batch size and T for the number of tasks
# y_true : float32 tensor
# Ground truth molecule labels with shape (B, T)
# mask : float32 tensor
# Mask for indicating the existence of ground
# truth labels with shape (B, T)
# """
# self.y_pred.append(y_pred.detach().cpu())
# self.y_true.append(y_true.detach().cpu())
# self.mask.append(mask.detach().cpu())
#
# def roc_auc_score(self):
# """Compute roc-auc score for each task.
#
# Returns
# -------
# list of float
# roc-auc score for all tasks
# """
# mask = torch.cat(self.mask, dim=0)
# y_pred = torch.cat(self.y_pred, dim=0)
# y_true = torch.cat(self.y_true, dim=0)
# # Todo: support categorical classes
# # This assumes binary case only
# y_pred = torch.sigmoid(y_pred) # obtain the probability of the positive class
# n_tasks = y_true.shape[1]
# scores = []
# for task in range(n_tasks):
# task_w = mask[:, task]
# task_y_true = y_true[:, task][task_w != 0].numpy()
# task_y_pred = y_pred[:, task][task_w != 0].numpy()
# scores.append(roc_auc_score(task_y_true, task_y_pred))
# return scores
#
# def l1_loss(self, reduction):
# """Compute l1 loss for each task.
#
# Returns
# -------
# list of float
# l1 loss for all tasks
# reduction : str
# * 'mean': average the metric over all labeled data points for each task
# * 'sum': sum the metric over all labeled data points for each task
# """
# mask = torch.cat(self.mask, dim=0)
# y_pred = torch.cat(self.y_pred, dim=0)
# y_true = torch.cat(self.y_true, dim=0)
# n_tasks = y_true.shape[1]
# scores = []
# for task in range(n_tasks):
# task_w = mask[:, task]
# task_y_true = y_true[:, task][task_w != 0]
# task_y_pred = y_pred[:, task][task_w != 0]
# scores.append(F.l1_loss(task_y_true, task_y_pred, reduction=reduction).item())
# return scores
#
# def rmse(self):
# """Compute RMSE for each task.
#
# Returns
# -------
# list of float
# rmse for all tasks
# """
# mask = torch.cat(self.mask, dim=0)
# y_pred = torch.cat(self.y_pred, dim=0)
# y_true = torch.cat(self.y_true, dim=0)
# n_data, n_tasks = y_true.shape
# scores = []
# for task in range(n_tasks):
# task_w = mask[:, task]
# task_y_true = y_true[:, task][task_w != 0]
# task_y_pred = y_pred[:, task][task_w != 0]
# scores.append(np.sqrt(F.mse_loss(task_y_pred, task_y_true).cpu().item()))
# return scores
#
# def compute_metric(self, metric_name, reduction='mean'):
# """Compute metric for each task.
#
# Parameters
# ----------
# metric_name : str
# Name for the metric to compute.
# reduction : str
# Only comes into effect when the metric_name is l1_loss.
# * 'mean': average the metric over all labeled data points for each task
# * 'sum': sum the metric over all labeled data points for each task
#
# Returns
# -------
# list of float
# Metric value for each task
# """
# assert metric_name in ['roc_auc', 'l1', 'rmse'], \
# 'Expect metric name to be "roc_auc", "l1" or "rmse", got {}'.format(metric_name) # assert raises an exception when the condition is false
# assert reduction in ['mean', 'sum']
# if metric_name == 'roc_auc':
# return self.roc_auc_score()
# if metric_name == 'l1':
# return self.l1_loss(reduction)
# if metric_name == 'rmse':
# return self.rmse()
class Meter(object):
"""Track and summarize model performance on a dataset for
(multi-label) binary classification."""
def __init__(self):
self.mask = []
self.y_pred = []
self.y_true = []
def update(self, y_pred, y_true, mask):
"""Update for the result of an iteration
Parameters
----------
y_pred : float32 tensor
Predicted molecule labels with shape (B, T),
B for batch size and T for the number of tasks
y_true : float32 tensor
Ground truth molecule labels with shape (B, T)
mask : float32 tensor
Mask for indicating the existence of ground
truth labels with shape (B, T)
"""
self.y_pred.append(y_pred.detach().cpu())
self.y_true.append(y_true.detach().cpu())
self.mask.append(mask.detach().cpu())
def roc_precision_recall_score(self):
"""Compute AUC_PRC for each task.
Returns
-------
list of float
rmse for all tasks
"""
mask = torch.cat(self.mask, dim=0)
y_pred = torch.cat(self.y_pred, dim=0)
y_pred = torch.sigmoid(y_pred)
y_true = torch.cat(self.y_true, dim=0)
n_data, n_tasks = y_true.shape
scores = []
for task in range(n_tasks):
task_w = mask[:, task]
task_y_true = y_true[:, task][task_w != 0].numpy()
task_y_pred = y_pred[:, task][task_w != 0].numpy()
precision, recall, _thresholds = precision_recall_curve(task_y_true, task_y_pred, pos_label=1)
scores.append(auc(recall, precision))
return scores
def roc_auc_score(self):
"""Compute roc-auc score for each task.
Returns
-------
list of float
roc-auc score for all tasks
"""
mask = torch.cat(self.mask, dim=0)
y_pred = torch.cat(self.y_pred, dim=0)
y_true = torch.cat(self.y_true, dim=0)
# Todo: support categorical classes
# This assumes binary case only
        y_pred = torch.sigmoid(y_pred)  # obtain the probability of the positive class
n_tasks = y_true.shape[1]
scores = []
for task in range(n_tasks):
task_w = mask[:, task]
task_y_true = y_true[:, task][task_w != 0].numpy()
task_y_pred = y_pred[:, task][task_w != 0].numpy()
scores.append(roc_auc_score(task_y_true, task_y_pred))
return scores
def l1_loss(self, reduction):
"""Compute l1 loss for each task.
Returns
-------
list of float
l1 loss for all tasks
reduction : str
* 'mean': average the metric over all labeled data points for each task
* 'sum': sum the metric over all labeled data points for each task
"""
mask = torch.cat(self.mask, dim=0)
y_pred = torch.cat(self.y_pred, dim=0)
y_true = torch.cat(self.y_true, dim=0)
n_tasks = y_true.shape[1]
scores = []
for task in range(n_tasks):
task_w = mask[:, task]
task_y_true = y_true[:, task][task_w != 0]
task_y_pred = y_pred[:, task][task_w != 0]
scores.append(F.l1_loss(task_y_true, task_y_pred, reduction=reduction).item())
return scores
def rmse(self):
"""Compute RMSE for each task.
Returns
-------
list of float
rmse for all tasks
"""
mask = torch.cat(self.mask, dim=0)
y_pred = torch.cat(self.y_pred, dim=0)
y_true = torch.cat(self.y_true, dim=0)
n_data, n_tasks = y_true.shape
scores = []
for task in range(n_tasks):
task_w = mask[:, task]
task_y_true = y_true[:, task][task_w != 0]
task_y_pred = y_pred[:, task][task_w != 0]
scores.append(np.sqrt(F.mse_loss(task_y_pred, task_y_true).cpu().item()))
return scores
def mae(self):
"""Compute mae for each task.
Returns
-------
list of float
mae for all tasks
"""
mask = torch.cat(self.mask, dim=0)
y_pred = torch.cat(self.y_pred, dim=0)
y_true = torch.cat(self.y_true, dim=0)
n_data, n_tasks = y_true.shape
scores = []
for task in range(n_tasks):
task_w = mask[:, task]
task_y_true = y_true[:, task][task_w != 0]
task_y_pred = y_pred[:, task][task_w != 0]
scores.append(mean_absolute_error(task_y_true, task_y_pred))
return scores
def se(self):
"""Compute se score for each task.
Returns
-------
list of float
se score for all tasks
"""
mask = torch.cat(self.mask, dim=0)
y_pred = torch.cat(self.y_pred, dim=0)
y_true = torch.cat(self.y_true, dim=0)
# Todo: support categorical classes
# This assumes binary case only
        y_pred = torch.sigmoid(y_pred)  # obtain the probability of the positive class
n_tasks = y_true.shape[1]
scores = []
for task in range(n_tasks):
task_w = mask[:, task]
task_y_true = y_true[:, task][task_w != 0].numpy()
task_y_pred = y_pred[:, task][task_w != 0].numpy()
task_y_pred = [1 if i >= 0.5 else 0 for i in task_y_pred]
c_mat = confusion_matrix(task_y_true, task_y_pred)
tn, fp, fn, tp = list(c_mat.flatten())
se = tp / (tp + fn)
scores.append(se)
return scores
def precision(self):
"""Compute precision score for each task.
Returns
-------
list of float
precision score for all tasks
"""
mask = torch.cat(self.mask, dim=0)
y_pred = torch.cat(self.y_pred, dim=0)
y_true = torch.cat(self.y_true, dim=0)
# Todo: support categorical classes
# This assumes binary case only
        y_pred = torch.sigmoid(y_pred)  # obtain the probability of the positive class
n_tasks = y_true.shape[1]
scores = []
for task in range(n_tasks):
task_w = mask[:, task]
task_y_true = y_true[:, task][task_w != 0].numpy()
task_y_pred = y_pred[:, task][task_w != 0].numpy()
task_y_pred = [1 if i >=0.5 else 0 for i in task_y_pred]
c_mat = confusion_matrix(task_y_true, task_y_pred)
tn, fp, fn, tp = list(c_mat.flatten())
precision = tp / (tp + fp)
scores.append(precision)
return scores
def sp(self):
"""Compute sp score for each task.
Returns
-------
list of float
sp score for all tasks
"""
mask = torch.cat(self.mask, dim=0)
y_pred = torch.cat(self.y_pred, dim=0)
y_true = torch.cat(self.y_true, dim=0)
# Todo: support categorical classes
# This assumes binary case only
        y_pred = torch.sigmoid(y_pred)  # obtain the probability of the positive class
n_tasks = y_true.shape[1]
scores = []
for task in range(n_tasks):
task_w = mask[:, task]
task_y_true = y_true[:, task][task_w != 0].numpy()
task_y_pred = y_pred[:, task][task_w != 0].numpy()
task_y_pred = [1 if i >=0.5 else 0 for i in task_y_pred]
c_mat = confusion_matrix(task_y_true, task_y_pred)
tn, fp, fn, tp = list(c_mat.flatten())
sp = tn / (tn + fp)
scores.append(sp)
return scores
def acc(self):
"""Compute acc score for each task.
Returns
-------
list of float
acc score for all tasks
"""
mask = torch.cat(self.mask, dim=0)
y_pred = torch.cat(self.y_pred, dim=0)
y_true = torch.cat(self.y_true, dim=0)
# Todo: support categorical classes
# This assumes binary case only
        y_pred = torch.sigmoid(y_pred)  # obtain the probability of the positive class
n_tasks = y_true.shape[1]
scores = []
for task in range(n_tasks):
task_w = mask[:, task]
task_y_true = y_true[:, task][task_w != 0].numpy()
task_y_pred = y_pred[:, task][task_w != 0].numpy()
task_y_pred = [1 if i >=0.5 else 0 for i in task_y_pred]
c_mat = confusion_matrix(task_y_true, task_y_pred)
tn, fp, fn, tp = list(c_mat.flatten())
acc = (tp + tn) / (tn + fp + fn + tp)
scores.append(acc)
return scores
def mcc(self):
"""Compute mcc score for each task.
Returns
-------
list of float
mcc score for all tasks
"""
mask = torch.cat(self.mask, dim=0)
y_pred = torch.cat(self.y_pred, dim=0)
y_true = torch.cat(self.y_true, dim=0)
# Todo: support categorical classes
# This assumes binary case only
        y_pred = torch.sigmoid(y_pred)  # obtain the probability of the positive class
n_tasks = y_true.shape[1]
scores = []
for task in range(n_tasks):
task_w = mask[:, task]
task_y_true = y_true[:, task][task_w != 0].numpy()
task_y_pred = y_pred[:, task][task_w != 0].numpy()
task_y_pred = [1 if i >=0.5 else 0 for i in task_y_pred]
c_mat = confusion_matrix(task_y_true, task_y_pred)
tn, fp, fn, tp = list(c_mat.flatten())
mcc = (tp * tn - fp * fn) / np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn) + 1e-8)
scores.append(mcc)
return scores
def r2(self):
"""Compute r2 score for each task.
Returns
-------
list of float
r2 score for all tasks
"""
mask = torch.cat(self.mask, dim=0)
y_pred = torch.cat(self.y_pred, dim=0)
y_true = torch.cat(self.y_true, dim=0)
n_data, n_tasks = y_true.shape
scores = []
for task in range(n_tasks):
task_w = mask[:, task]
task_y_true = y_true[:, task][task_w != 0]
task_y_pred = y_pred[:, task][task_w != 0]
scores.append(r2_score(task_y_true, task_y_pred))
return scores
def pred(self):
mask = torch.cat(self.mask, dim=0)
y_pred = torch.cat(self.y_pred, dim=0)
        y_pred = torch.sigmoid(y_pred)  # obtain the probability of the positive class
return y_pred
def compute_metric(self, metric_name, reduction='mean'):
"""Compute metric for each task.
Parameters
----------
metric_name : str
Name for the metric to compute.
reduction : str
Only comes into effect when the metric_name is l1_loss.
* 'mean': average the metric over all labeled data points for each task
* 'sum': sum the metric over all labeled data points for each task
Returns
-------
list of float
Metric value for each task
"""
        assert metric_name in ['roc_auc', 'l1', 'rmse', 'prc_auc', 'mae', 'r2', 'se', 'sp', 'acc', 'mcc', 'pred', 'precision'], \
            'Expect metric name to be "roc_auc", "l1", "rmse", "prc_auc", "mae", "r2", "se", "sp", "acc", "mcc", "pred" or "precision", got {}'.format(metric_name)  # assert raises an exception when the condition is false
assert reduction in ['mean', 'sum']
if metric_name == 'roc_auc':
return self.roc_auc_score()
if metric_name == 'l1':
return self.l1_loss(reduction)
if metric_name == 'rmse':
return self.rmse()
if metric_name == 'prc_auc':
return self.roc_precision_recall_score()
if metric_name == 'se':
return self.se()
if metric_name == 'precision':
return self.precision()
if metric_name == 'sp':
return self.sp()
if metric_name == 'acc':
return self.acc()
if metric_name == 'mcc':
return self.mcc()
if metric_name == 'mae':
return self.mae()
if metric_name == 'r2':
return self.r2()
if metric_name == 'pred':
return self.pred()
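# Illustrative usage sketch for Meter (the validation loop, logits, labels and mask
# below are hypothetical, not part of this module):
#
#     meter = Meter()
#     for batch in validation_batches:
#         logits, labels, mask = ...            # float32 tensors of shape (B, n_tasks)
#         meter.update(logits, labels, mask)
#     mean_auc = np.mean(meter.compute_metric('roc_auc'))   # average over tasks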
class EarlyStopping(object):
"""Early stop performing
Parameters
----------
mode : str
* 'higher': Higher metric suggests a better model
* 'lower': Lower metric suggests a better model
patience : int
Number of epochs to wait before early stop
if the metric stops getting improved
filename : str or None
Filename for storing the model checkpoint
"""
    def __init__(self, mode='higher', patience=10, filename=None):  # a tolerance parameter could also be added
if filename is None:
dt = datetime.datetime.now()
filename = '{}_early_stop_{}_{:02d}-{:02d}-{:02d}.pth'.format(
dt.date(), dt.hour, dt.minute, dt.second)
assert mode in ['higher', 'lower']
self.mode = mode
if self.mode == 'higher':
            self._check = self._check_higher  # "private" attribute, only meant to be used inside the class
else:
self._check = self._check_lower
self.patience = patience
self.counter = 0
self.filename = filename
self.best_score = None
self.early_stop = False
def _check_higher(self, score, prev_best_score):
return (score > prev_best_score)
def _check_lower(self, score, prev_best_score):
return (score < prev_best_score)
def step(self, score, model):
if self.best_score is None:
self.best_score = score
self.save_checkpoint(model)
elif self._check(score, self.best_score): # 当前模型如果是更优模型,则保存当前模型
self.best_score = score
self.save_checkpoint(model)
self.counter = 0
else:
self.counter += 1
# print(
# f'EarlyStopping counter: {self.counter} out of {self.patience}')
if self.counter >= self.patience:
self.early_stop = True
return self.early_stop
def save_checkpoint(self, model):
'''Saves model when the metric on the validation set gets improved.'''
torch.save({'model_state_dict': model.state_dict()}, self.filename)
def load_checkpoint(self, model):
'''Load model saved with early stopping.'''
model.load_state_dict(torch.load(self.filename)['model_state_dict'])
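# Illustrative usage sketch for EarlyStopping (the model, data loaders,
# train_one_epoch and evaluate functions are hypothetical, not part of this module):
#
#     stopper = EarlyStopping(mode='higher', patience=10, filename='best.pth')
#     for epoch in range(n_epochs):
#         train_one_epoch(model, train_loader)
#         val_score = evaluate(model, val_loader)   # e.g. validation ROC-AUC
#         if stopper.step(val_score, model):        # returns True when patience is exhausted
#             break
#     stopper.load_checkpoint(model)                # restore the best checkpoint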
# class EarlyStopping(object):
# def __init__(self, args, mode='higher', patience=10, filename=None):
# if filename is None:
# dt = datetime.datetime.now()
# filename = './saved_model/' + '{}_{}_early_stop_{}_{:02d}-{:02d}-{:02d}.pth'.format(args['task'],args['model'], dt.date(), dt.hour, dt.minute,
# dt.second)
#
# assert mode in ['higher', 'lower']
# self.mode = mode
# if self.mode == 'higher':
# self._check = self._check_higher # "private" attribute, only meant to be used inside the class
# else:
# self._check = self._check_lower
#
# self.patience = patience
# self.counter = 0
# self.filename = filename
# self.best_score = None
# self.early_stop = False
#
# def _check_higher(self, score, prev_best_score):
# return (score > prev_best_score)
#
# def _check_lower(self, score, prev_best_score):
# return (score < prev_best_score)
#
# def step(self, score, model):
# if self.best_score is None:
# self.best_score = score
# self.save_checkpoint(model)
# elif self._check(score, self.best_score):
# self.best_score = score
# self.save_checkpoint(model)
# self.counter = 0
# else:
# self.counter += 1
# # print(
# # f'EarlyStopping counter: {self.counter} out of {self.patience}')
# if self.counter >= self.patience:
# self.early_stop = True
# return self.early_stop
#
# def save_checkpoint(self, model):
# '''Saves model when the metric on the validation set gets improved.'''
# torch.save({'model_state_dict': model.state_dict()}, self.filename)
#
# def load_checkpoint(self, model):
# '''Load model saved with early stopping.'''
# model.load_state_dict(torch.load(self.filename)['model_state_dict'])
def collate_molgraphs(data):
assert len(data[0]) in [3, 4], \
'Expect the tuple to be of length 3 or 4, got {:d}'.format(len(data[0]))
if len(data[0]) == 3:
smiles, graphs, labels = map(list, zip(*data))
masks = None
else:
smiles, graphs, labels, masks = map(list, zip(*data))
bg = dgl.batch(graphs)
bg.set_n_initializer(dgl.init.zero_initializer)
bg.set_e_initializer(dgl.init.zero_initializer)
labels = torch.stack(labels, dim=0)
if masks is None:
masks = torch.ones(labels.shape)
else:
masks = torch.stack(masks, dim=0)
return smiles, bg, labels, masks | PypiClean |
/AutONMT-tf-0.1.2.tar.gz/AutONMT-tf-0.1.2/autonmt/corpus.py | import dataclasses
from dataclasses import dataclass
import os
import json
from typing import Dict
class FilenameGenerator:
def __init__(self, workdir) -> None:
self.workdir = workdir
def get_filename(self, tag, stage):
tag_dir = os.path.join(self.workdir, tag)
os.makedirs(tag_dir, exist_ok=True)
return os.path.join(tag_dir, stage)
class Tag:
def __init__(self, id_, filename_gen : FilenameGenerator) -> None:
self.id = id_
self.filename_gen = filename_gen
self.stages = []
self.filenames = {}
def get(self, stage):
assert stage in self.stages, f"Trying to access tag '{self.id}' which is not an output of stage '{stage}'"
return self.filenames[stage]
def get_last(self):
assert self.stages, f"Tag {self.id} has no output yet"
return self.get(self.stages[-1])
def add(self, stage):
assert stage not in self.stages, f"Trying to add an already existing stage to tag {self.id}"
self.stages.append(stage)
filename = self.filename_gen.get_filename(self.id, stage)
self.filenames[stage] = filename
return filename
def to_jsonable(self):
return {
'stages': self.stages,
'filenames': self.filenames
}
@staticmethod
def from_jsonable(id_, filename_gen: FilenameGenerator, data):
result = Tag(id_, filename_gen)
result.stages = data['stages']
result.filenames = data['filenames']
return result
@dataclass
class Corpus:
filename_gen: FilenameGenerator
tags: Dict[str, Tag] = dataclasses.field(default_factory=dict)
def get_tag(self, id_) -> Tag:
assert id_ in self.tags.keys(), f"Trying to access tag '{id_}' which does not exist"
return self.tags[id_]
def get(self, tag, stage):
return self.get_tag(tag).get(stage)
def get_last(self, tag):
return self.get_tag(tag).get_last()
def add(self, tag, stage):
if tag not in self.tags.keys():
self.tags[tag] = Tag(tag, self.filename_gen)
return self.get_tag(tag).add(stage)
def to_jsonable(self):
return {
id_: tag.to_jsonable() for id_, tag in self.tags.items()
}
@staticmethod
def from_jsonable(filename_gen: FilenameGenerator, data):
return Corpus(filename_gen, { id_: Tag.from_jsonable(id_, filename_gen, tag_data) for id_, tag_data in data.items() })
@dataclass
class Corpora:
workdir: str
content: Dict[str, Corpus] = dataclasses.field(default_factory=dict)
def new_corpus(self, name):
assert name not in self.content, 'Trying to create an already existing corpus'
subdir = os.path.join(self.workdir, name)
os.makedirs(subdir, exist_ok=True)
self.content[name] = Corpus(FilenameGenerator(subdir))
def new_corpus_if_missing(self, name):
if name not in self.content:
self.new_corpus(name)
def to_jsonable(self):
return {
'workdir': self.workdir,
'content': { name: corpus.to_jsonable() for name, corpus in self.content.items() }
}
@staticmethod
def from_jsonable(data):
content = {}
workdir = data['workdir']
for name in data['content']:
subdir = os.path.join(workdir, name)
content[name] = Corpus.from_jsonable(FilenameGenerator(subdir), data['content'][name])
return Corpora(data['workdir'], content) | PypiClean |
/Neodroid-0.4.9-py36-none-any.whl/samples/mab/ucb1.py | import math
import sys
from warg.named_ordered_dictionary import NOD
def index_of_max(x):
m = max(x)
return x.index(m)
class UCB1:
def __init__(self, n_options):
self._counts = [0 for _ in range(n_options)]
self._values = [1 / n_options for _ in range(n_options)]
def select_arm(self):
n_options = len(self._counts)
for option in range(n_options):
if self._counts[option] == 0:
return option
ucb_values = [0.0 for _ in range(n_options)]
total_counts = sum(self._counts)
for option in range(n_options):
bonus = math.sqrt(
(2 * math.log(total_counts)) / float(self._counts[option])
)
ucb_values[option] = self._values[option] + bonus
return index_of_max(ucb_values)
def update_belief(self, option_index, signal):
self._counts[option_index] = options_counts_int = self._counts[option_index] + 1
options_counts_float = float(options_counts_int)
value = self._values[option_index]
new_value = ((options_counts_float - 1) / options_counts_float) * value + (
1 / options_counts_float
) * signal
self._values[option_index] = new_value
@property
def counts(self):
return self._counts
@property
def values(self):
return self._values
@property
def normalised_values(self):
s = len(self._values)
normed = [0] * s
for i in range(s):
normed[i] = self._values[i] / (sum(self._values) + sys.float_info.epsilon)
return normed
def train(self, arms, rollouts=1000) -> NOD:
for t in range(rollouts):
chosen_arm = self.select_arm()
reward = arms[chosen_arm].draw()
self.update_belief(chosen_arm, reward)
return NOD()
if __name__ == "__main__":
import random
class NormalDistributionArm:
def __init__(self, mu, sigma):
self.mu = mu
self.sigma = sigma
def draw(self):
return random.gauss(self.mu, self.sigma)
arms = [
NormalDistributionArm(4.01, 2.0),
NormalDistributionArm(4, 2.0),
NormalDistributionArm(3.99, 2.0),
]
ucb1 = UCB1(len(arms))
ucb1.train(arms)
print(ucb1.counts)
print(ucb1.values) | PypiClean |
/ARGs_OAP-2.3.2.tar.gz/ARGs_OAP-2.3.2/ARGs_OAP/bin/bbmap/sketchblacklist.sh |
usage(){
echo "
Written by Brian Bushnell
Last modified November 7, 2019
Description: Creates a blacklist sketch from common kmers,
which occur in at least X different sequences or taxa.
Please read bbmap/docs/guides/BBSketchGuide.txt for more information.
Usage: sketchblacklist.sh in=<fasta file> out=<sketch file>
Standard parameters:
in=<file> A fasta file containing one or more sequences.
out=<file> Output filename.
mintaxcount=100     Sketch kmers occurring in at least this many taxa.
k=31 Kmer length, 1-32. To maximize sensitivity and
specificity, dual kmer lengths may be used: k=31,24
mode=sequence Possible modes:
sequence: Count kmers once per sequence.
taxa: Count kmers once per taxonomic unit.
name= Set the blacklist sketch name.
delta=t Delta-compress sketches.
a48=t Encode sketches as ASCII-48 rather than hex.
amino=f Amino-acid mode.
entropy=0.66 Ignore sequence with entropy below this value.
keyfraction=0.16 Smaller values reduce blacklist size by ignoring a
a fraction of the key space. Range: 0.0001-0.5.
Taxonomy-specific flags:
tree= Specify a taxtree file. On Genepool, use 'auto'.
gi= Specify a gitable file. On Genepool, use 'auto'.
accession= Specify one or more comma-delimited NCBI accession to
taxid files. On Genepool, use 'auto'.
taxlevel=subspecies Taxa hits below this rank will be promoted and merged
with others.
prefilter=t Use a bloom filter to ignore low-count kmers.
prepasses=2 Number of prefilter passes.
prehashes=2 Number of prefilter hashes.
prebits=-1 Manually override number of prefilter cell bits.
tossjunk=t For taxa mode, discard taxonomically uninformative
sequences. This includes sequences with no taxid,
                    with a tax level of NO_RANK, or a parent taxid of LIFE.
silva=f Parse headers using Silva or semicolon-delimited syntax.
Java Parameters:
-Xmx This will set Java's memory usage, overriding autodetection.
-Xmx20g will specify 20 gigs of RAM, and -Xmx200m will specify 200 megs.
The max is typically 85% of physical memory.
-eoom This flag will cause the process to exit if an
out-of-memory exception occurs. Requires Java 8u92+.
-da Disable assertions.
For more detailed information, please read /bbmap/docs/guides/BBSketchGuide.txt.
Please contact Brian Bushnell at [email protected] if you encounter any problems.
"
}
#This block allows symlinked shellscripts to correctly set classpath.
pushd . > /dev/null
DIR="${BASH_SOURCE[0]}"
while [ -h "$DIR" ]; do
cd "$(dirname "$DIR")"
DIR="$(readlink "$(basename "$DIR")")"
done
cd "$(dirname "$DIR")"
DIR="$(pwd)/"
popd > /dev/null
#DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/"
CP="$DIR""current/"
z="-Xmx4g"
z2="-Xms4g"
set=0
if [ -z "$1" ] || [[ $1 == -h ]] || [[ $1 == --help ]]; then
usage
exit
fi
calcXmx () {
source "$DIR""/calcmem.sh"
setEnvironment
parseXmx "$@"
if [[ $set == 1 ]]; then
return
fi
freeRam 4000m 84
z="-Xmx${RAM}m"
z2="-Xms${RAM}m"
}
calcXmx "$@"
sketchblacklist() {
local CMD="java $EA $EOOM $z $z2 -cp $CP sketch.BlacklistMaker $@"
echo $CMD >&2
eval $CMD
}
sketchblacklist "$@" | PypiClean |
/GLManager-1.1.6.tar.gz/GLManager-1.1.6/README.md | # GLManager
## Manual installation
* Installing Git
```
sudo apt-get update && sudo apt-get install git -y
```
* Installing the script
```
git clone https://github.com/DuTra01/GLManager.git
cd GLManager
pip3 install -r requirements.txt
```
* Running the script
```
python3 -m app
```
## Automated installation
* Installing python3, pip3 and git
```
sudo apt-get update && sudo apt-get install git python3 python3-pip -y
```
* Installing the script
```
pip3 install git+https://github.com/DuTra01/GLManager.git
```
#### Or
```
git clone https://github.com/DuTra01/GLManager.git
cd GLManager
python3 setup.py install
```
## Updating the script
```
pip3 install --upgrade git+https://github.com/DuTra01/GLManager.git
```
#### Or
```
cd ~/
rm -rf GLManager
git clone https://github.com/DuTra01/GLManager.git
cd GLManager
python3 setup.py install
```
* Run command
```
vps
``` | PypiClean |
/ELFEsteem-0.2.1.tar.gz/ELFEsteem-0.2.1/elfesteem/cstruct.py |
import struct
type_size = {}
size2type = {}
for t in 'B', 'H', 'I', 'Q':
s = struct.calcsize(t)
type_size[t] = s * 8
size2type[s * 8] = t
type_size['u08'] = size2type[8]
type_size['u16'] = size2type[16]
type_size['u32'] = size2type[32]
type_size['u64'] = size2type[64]
def fix_size(fields, wsize):
out = []
for name, v in fields:
if v.endswith("s"):
pass
elif v == "ptr":
v = size2type[wsize]
elif not v in type_size:
raise ValueError("unkown Cstruct type", v)
else:
v = type_size[v]
out.append((name, v))
fields = out
return fields
class Cstruct_Metaclass(type):
def __new__(cls, name, bases, dct):
o = super(Cstruct_Metaclass, cls).__new__(cls, name, bases, dct)
o._packstring = o._packformat + \
"".join(map(lambda x: x[1], o._fields))
o._size = struct.calcsize(o._packstring)
return o
class CStruct(object):
#__metaclass__ = Cstruct_Metaclass
_packformat = ""
_fields = []
@classmethod
def _from_file(cls, f):
return cls(f.read(cls._size))
def __init__(self, sex, wsize, *args, **kargs):
if sex == 1:
sex = '<'
else:
sex = '>'
# packformat enforce sex
if self._packformat:
sex = ""
pstr = fix_size(self._fields, wsize)
self._packstring = sex + self._packformat + \
"".join(map(lambda x: x[1], pstr))
self._size = struct.calcsize(self._packstring)
self._names = map(lambda x: x[0], self._fields)
if kargs:
self.__dict__.update(kargs)
else:
s = ""
if args:
s = args[0]
s += "\x00" * self._size
s = s[:self._size]
self._unpack(s)
def _unpack(self, s):
disas = struct.unpack(self._packstring, s)
for n, v in zip(self._names, disas):
setattr(self, n, v)
def _pack(self):
return struct.pack(self._packstring,
*map(lambda x: getattr(self, x), self._names))
def _spack(self, superstruct, shift=0):
attr0 = map(lambda x: getattr(self, x), self._names)
attr = []
for s in attr0:
if isinstance(s, CStruct):
if s in superstruct:
s = reduce(lambda x, y: x + len(y),
superstruct[:superstruct.index(s)],
0)
s += shift
else:
raise Exception("%s not un superstructure" % repr(s))
attr.append(s)
return struct.pack(self._packstring, *attr)
def _copy(self):
return self.__class__(**self.__dict__)
def __len__(self):
return self._size
def __str__(self):
return self._pack()
def __repr__(self):
return "<%s=%s>" % (self.__class__.__name__, "/".join(map(lambda x: repr(getattr(self, x[0])), self._fields)))
def __getitem__(self, item): # to work with format strings
return getattr(self, item)
def _show(self):
print "##%s:" % self.__class__.__name__
fmt = "%%-%is = %%r" % max(map(lambda x: len(x[0]), self._fields))
for fn, ft in self._fields:
print fmt % (fn, getattr(self, fn))
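# Illustrative sketch of how CStruct is meant to be subclassed (this example
# structure and the raw_bytes variable are made up, not part of elfesteem):
#
#     class ExampleHeader(CStruct):
#         _fields = [("magic", "u32"),
#                    ("version", "u16"),
#                    ("flags", "u16"),
#                    ("entry", "ptr")]
#
#     # sex=1 selects little endian; wsize=32 resolves the "ptr" field to 32 bits
#     hdr = ExampleHeader(1, 32, raw_bytes)
#     print hdr.magic
#     packed = hdr._pack()   # back to raw bytes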
class CStructStruct:
def __init__(self, lst, shift=0):
self._lst = lst
self._shift = shift
def __getattr__(self, attr):
return getattr(self._lst, attr)
def __str__(self):
s = []
for a in self._lst:
if type(a) is str:
s.append(a)
else:
s.append(a._spack(self._lst, self._shift))
return "".join(s) | PypiClean |
/NASGrpcFileSystem2-1.3.8.tar.gz/NASGrpcFileSystem2-1.3.8/NASGrpcFileSystem/proto/files_pb2.py |
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='files.proto',
package='',
syntax='proto3',
serialized_options=None,
serialized_pb=b'\n\x0b\x66iles.proto\";\n\rcheck_request\x12\x0c\n\x04\x66ile\x18\x01 \x01(\x0c\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0e\n\x06\x66_code\x18\x03 \x01(\t\"\x95\x01\n\x0e\x63reate_request\x12\x13\n\x0bx_file_path\x18\x01 \x01(\t\x12\x11\n\tfile_data\x18\x02 \x01(\x0c\x12\x11\n\tfile_name\x18\x03 \x01(\t\x12\x14\n\x0cx_mount_path\x18\x04 \x01(\t\x12\x0e\n\x06x_type\x18\x05 \x01(\t\x12\x12\n\nis_replace\x18\x06 \x01(\x08\x12\x0e\n\x06\x66_code\x18\x07 \x01(\t\"7\n\x10\x64\x65scribe_request\x12\x13\n\x0blocate_file\x18\x01 \x01(\t\x12\x0e\n\x06\x66_code\x18\x02 \x01(\t\"Z\n\x0emodify_request\x12\x11\n\tfile_path\x18\x01 \x01(\t\x12\x11\n\tfile_name\x18\x02 \x01(\t\x12\x12\n\nis_replace\x18\x03 \x01(\x08\x12\x0e\n\x06\x66_code\x18\x04 \x01(\t\"Q\n\x0c\x63opy_request\x12\x1a\n\x12original_file_path\x18\x01 \x01(\t\x12\x15\n\rnew_file_path\x18\x02 \x01(\t\x12\x0e\n\x06\x66_code\x18\x03 \x01(\t\"Q\n\x0cmove_request\x12\x1a\n\x12original_file_path\x18\x01 \x01(\t\x12\x15\n\rnew_file_path\x18\x02 \x01(\t\x12\x0e\n\x06\x66_code\x18\x03 \x01(\t\"4\n\nfile_reply\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x0b\n\x03\x65rr\x18\x02 \x01(\t\x12\x0b\n\x03\x62iz\x18\x03 \x01(\t\"6\n\x0b\x66ile_stream\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x0b\n\x03\x65rr\x18\x02 \x01(\t\x12\x0c\n\x04\x66ile\x18\x03 \x01(\x0c\x32\x9d\x02\n\nFileWorker\x12,\n\x0bhealthCheck\x12\x0e.check_request\x1a\x0b.file_reply\"\x00\x12\x31\n\x0c\x64\x65scribeFile\x12\x11.describe_request\x1a\x0c.file_stream\"\x00\x12,\n\ncreateFile\x12\x0f.create_request\x1a\x0b.file_reply\"\x00\x12,\n\nmodifyFile\x12\x0f.modify_request\x1a\x0b.file_reply\"\x00\x12(\n\x08\x63opyFile\x12\r.copy_request\x1a\x0b.file_reply\"\x00\x12(\n\x08moveFile\x12\r.move_request\x1a\x0b.file_reply\"\x00\x62\x06proto3'
)
_CHECK_REQUEST = _descriptor.Descriptor(
name='check_request',
full_name='check_request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='file', full_name='check_request.file', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR,),
_descriptor.FieldDescriptor(
name='name', full_name='check_request.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR,),
_descriptor.FieldDescriptor(
name='f_code', full_name='check_request.f_code', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR,),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=15,
serialized_end=74,
)
_CREATE_REQUEST = _descriptor.Descriptor(
name='create_request',
full_name='create_request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='x_file_path', full_name='create_request.x_file_path', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR,),
_descriptor.FieldDescriptor(
name='file_data', full_name='create_request.file_data', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR,),
_descriptor.FieldDescriptor(
name='file_name', full_name='create_request.file_name', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR,),
_descriptor.FieldDescriptor(
name='x_mount_path', full_name='create_request.x_mount_path', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR,),
_descriptor.FieldDescriptor(
name='x_type', full_name='create_request.x_type', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR,),
_descriptor.FieldDescriptor(
name='is_replace', full_name='create_request.is_replace', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR,),
_descriptor.FieldDescriptor(
name='f_code', full_name='create_request.f_code', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR,),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=77,
serialized_end=226,
)
_DESCRIBE_REQUEST = _descriptor.Descriptor(
name='describe_request',
full_name='describe_request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='locate_file', full_name='describe_request.locate_file', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR,),
_descriptor.FieldDescriptor(
name='f_code', full_name='describe_request.f_code', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR,),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=228,
serialized_end=283,
)
_MODIFY_REQUEST = _descriptor.Descriptor(
name='modify_request',
full_name='modify_request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='file_path', full_name='modify_request.file_path', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR,),
_descriptor.FieldDescriptor(
name='file_name', full_name='modify_request.file_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR,),
_descriptor.FieldDescriptor(
name='is_replace', full_name='modify_request.is_replace', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR,),
_descriptor.FieldDescriptor(
name='f_code', full_name='modify_request.f_code', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR,),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=285,
serialized_end=375,
)
_COPY_REQUEST = _descriptor.Descriptor(
name='copy_request',
full_name='copy_request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='original_file_path', full_name='copy_request.original_file_path', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR,),
_descriptor.FieldDescriptor(
name='new_file_path', full_name='copy_request.new_file_path', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR,),
_descriptor.FieldDescriptor(
name='f_code', full_name='copy_request.f_code', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR,),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=377,
serialized_end=458,
)
_MOVE_REQUEST = _descriptor.Descriptor(
name='move_request',
full_name='move_request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='original_file_path', full_name='move_request.original_file_path', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR,),
_descriptor.FieldDescriptor(
name='new_file_path', full_name='move_request.new_file_path', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR,),
_descriptor.FieldDescriptor(
name='f_code', full_name='move_request.f_code', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR,),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=460,
serialized_end=541,
)
_FILE_REPLY = _descriptor.Descriptor(
name='file_reply',
full_name='file_reply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='file_reply.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR,),
_descriptor.FieldDescriptor(
name='err', full_name='file_reply.err', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR,),
_descriptor.FieldDescriptor(
name='biz', full_name='file_reply.biz', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR,),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=543,
serialized_end=595,
)
_FILE_STREAM = _descriptor.Descriptor(
name='file_stream',
full_name='file_stream',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='file_stream.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR,),
_descriptor.FieldDescriptor(
name='err', full_name='file_stream.err', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR,),
_descriptor.FieldDescriptor(
name='file', full_name='file_stream.file', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR,),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=597,
serialized_end=651,
)
DESCRIPTOR.message_types_by_name['check_request'] = _CHECK_REQUEST
DESCRIPTOR.message_types_by_name['create_request'] = _CREATE_REQUEST
DESCRIPTOR.message_types_by_name['describe_request'] = _DESCRIBE_REQUEST
DESCRIPTOR.message_types_by_name['modify_request'] = _MODIFY_REQUEST
DESCRIPTOR.message_types_by_name['copy_request'] = _COPY_REQUEST
DESCRIPTOR.message_types_by_name['move_request'] = _MOVE_REQUEST
DESCRIPTOR.message_types_by_name['file_reply'] = _FILE_REPLY
DESCRIPTOR.message_types_by_name['file_stream'] = _FILE_STREAM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
check_request = _reflection.GeneratedProtocolMessageType('check_request', (_message.Message,), {
'DESCRIPTOR': _CHECK_REQUEST,
'__module__': 'files_pb2'
# @@protoc_insertion_point(class_scope:check_request)
})
_sym_db.RegisterMessage(check_request)
create_request = _reflection.GeneratedProtocolMessageType('create_request', (_message.Message,), {
'DESCRIPTOR': _CREATE_REQUEST,
'__module__': 'files_pb2'
# @@protoc_insertion_point(class_scope:create_request)
})
_sym_db.RegisterMessage(create_request)
describe_request = _reflection.GeneratedProtocolMessageType('describe_request', (_message.Message,), {
'DESCRIPTOR': _DESCRIBE_REQUEST,
'__module__': 'files_pb2'
# @@protoc_insertion_point(class_scope:describe_request)
})
_sym_db.RegisterMessage(describe_request)
modify_request = _reflection.GeneratedProtocolMessageType('modify_request', (_message.Message,), {
'DESCRIPTOR': _MODIFY_REQUEST,
'__module__': 'files_pb2'
# @@protoc_insertion_point(class_scope:modify_request)
})
_sym_db.RegisterMessage(modify_request)
copy_request = _reflection.GeneratedProtocolMessageType('copy_request', (_message.Message,), {
'DESCRIPTOR': _COPY_REQUEST,
'__module__': 'files_pb2'
# @@protoc_insertion_point(class_scope:copy_request)
})
_sym_db.RegisterMessage(copy_request)
move_request = _reflection.GeneratedProtocolMessageType('move_request', (_message.Message,), {
'DESCRIPTOR': _MOVE_REQUEST,
'__module__': 'files_pb2'
# @@protoc_insertion_point(class_scope:move_request)
})
_sym_db.RegisterMessage(move_request)
file_reply = _reflection.GeneratedProtocolMessageType('file_reply', (_message.Message,), {
'DESCRIPTOR': _FILE_REPLY,
'__module__': 'files_pb2'
# @@protoc_insertion_point(class_scope:file_reply)
})
_sym_db.RegisterMessage(file_reply)
file_stream = _reflection.GeneratedProtocolMessageType('file_stream', (_message.Message,), {
'DESCRIPTOR': _FILE_STREAM,
'__module__': 'files_pb2'
# @@protoc_insertion_point(class_scope:file_stream)
})
_sym_db.RegisterMessage(file_stream)
_FILEWORKER = _descriptor.ServiceDescriptor(
name='FileWorker',
full_name='FileWorker',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=654,
serialized_end=939,
methods=[
_descriptor.MethodDescriptor(
name='healthCheck',
full_name='FileWorker.healthCheck',
index=0,
containing_service=None,
input_type=_CHECK_REQUEST,
output_type=_FILE_REPLY,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='describeFile',
full_name='FileWorker.describeFile',
index=1,
containing_service=None,
input_type=_DESCRIBE_REQUEST,
output_type=_FILE_STREAM,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='createFile',
full_name='FileWorker.createFile',
index=2,
containing_service=None,
input_type=_CREATE_REQUEST,
output_type=_FILE_REPLY,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='modifyFile',
full_name='FileWorker.modifyFile',
index=3,
containing_service=None,
input_type=_MODIFY_REQUEST,
output_type=_FILE_REPLY,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='copyFile',
full_name='FileWorker.copyFile',
index=4,
containing_service=None,
input_type=_COPY_REQUEST,
output_type=_FILE_REPLY,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='moveFile',
full_name='FileWorker.moveFile',
index=5,
containing_service=None,
input_type=_MOVE_REQUEST,
output_type=_FILE_REPLY,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_FILEWORKER)
DESCRIPTOR.services_by_name['FileWorker'] = _FILEWORKER
# @@protoc_insertion_point(module_scope)
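# Example: a minimal sketch of how client code typically uses the generated
# messages (the module name ``files_pb2`` is taken from ``__module__`` above;
# the field names come from the descriptors in this file; the values are
# purely illustrative):
#
#     import files_pb2
#
#     req = files_pb2.describe_request(locate_file='/tmp/example.txt',
#                                      f_code='demo')
#     payload = req.SerializeToString()        # bytes ready for the wire
#     parsed = files_pb2.describe_request()
#     parsed.ParseFromString(payload)
#     assert parsed.locate_file == '/tmp/example.txt'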
/FitBenchmarking-1.0.0.tar.gz/FitBenchmarking-1.0.0/fitbenchmarking/parsing/fitting_problem.py
try:
from itertools import izip_longest
except ImportError:
# python3
from itertools import zip_longest as izip_longest
import numpy as np
from fitbenchmarking.utils.debug import get_printable_table
from fitbenchmarking.utils.exceptions import FittingProblemError, \
IncorrectBoundsError
from fitbenchmarking.utils.timer import TimerWithMaxTime
# Using property getters and setters means that the setter does not always use
# self
# pylint: disable=no-self-use
class FittingProblem:
r"""
Definition of a fitting problem, which will be populated by a parser from a
problem definition file.
Once populated, this should include the data, the function, and any other
additional requirements from the data.
"""
def __init__(self, options):
"""
Initialises variable used for temporary storage.
:param options: all the information specified by the user
:type options: fitbenchmarking.utils.options.Options
"""
self.options = options
#: *string* Name (title) of the fitting problem
self.name = None
#: *string* Name of the problem definition type (e.g., 'cutest')
self.format = None
#: *string* The plot scale for the y and x data
self.plot_scale = None
#: *string* Equation (function or model) to fit against data
self.equation = None
#: *string* Description of the fitting problem
self.description = ''
#: *float* The start of the range to fit model data over
#: (if different from entire range)
self.start_x = None
#: *float* The end of the range to fit model data over
#: (if different from entire range)
self.end_x = None
#: *numpy array* The x-data
self.data_x = None
#: *numpy array* The y-data
self.data_y = None
#: *numpy array* The errors or weights
self.data_e = None
#: *list of dict*
#: Starting values of the fitting parameters
#:
#: e.g.
#: :code:`[{p1_name: p1_val1, p2_name: p2_val1, ...},
#: {p1_name: p1_val2, ...}, ...]`
self.starting_values: list = []
#: *list*
#: Smallest and largest values of interest in the data
#:
#: e.g.
#: :code:`[(p1_min, p1_max), (p2_min, p2_max),...]`
self.value_ranges = None
#: Callable function
self.function = None
self._param_names = None
#: *numpy array* The index for sorting the data (used in plotting)
self.sorted_index = None
#: *dict*
#: Container for software specific information.
#: This should be avoided if possible.
self.additional_info = {}
#: *bool* Used to check if a problem is using multifit.
self.multifit = False
#: Callable function for the Jacobian
self.jacobian = None
#: *bool*
#: Whether the function has been wrapped to reduce the dimension of x
#: on function calls
self.multivariate = False
#: Callable function for the Hessian
self.hessian = None
# The timer used to check if the 'max_runtime' is exceeded.
self.timer = TimerWithMaxTime(self.options.max_runtime)
#: Cache for calculating the initial value of the problem for plots
self._ini_y = {}
def __str__(self):
info = {"Name": self.name,
"Format": self.format,
"Equation": self.equation,
"Params": self._param_names,
"Start X": self.start_x,
"End X": self.end_x,
"MultiFit": self.multifit}
return get_printable_table("FittingProblem", info)
def eval_model(self, params, **kwargs):
"""
Function evaluation method
:param params: parameter value(s)
:type params: list
:return: data values evaluated from the function of the problem
:rtype: numpy array
"""
if self.function is None:
raise FittingProblemError('Cannot call function before setting '
'function.')
self.timer.check_elapsed_time()
x = kwargs.get("x", self.data_x)
return self.function(x, *params) # pylint: disable=not-callable
@property
def param_names(self):
"""
Utility function to get the parameter names
:return: the names of the parameters
:rtype: list of str
"""
if self._param_names is None:
self._param_names = list(self.starting_values[0].keys())
return self._param_names
@param_names.setter
def param_names(self, value):
raise FittingProblemError('param_names should not be edited')
def get_function_params(self, params):
"""
Return the function definition in a string format for output
:param params: The parameters to use in the function string
:type params: list
:return: Representation of the function
example format: 'b1 * (b2+x) | b1=-2.0, b2=50.0'
:rtype: string
"""
params = [f'{n}={p}' for n, p
in izip_longest(self.param_names,
params if params is not None else [])]
param_string = ', '.join(params)
return param_string
def verify(self):
"""
Basic check that minimal set of attributes have been set.
Raise FittingProblemError if object is not properly initialised.
"""
values = {'data_x': np.ndarray,
'data_y': np.ndarray,
'starting_values': list}
for attr_name, attr_type in values.items():
attr = getattr(self, attr_name)
type_match = isinstance(attr, attr_type)
try:
type_match = type_match or isinstance(attr[0], attr_type) # NOQA; pylint: disable=unsubscriptable-object
except (TypeError, IndexError):
pass
if not type_match:
raise FittingProblemError(
f'Attribute "{attr_name}" is not the expected type.'
f' Expected "{attr_type}", got "{type(attr)}".')
if self.function is None:
raise FittingProblemError('Attribute "function" has not been set.')
def correct_data(self):
"""
Strip data that overruns the start and end x_range,
and approximate errors if not given.
Modifications happen on member variables.
"""
use_errors = "weighted_nlls" in self.options.cost_func_type
if not self.multifit:
correct_vals = correct_data(x=self.data_x,
y=self.data_y,
e=self.data_e,
startx=self.start_x,
endx=self.end_x,
use_errors=use_errors)
self.data_x = correct_vals[0]
self.data_y = correct_vals[1]
self.data_e = correct_vals[2]
self.sorted_index = correct_vals[3]
else:
# Mantid multifit problem
self.sorted_index = []
num_data = len(self.data_x)
for i in range(num_data):
# pylint: disable=unsubscriptable-object
correct_vals = correct_data(x=self.data_x[i],
y=self.data_y[i],
e=self.data_e[i],
startx=self.start_x[i],
endx=self.end_x[i],
use_errors=use_errors)
self.data_x[i] = correct_vals[0]
self.data_y[i] = correct_vals[1]
self.data_e[i] = correct_vals[2]
self.sorted_index.append(correct_vals[3])
def set_value_ranges(self, value_ranges):
"""
Function to format parameter bounds before passing to controllers,
so self.value_ranges is a list of tuples, which contain lower and
upper bounds (lb,ub) for each parameter in the problem
:param value_ranges: dictionary of bounded parameter names with
lower and upper bound values e.g.
:code:`{p1_name: [p1_min, p1_max], ...}`
:type value_ranges: dict
"""
lower_param_names = [name.lower()
for name in self.starting_values[0].keys()]
if not all(name in lower_param_names for name in value_ranges):
raise IncorrectBoundsError('One or more of the parameter names in '
'the `parameter_ranges` dictionary is '
'incorrect, please check the problem '
'definition file for this problem.')
self.value_ranges = []
for name in lower_param_names:
if name in value_ranges:
self.value_ranges.append(
(value_ranges[name][0], value_ranges[name][1]))
else:
self.value_ranges.append((-np.inf, np.inf))
def ini_y(self, parameter_set=0):
"""
Return the result of evaluating the problem at the initial parameters
for plotting.
:param parameter_set: The initial parameters to use, defaults to 0
:type parameter_set: int, optional
:return: The initial estimates
:rtype: numpy.ndarray
"""
if parameter_set not in self._ini_y:
params = self.starting_values[parameter_set].values()
if self.multifit:
self._ini_y[parameter_set] = [
self.eval_model(params=params, x=x)
for x in self.data_x
]
else:
self._ini_y[parameter_set] = self.eval_model(params=params)
return self._ini_y[parameter_set]
def correct_data(x, y, e, startx, endx, use_errors):
"""
Strip data that overruns the start and end x_range,
and approximate errors if not given.
:param x: x data
:type x: np.array
:param y: y data
:type y: np.array
:param e: error data
:type e: np.array
:param startx: minimum x value
:type startx: float
:param endx: maximum x value
:type endx: float
:param use_errors: whether errors should be added if not present
:type use_errors: bool
"""
# fix data_e
if use_errors:
if e is None:
e = np.sqrt(abs(y))
# The values of data_e are used to divide the residuals.
# If these are zero (or close to zero), then this blows up.
# This is particularly a problem if trying to fit
# counts, which may follow a Poisson distribution.
#
# Fix this by cutting values less than a certain value
trim_value = 1.0e-8
e[e < trim_value] = trim_value
else:
e = None
# impose x ranges
# pylint: disable=no-member, assignment-from-no-return
if startx is not None and endx is not None:
mask = np.logical_and(x >= startx,
x <= endx)
x = x[mask]
y = y[mask]
if e is not None:
e = e[mask]
# Stores the indices of the sorted data
sorted_index = np.argsort(x)
return x, y, e, sorted_index
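# Example: a minimal sketch of ``correct_data`` trimming a dataset to the
# fitting range and approximating errors when none are supplied (the numbers
# here are illustrative, not taken from a real benchmark problem).
if __name__ == "__main__":
    x = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
    y = np.array([0.0, 1.0, 4.0, 9.0, 16.0])
    x_c, y_c, e_c, order = correct_data(x, y, e=None,
                                        startx=1.0, endx=3.0,
                                        use_errors=True)
    # x_c is [1., 2., 3.]; e_c is sqrt(|y|) with near-zero values clipped
    print(x_c, y_c, e_c, order)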
/Django-4.2.4.tar.gz/Django-4.2.4/django/contrib/auth/migrations/0001_initial.py
import django.contrib.auth.models
from django.contrib.auth import validators
from django.db import migrations, models
from django.utils import timezone
class Migration(migrations.Migration):
dependencies = [
("contenttypes", "__first__"),
]
operations = [
migrations.CreateModel(
name="Permission",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
("name", models.CharField(max_length=50, verbose_name="name")),
(
"content_type",
models.ForeignKey(
to="contenttypes.ContentType",
on_delete=models.CASCADE,
verbose_name="content type",
),
),
("codename", models.CharField(max_length=100, verbose_name="codename")),
],
options={
"ordering": [
"content_type__app_label",
"content_type__model",
"codename",
],
"unique_together": {("content_type", "codename")},
"verbose_name": "permission",
"verbose_name_plural": "permissions",
},
managers=[
("objects", django.contrib.auth.models.PermissionManager()),
],
),
migrations.CreateModel(
name="Group",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
(
"name",
models.CharField(unique=True, max_length=80, verbose_name="name"),
),
(
"permissions",
models.ManyToManyField(
to="auth.Permission", verbose_name="permissions", blank=True
),
),
],
options={
"verbose_name": "group",
"verbose_name_plural": "groups",
},
managers=[
("objects", django.contrib.auth.models.GroupManager()),
],
),
migrations.CreateModel(
name="User",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
("password", models.CharField(max_length=128, verbose_name="password")),
(
"last_login",
models.DateTimeField(
default=timezone.now, verbose_name="last login"
),
),
(
"is_superuser",
models.BooleanField(
default=False,
help_text=(
"Designates that this user has all permissions without "
"explicitly assigning them."
),
verbose_name="superuser status",
),
),
(
"username",
models.CharField(
help_text=(
"Required. 30 characters or fewer. Letters, digits and "
"@/./+/-/_ only."
),
unique=True,
max_length=30,
verbose_name="username",
validators=[validators.UnicodeUsernameValidator()],
),
),
(
"first_name",
models.CharField(
max_length=30, verbose_name="first name", blank=True
),
),
(
"last_name",
models.CharField(
max_length=30, verbose_name="last name", blank=True
),
),
(
"email",
models.EmailField(
max_length=75, verbose_name="email address", blank=True
),
),
(
"is_staff",
models.BooleanField(
default=False,
help_text=(
"Designates whether the user can log into this admin site."
),
verbose_name="staff status",
),
),
(
"is_active",
models.BooleanField(
default=True,
verbose_name="active",
help_text=(
"Designates whether this user should be treated as active. "
"Unselect this instead of deleting accounts."
),
),
),
(
"date_joined",
models.DateTimeField(
default=timezone.now, verbose_name="date joined"
),
),
(
"groups",
models.ManyToManyField(
to="auth.Group",
verbose_name="groups",
blank=True,
related_name="user_set",
related_query_name="user",
help_text=(
"The groups this user belongs to. A user will get all "
"permissions granted to each of their groups."
),
),
),
(
"user_permissions",
models.ManyToManyField(
to="auth.Permission",
verbose_name="user permissions",
blank=True,
help_text="Specific permissions for this user.",
related_name="user_set",
related_query_name="user",
),
),
],
options={
"swappable": "AUTH_USER_MODEL",
"verbose_name": "user",
"verbose_name_plural": "users",
},
managers=[
("objects", django.contrib.auth.models.UserManager()),
],
),
]
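# Example: because the User model above is declared with
# ``"swappable": "AUTH_USER_MODEL"``, other apps should reference it
# indirectly so a custom user model can be swapped in later. A typical
# (illustrative) foreign key in another app's models.py might look like:
#
#     from django.conf import settings
#     from django.db import models
#
#     class Profile(models.Model):
#         user = models.ForeignKey(settings.AUTH_USER_MODEL,
#                                  on_delete=models.CASCADE)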
/ForesightPy-1.0.11.tar.gz/ForesightPy-1.0.11/Foresight/deeplearning.py
from copy import deepcopy
import time
import random
import warnings
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch.utils.data import TensorDataset, DataLoader
from sklearn.model_selection import train_test_split
def set_seed(seed):
"""Sets the random seeds to ensure deterministic behaviour.
:param seed: The number that is set for the random seeds
:type seed: int
:return: Confirmation that seeds have been set
:rtype: bool
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# If running on CUDA additional seeds need to be set
if torch.cuda.device_count() > 0 and torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.enabled = False
warnings.filterwarnings("ignore")
return True
def model_save(model, name, path="../Results/Pths/"):
"""Saving function for the model.
:param model: The model to save
:type model: torch.nn
:param name: The name to save the model under
:type name: string
:param path: The directory path to save the model in
:type path: string
"""
print("Saving model:", path + name + '.pth')
torch.save(model, path + name + '.pth')
def model_load(model_name, device, path="../Results/Pths/"):
"""Loading function for the models.
:param model_name: The model name to load
:type model_name: string
:param device: The device to run on (Cpu or CUDA)
:type device: string
:param path: The directory path to load the model from
:type path: string
"""
model = torch.load(path + model_name + '.pth', map_location=device)
return model
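# Example: a typical save/load round trip with the helpers above (the model
# name is illustrative and ``trained_model`` stands in for any torch.nn model;
# the paths shown are the functions' defaults).
#
#     model_save(trained_model, "copper_lstm", path="../Results/Pths/")
#     restored = model_load("copper_lstm", device="cpu", path="../Results/Pths/")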
class EarlyStopping:
"""Used to facilitate early stopping during the training
of neural networks.
When called, if the validation score has not improved by at least the
relative tolerance set by the user, a counter is incremented. If the
counter passes a set value then the stop attribute is set to true. This
should be used as a break condition in the training loop.
If rel_tol is set to 0 then the metric just needs to improve on its
existing value.
:param patience: The amount of epochs without improvement before stopping
:type patience: int
:param rel_tol: The relative improvement % that must be achieved
:type rel_tol: float
:param verbose: Whether to print the count number
:type verbose: bool
:param best_score: The best score achieved so far
:type best_score: float
:param counter: The amount of epochs without improvement so far
:type counter: int
:param stop: Whether the stopping criterion has been met
:type stop: bool
"""
def __init__(self, patience, rel_tol, verbose=True):
self.patience = patience
self.rel_tol = rel_tol
self.verbose = verbose
self.best_score = np.inf
self.counter = 0
self.stop = False
def __call__(self, score):
"""Every time the object is called the score is checked to see if it
has improved. If it hasn't the counter is incremented, if it has
improved then the counter resets"""
if score >= self.best_score * (1 - self.rel_tol):
self.counter += 1
else:
self.counter = 0
if score < self.best_score:
self.best_score = score
if self.counter >= self.patience:
self.stop = True
if self.verbose:
print("Count:", self.counter)
class DeepLearning:
"""Class to perform training and validation for a given model
:param model: The neural network model
:type model: nn.module
:param data_X: The training dataset
:type data_X: np.array
:param data_y: the target dataset
:type data_y: np.array
:param n_epochs: The number of epochs of training
:type n_epochs: int
:param optimiser: The type of optimiser used
:type optimiser: torch.optim
:param batch_size: The batch size
:type batch_size: int
:param loss_function: The loss function used
:type loss_function: torch.nn.modules.loss
:param device: The device to run on (Cpu or CUDA)
:type device: string
:param seed: The number that is set for the random seeds
:type seed: int
:param debug: Whether to print some parameters for checking
:type debug: bool
:param disp_freq: The epoch frequency that training/validation
metrics will be printed on
:type disp_freq: int
:param fig_disp_freq: The frequency that training/validation prediction
figures will be made
:type fig_disp_freq: int
:param early_stop: Whether early stopping is utilized
:type early_stop: bool
:param early_verbose: Whether to print out the early stopping counter
:type early_verbose: bool
:param patience: The amount of epochs without improvement before stopping
:type patience: int
:param rel_tol: The relative improvement percentage that must be achieved
:type rel_tol: float
:param scaler_data_X: The data X scaler object for inverse scaling
:type scaler_data_X: sklearn.preprocessing.data.MinMaxScaler
:param scaler_data_y: The data y scaler object for inverse scaling
:type scaler_data_y: sklearn.preprocessing.data.MinMaxScaler
"""
def __init__(self, model, data_X, data_y,
optimiser,
batch_size=128,
n_epochs=100,
loss_function=torch.nn.MSELoss(reduction='sum'),
device="cpu",
seed=42,
debug=True,
disp_freq=20,
fig_disp_freq=50,
early_stop=True,
early_verbose=False,
patience=50,
rel_tol=0,
scaler_data_X=None,
scaler_data_y=None):
# Given parameters
self.model = model
self.optimiser = optimiser
self.data_X = data_X
self.data_y = data_y
self.n_epochs = n_epochs
self.loss_function = loss_function
self.device = device
self.seed = seed
self.debug = debug
self.disp_freq = disp_freq
self.fig_disp_freq = fig_disp_freq
self.batch_size = batch_size
self.early_stop = early_stop
# The maxminscaler objects used to scale the data.
# Stored so can be used for inverse scaling later if loading
# pickled objects
self.scaler_data_X = scaler_data_X
self.scaler_data_y = scaler_data_y
# Store for training/val logs
self.logs = {}
# The array of predicted values calculated using the training function
self.train_predictions = None
# The array of predicted values calculated using the validate function
self.val_predictions = None
# The array of predicted values calculated using the evaluate function
self.test_predictions = None
# The data
self.X_train = None
self.X_val = None
self.X_test = None
# The targets
self.y_train = None
self.y_val = None
self.y_test = None
# The data loader
self.train_loader = None
self.val_loader = None
self.test_loader = None
# Storing the best model
self.best_model = self.model
# Dummy variable for inspecting quantities
self.inspect = None
# Used to running data loaders on CUDA
self.pin_memory = False
# Initialising the early stop object
if self.early_stop:
self.early = EarlyStopping(patience=patience,
rel_tol=rel_tol,
verbose=early_verbose)
# Tag for using multi task learning
self.mtl = False
# The tracker for the best validation score
self.best_val_score = np.inf
def train_val_test(self):
"""Splits the DataFrames in to a training, validation
and test set and creates torch tensors from the underlying
numpy arrays"""
# Splitting the datasets into (train/val) and test.
self.X_train, self.X_test, \
self.y_train, self.y_test = train_test_split(
self.data_X, self.data_y, test_size=0.2, shuffle=False)
# Splitting the (train/val) dataset into train and val datasets.
self.X_train, self.X_val, \
self.y_train, self.y_val = train_test_split(
self.X_train, self.y_train, test_size=0.25, shuffle=False)
if self.debug:
print("Train Length: \t\t%i\n"
"Validation Length: \t%i\n"
"Test Length:\t\t%i"
% (len(self.X_train), len(self.X_val), len(self.X_test)))
# Tensor of training data/labels
self.X_train = torch.from_numpy(self.X_train).float()
self.y_train = torch.from_numpy(self.y_train).float()
# Tensor of validation data/labels
self.X_val = torch.from_numpy(self.X_val).float()
self.y_val = torch.from_numpy(self.y_val).float()
# Tensor of test data/labels
self.X_test = torch.from_numpy(self.X_test).float()
self.y_test = torch.from_numpy(self.y_test).float()
# Size Check
if self.debug:
print("\nInitial Size Check:")
self.size_check()
# If the labels have more than one target feature then using MTL
if self.y_test.shape[1] > 1:
self.mtl = True
def size_check(self):
"""Checks the size of the datasets"""
print("\nX Train Shape:\t\t", self.X_train.size())
print("X Val Shape:\t\t", self.X_val.size())
print("X Test Shape:\t\t", self.X_test.size())
print("\ny Train Shape:\t\t", self.y_train.size())
print("y Val Shape:\t\t", self.y_val.size())
print("y Test Shape:\t\t", self.y_test.size())
def create_data_loaders(self):
"""Forms iterators to pipeline in the data/labels"""
# Creating Tensor datasets
train_dataset = TensorDataset(self.X_train, self.y_train)
val_dataset = TensorDataset(self.X_val, self.y_val)
test_dataset = TensorDataset(self.X_test, self.y_test)
if self.device == 'cuda':
self.pin_memory = True
# Creating data loaders
self.train_loader = DataLoader(dataset=train_dataset,
batch_size=self.batch_size,
shuffle=False, num_workers=4,
pin_memory=self.pin_memory)
self.val_loader = DataLoader(dataset=val_dataset,
batch_size=self.batch_size, shuffle=False,
num_workers=4, pin_memory=self.pin_memory)
self.test_loader = DataLoader(dataset=test_dataset,
batch_size=self.batch_size,
shuffle=False, num_workers=4,
pin_memory=self.pin_memory)
def train(self, train_loader):
"""Performs a single training epoch and returns the loss metric
for the training dataset.
:param train_loader: The iterator that feeds in the training data
:type train_loader: torch.utils.data.dataloader.DataLoader
:return: The error metric for that epoch
:rtype: float
"""
# Sets the model to train mode
self.model.train()
train_loss = 0.
# Initialising the empty array for target predictions
if self.mtl:
pred_list = np.empty((0, self.y_test.shape[1]))
else:
pred_list = np.empty((0, 1))
# The data loader creates batches of data to train
for X_train_batch, y_train_batch in train_loader:
# Sending the data to GPU if available
X_train_batch = X_train_batch.to(self.device)
y_train_batch = y_train_batch.to(self.device)
# Zeros the gradients
self.optimiser.zero_grad()
# Some background operation in the underlying CUDA optimisation
# code changes the seeds, so resetting it here ensures
# deterministic results
set_seed(42)
# Perform forward pass
y_pred = self.model(X_train_batch)
# Calculate loss for the batch
loss = self.loss_function(y_pred, y_train_batch)
# Perform backward pass
loss.backward()
# Adding the predictions for this batch to prediction list
pred_list = np.concatenate(
[pred_list, y_pred.detach().cpu().numpy()], axis=0)
# Calculate the training loss
train_loss += (loss*X_train_batch.size()[0]).detach().cpu().numpy()
# Update Parameters
self.optimiser.step()
# Storing the predictions
self.train_predictions = pred_list
return train_loss / len(train_loader.dataset.tensors[0])
def validate(self, val_loader):
"""Evaluates the performance of the network on unseen validation data.
:param val_loader: the iterator that feeds in the validation data
:type val_loader: torch.utils.data.dataloader.DataLoader
:return: the error metric for that epoch
:rtype: float
"""
# Set the model to evaluate mode
self.model.eval()
val_loss = 0.
# Initialising the empty array for target predictions
if self.mtl:
val_pred_list = np.empty((0, self.y_test.shape[1]))
else:
val_pred_list = np.empty((0, 1))
# The data loader creates batches of data to validate
for X_val_batch, y_val_batch in val_loader:
# Ensures that the gradients are not updated
with torch.no_grad():
# Sending the data to GPU if available
X_val_batch = X_val_batch.to(self.device)
y_val_batch = y_val_batch.to(self.device)
# Perform forward pass
y_pred = self.model(X_val_batch)
# Calculate loss for the batch
loss = self.loss_function(y_pred, y_val_batch)
# Adding the predictions for this batch to prediction list
val_pred_list = np.concatenate(
[val_pred_list, y_pred.detach().cpu().numpy()], axis=0)
# Calculate the validation loss
val_loss += (loss * X_val_batch.size()[
0]).detach().cpu().numpy()
# Storing the predictions
self.val_predictions = val_pred_list
return val_loss / len(val_loader.dataset.tensors[0])
def evaluate(self, model, test_loader):
"""Evaluates the performance of the network on given data for a given
model.
This overlaps heavily with the validation code; it is kept separate only
because that makes inspecting attributes easier when running simulations.
:param model: The model to evaluate
:type model: nn.module
:param test_loader: The iterator that feeds in the data of choice
:type test_loader: torch.utils.data.dataloader.DataLoader
:return: The error metric for that dataset
:rtype: float
"""
# Set the model to evaluate mode
model = model.eval()
test_loss = 0.
# Selecting the number of output features
if self.mtl:
test_pred_list = np.empty((0, self.y_test.shape[1]))
else:
test_pred_list = np.empty((0, 1))
# The data loader creates batches of data to validate
for X_test_batch, y_test_batch in test_loader:
# Ensures that the gradients are not updated
with torch.no_grad():
# Sending the data to GPU if available
X_test_batch = X_test_batch.to(self.device)
y_test_batch = y_test_batch.to(self.device)
# Perform forward pass
y_pred = model(X_test_batch)
# Calculate loss for the batch
loss = self.loss_function(y_pred, y_test_batch)
# Adding the predictions for this batch to prediction list
test_pred_list = np.concatenate(
[test_pred_list, y_pred.detach().cpu().numpy()], axis=0)
# Calculate the validation loss
test_loss += (loss * X_test_batch.size()[
0]).detach().cpu().numpy()
# Storing the predictions
self.test_predictions = test_pred_list
return test_loss / len(test_loader.dataset.tensors[0])
def live_pred_plot(self):
"""Plots the training predictions, validation predictions and the
training/validation losses as they are predicted.
"""
# More plots are required for MTL then single task
if self.mtl:
_, axes = plt.subplots(1, 5, figsize=(24, 5))
axes[0].set_title("Training Predictions")
axes[0].plot(self.train_predictions, label="Predicted")
axes[0].plot(self.y_train.numpy(), '--', label="Observed")
axes[0].legend()
axes[1].set_title("Validation Predictions")
axes[1].plot(self.val_predictions, label="Predicted")
axes[1].plot(self.y_val.numpy(), '--', label="Observed")
axes[1].legend()
axes[2].set_title("Loss Plots")
axes[2].plot(self.logs['Training Loss'], label="Training Loss")
axes[2].plot(self.logs['Validation Loss'], label="Validation Loss")
axes[2].legend()
axes[3].set_title("Single Metal Inspection Train")
axes[3].plot(self.train_predictions[:, 0], label="Predicted")
axes[3].plot(self.y_train.numpy()[:, 0], label="Observed")
axes[3].legend()
axes[4].set_title("Single Metal Inspection Val")
axes[4].plot(self.val_predictions[:, 0], label="Predicted")
axes[4].plot(self.y_val.numpy()[:, 0], label="Observed")
axes[4].legend()
plt.show()
else:
_, axes = plt.subplots(1, 3, figsize=(20, 5))
axes[0].set_title("Training Predictions")
axes[0].plot(self.train_predictions, label="Predicted")
axes[0].plot(self.y_train.numpy(), label="Observed")
axes[0].legend()
axes[1].set_title("Validation Predictions")
axes[1].plot(self.val_predictions, label="Predicted")
axes[1].plot(self.y_val.numpy(), label="Observed")
axes[1].legend()
axes[2].set_title("Loss Plots")
axes[2].plot(self.logs['Training Loss'], label="Training Loss")
axes[2].plot(self.logs['Validation Loss'], label="Validation Loss")
axes[2].legend()
plt.show()
def training_wrapper(self):
"""The wrapper that performs the training and validation"""
# start timer
start_time = time.time()
# set seed
set_seed(int(self.seed))
# Create data loaders
self.create_data_loaders()
train_log = []
val_log = []
# Begin training
for epoch in range(self.n_epochs):
# Train and validate the model
train_loss = self.train(self.train_loader)
val_loss = self.validate(self.val_loader)
# Saves a copy of the model if it improves on previous
# best val score
if val_loss <= self.best_val_score:
self.best_model = deepcopy(self.model)
self.best_val_score = val_loss
# Logging the performance of the models
train_log.append(train_loss)
val_log.append(val_loss)
self.logs["Training Loss"] = train_log
self.logs["Validation Loss"] = val_log
self.logs["Time"] = time.time() - start_time
# Checking stopping criteria
if self.early_stop:
self.early(val_loss)
if self.early.stop:
print("Early Stopping")
self.model = self.best_model
break
# Printing key metrics to screen
if self.disp_freq > 0:
if epoch % self.disp_freq == 0:
print("Epoch: %i "
"Train: %.5f "
"Val: %.5f "
"Time: %.3f "
"Best Val: %.5f"
% (epoch, train_loss, val_loss,
(time.time() - start_time),
self.best_val_score))
if self.fig_disp_freq > 0:
# Plotting predictions and training metrics
if epoch % self.fig_disp_freq == 0:
self.live_pred_plot()
# Storing the best model
self.model = self.best_model
def param_strip(param):
"""Strips the key text info out of certain parameters.
Used to save the text info of which models/optimiser objects are used
:param param: The parameter object to find the name of
:type param: object
"""
return str(param)[:str(param).find('(')]
def full_save(model, model_name, optimiser, num_epoch, learning_rate, momentum,
weight_decay, use_lg_returns,
PCA_used, data_X, train_loss, val_loss, test_loss, train_time,
hidden_dim, mse, mae, mde, path):
"""Saves the models run details and hyper-parameters to a csv file
:param model: The model run
:type model: nn.module
:param model_name: The name the model is saved under
:type model_name: string
:param optimiser: The optimiser type used
:type optimiser: torch.optim
:param num_epoch: The number of epochs run for
:type num_epoch: int
:param learning_rate: The learning rate learning hyper-parameter
:type learning_rate: float
:param momentum: The momentum learning hyper-parameter
:type momentum: float
:param weight_decay: The weight decay learning hyper-parameter
:type weight_decay: float
:param use_lg_returns: Whether log returns were used
:type use_lg_returns: bool
:param PCA_used: Whether PCA was used
:type PCA_used: bool
:param data_X: The training dataset (used to save the shape)
:type data_X: np.array
:param train_loss: The loss on the training dataset
:type train_loss: float
:param val_loss: The loss on the validation dataset
:type val_loss: float
:param test_loss: The loss on the test dataset
:type test_loss: float
:param train_time: The amount of time to train
:type train_time: float
:param hidden_dim: The number of neurons in the hidden layers
:type hidden_dim: int
:param mse: The mean squared error metric
:type mse: float
:param mae: The mean absolute error metric
:type mae: float
:param mde: The mean directional error metric
:type mde: float
:param path: The directory path to save in
:type path: string
"""
ind = ["Model Class",
"Optimiser",
"Epoch Number",
'Learning Rate',
"Momentum",
"Weight Decay",
"Log Returns Used",
"PCA",
"Num Features",
"Dataset Length",
"Series Length",
"Training Loss",
"Validation loss",
"Test Loss",
"Hidden Layer Dimensions",
"Mean Squared Error",
"Mean Absolute Error",
"Mean Directional Accuracy",
"Training Time"]
model_class = param_strip(model)
row = [model_class,
param_strip(optimiser),
num_epoch,
learning_rate,
momentum,
weight_decay,
use_lg_returns,
PCA_used,
data_X.shape[2],
data_X.shape[0],
data_X.shape[1],
train_loss,
val_loss,
test_loss,
hidden_dim,
mse,
mae,
mde,
train_time]
ind = [str(i) for i in ind]
row = [str(i) for i in row]
ind = [",".join(ind)]
row = [",".join(row)]
np.savetxt(
path + model_name + '_' + str(val_loss).replace(".", "_")[:5] + ".csv",
np.r_[ind, row], fmt='%s', delimiter=',')
return True
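if __name__ == "__main__":
    # A minimal, illustrative sketch of the wrapper above: a tiny model and
    # random data stand in for the real Foresight models and price series,
    # and the call order (train_val_test then training_wrapper) is inferred
    # from the methods themselves rather than taken from package examples.
    import torch.nn as nn

    class _TinyModel(nn.Module):
        def __init__(self, n_features):
            super().__init__()
            self.linear = nn.Linear(n_features, 1)

        def forward(self, x):
            # use only the last time step of each input series
            return self.linear(x[:, -1, :])

    data_X = np.random.rand(200, 10, 3)   # (samples, series length, features)
    data_y = np.random.rand(200, 1)
    model = _TinyModel(n_features=3)
    optimiser = torch.optim.Adam(model.parameters(), lr=1e-3)
    learner = DeepLearning(model, data_X, data_y, optimiser,
                           batch_size=32, n_epochs=5,
                           disp_freq=0, fig_disp_freq=0, early_stop=False)
    learner.train_val_test()
    learner.training_wrapper()
    print(learner.logs["Validation Loss"][-1])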
/DjangoDjangoAppCenter-0.0.11-py3-none-any.whl/AppCenter/simpleui/static/admin/simpleui-x/elementui/umd/locale/fa.js
(function (global, factory) {
if (typeof define === "function" && define.amd) {
define('element/locale/fa', ['module', 'exports'], factory);
} else if (typeof exports !== "undefined") {
factory(module, exports);
} else {
var mod = {
exports: {}
};
factory(mod, mod.exports);
global.ELEMENT.lang = global.ELEMENT.lang || {};
global.ELEMENT.lang.fa = mod.exports;
}
})(this, function (module, exports) {
'use strict';
exports.__esModule = true;
exports.default = {
el: {
colorpicker: {
confirm: 'باشد',
clear: 'حذف'
},
datepicker: {
now: 'اکنون',
today: 'امروز',
cancel: 'لغو',
clear: 'حذف',
confirm: 'باشه',
selectDate: 'انتخاب تاریخ',
selectTime: 'انتخاب زمان',
startDate: 'تاریخ شروع',
startTime: 'زمان شروع',
endDate: 'تاریخ پایان',
endTime: 'زمان پایان',
prevYear: 'سال قبل',
nextYear: 'سال بعد',
prevMonth: 'ماه قبل',
nextMonth: 'ماه بعد',
year: 'سال',
month1: 'ژانویه',
month2: 'فوریه',
month3: 'مارس',
month4: 'آوریل',
month5: 'مه',
month6: 'ژوئن',
month7: 'جولای',
month8: 'اوت',
month9: 'سپتامبر',
month10: 'اکتبر',
month11: 'نوامبر',
month12: 'دسامبر',
// week: 'week',
weeks: {
sun: 'یکشنبه',
mon: 'دوشنبه',
tue: 'سهشنبه',
wed: 'چهارشنبه',
thu: 'پنجشنبه',
fri: 'جمعه',
sat: 'شنبه'
},
months: {
jan: 'ژانویه',
feb: 'فوریه',
mar: 'مارس',
apr: 'آوریل',
may: 'مه',
jun: 'ژوئن',
jul: 'جولای',
aug: 'اوت',
sep: 'سپتامبر',
oct: 'اکتبر',
nov: 'نوامبر',
dec: 'دسامبر'
}
},
select: {
loading: 'بارگیری',
noMatch: 'هیچ دادهای پیدا نشد',
noData: 'اطلاعاتی وجود ندارد',
placeholder: 'انتخاب کنید'
},
cascader: {
noMatch: 'هیچ دادهای پیدا نشد',
loading: 'بارگیری',
placeholder: 'انتخاب کنید',
noData: 'اطلاعاتی وجود ندارد'
},
pagination: {
goto: 'برو به',
pagesize: '/صفحه',
total: 'مجموع {total}',
pageClassifier: ''
},
messagebox: {
title: 'پیام',
confirm: 'باشه',
cancel: 'لغو',
error: 'ورودی غیر مجاز'
},
upload: {
deleteTip: 'برای پاک کردن حذف را فشار دهید',
delete: 'حذف',
preview: 'پیشنمایش',
continue: 'ادامه'
},
table: {
emptyText: 'اطلاعاتی وجود ندارد',
confirmFilter: 'تایید',
resetFilter: 'حذف',
clearFilter: 'همه',
sumText: 'جمع'
},
tree: {
emptyText: 'اطلاعاتی وجود ندارد'
},
transfer: {
noMatch: 'هیچ دادهای پیدا نشد',
noData: 'اطلاعاتی وجود ندارد',
titles: ['لیست 1', 'لیست 2'],
filterPlaceholder: 'کلید واژه هارو وارد کن',
noCheckedFormat: '{total} مورد',
hasCheckedFormat: '{checked} مورد از {total} مورد انتخاب شده است'
},
image: {
error: 'خطا در بارگیری تصویر'
},
pageHeader: {
title: 'بازگشت'
}
}
};
module.exports = exports['default'];
});
/Nuitka-1.8.tar.gz/Nuitka-1.8/nuitka/build/inline_copy/yaml_27/yaml/resolver.py
__all__ = ['BaseResolver', 'Resolver']
from error import *
from nodes import *
import re
class ResolverError(YAMLError):
pass
class BaseResolver(object):
DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
yaml_implicit_resolvers = {}
yaml_path_resolvers = {}
def __init__(self):
self.resolver_exact_paths = []
self.resolver_prefix_paths = []
def add_implicit_resolver(cls, tag, regexp, first):
if not 'yaml_implicit_resolvers' in cls.__dict__:
implicit_resolvers = {}
for key in cls.yaml_implicit_resolvers:
implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:]
cls.yaml_implicit_resolvers = implicit_resolvers
if first is None:
first = [None]
for ch in first:
cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
add_implicit_resolver = classmethod(add_implicit_resolver)
def add_path_resolver(cls, tag, path, kind=None):
# Note: `add_path_resolver` is experimental. The API could be changed.
# `new_path` is a pattern that is matched against the path from the
# root to the node that is being considered. `node_path` elements are
# tuples `(node_check, index_check)`. `node_check` is a node class:
# `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
# matches any kind of a node. `index_check` could be `None`, a boolean
# value, a string value, or a number. `None` and `False` match against
# any _value_ of sequence and mapping nodes. `True` matches against
# any _key_ of a mapping node. A string `index_check` matches against
# a mapping value that corresponds to a scalar key which content is
# equal to the `index_check` value. An integer `index_check` matches
# against a sequence value with the index equal to `index_check`.
if not 'yaml_path_resolvers' in cls.__dict__:
cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
new_path = []
for element in path:
if isinstance(element, (list, tuple)):
if len(element) == 2:
node_check, index_check = element
elif len(element) == 1:
node_check = element[0]
index_check = True
else:
raise ResolverError("Invalid path element: %s" % element)
else:
node_check = None
index_check = element
if node_check is str:
node_check = ScalarNode
elif node_check is list:
node_check = SequenceNode
elif node_check is dict:
node_check = MappingNode
elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
and not isinstance(node_check, basestring) \
and node_check is not None:
raise ResolverError("Invalid node checker: %s" % node_check)
if not isinstance(index_check, (basestring, int)) \
and index_check is not None:
raise ResolverError("Invalid index checker: %s" % index_check)
new_path.append((node_check, index_check))
if kind is str:
kind = ScalarNode
elif kind is list:
kind = SequenceNode
elif kind is dict:
kind = MappingNode
elif kind not in [ScalarNode, SequenceNode, MappingNode] \
and kind is not None:
raise ResolverError("Invalid node kind: %s" % kind)
cls.yaml_path_resolvers[tuple(new_path), kind] = tag
add_path_resolver = classmethod(add_path_resolver)
def descend_resolver(self, current_node, current_index):
if not self.yaml_path_resolvers:
return
exact_paths = {}
prefix_paths = []
if current_node:
depth = len(self.resolver_prefix_paths)
for path, kind in self.resolver_prefix_paths[-1]:
if self.check_resolver_prefix(depth, path, kind,
current_node, current_index):
if len(path) > depth:
prefix_paths.append((path, kind))
else:
exact_paths[kind] = self.yaml_path_resolvers[path, kind]
else:
for path, kind in self.yaml_path_resolvers:
if not path:
exact_paths[kind] = self.yaml_path_resolvers[path, kind]
else:
prefix_paths.append((path, kind))
self.resolver_exact_paths.append(exact_paths)
self.resolver_prefix_paths.append(prefix_paths)
def ascend_resolver(self):
if not self.yaml_path_resolvers:
return
self.resolver_exact_paths.pop()
self.resolver_prefix_paths.pop()
def check_resolver_prefix(self, depth, path, kind,
current_node, current_index):
node_check, index_check = path[depth-1]
if isinstance(node_check, basestring):
if current_node.tag != node_check:
return
elif node_check is not None:
if not isinstance(current_node, node_check):
return
if index_check is True and current_index is not None:
return
if (index_check is False or index_check is None) \
and current_index is None:
return
if isinstance(index_check, basestring):
if not (isinstance(current_index, ScalarNode)
and index_check == current_index.value):
return
elif isinstance(index_check, int) and not isinstance(index_check, bool):
if index_check != current_index:
return
return True
def resolve(self, kind, value, implicit):
if kind is ScalarNode and implicit[0]:
if value == u'':
resolvers = self.yaml_implicit_resolvers.get(u'', [])
else:
resolvers = self.yaml_implicit_resolvers.get(value[0], [])
resolvers += self.yaml_implicit_resolvers.get(None, [])
for tag, regexp in resolvers:
if regexp.match(value):
return tag
implicit = implicit[1]
if self.yaml_path_resolvers:
exact_paths = self.resolver_exact_paths[-1]
if kind in exact_paths:
return exact_paths[kind]
if None in exact_paths:
return exact_paths[None]
if kind is ScalarNode:
return self.DEFAULT_SCALAR_TAG
elif kind is SequenceNode:
return self.DEFAULT_SEQUENCE_TAG
elif kind is MappingNode:
return self.DEFAULT_MAPPING_TAG
class Resolver(BaseResolver):
pass
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:bool',
re.compile(ur'''^(?:yes|Yes|YES|no|No|NO
|true|True|TRUE|false|False|FALSE
|on|On|ON|off|Off|OFF)$''', re.X),
list(u'yYnNtTfFoO'))
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:float',
re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
|\.[0-9_]+(?:[eE][-+][0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
|[-+]?\.(?:inf|Inf|INF)
|\.(?:nan|NaN|NAN))$''', re.X),
list(u'-+0123456789.'))
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:int',
re.compile(ur'''^(?:[-+]?0b[0-1_]+
|[-+]?0[0-7_]+
|[-+]?(?:0|[1-9][0-9_]*)
|[-+]?0x[0-9a-fA-F_]+
|[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
list(u'-+0123456789'))
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:merge',
re.compile(ur'^(?:<<)$'),
[u'<'])
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:null',
re.compile(ur'''^(?: ~
|null|Null|NULL
| )$''', re.X),
[u'~', u'n', u'N', u''])
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:timestamp',
re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
|[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
(?:[Tt]|[ \t]+)[0-9][0-9]?
:[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
(?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
list(u'0123456789'))
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:value',
re.compile(ur'^(?:=)$'),
[u'='])
# The following resolver is only for documentation purposes. It cannot work
# because plain scalars cannot start with '!', '&', or '*'.
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:yaml',
re.compile(ur'^(?:!|&|\*)$'),
list(u'!&*')) | PypiClean |
/Kr0nOs-3.4.1.tar.gz/Kr0nOs-3.4.1/kronbot/core/utils/chat_formatting.py
import datetime
import itertools
from io import BytesIO
from typing import Iterator, List, Optional, Sequence, SupportsInt, Union
import discord
from babel.numbers import format_decimal
from kronbot.core.i18n import Translator, get_babel_regional_format
_ = Translator("UtilsChatFormatting", __file__)
def error(text: str) -> str:
"""Get text prefixed with an error emoji.
Returns
-------
str
The new message.
"""
return "\N{NO ENTRY SIGN} {}".format(text)
def warning(text: str) -> str:
"""Get text prefixed with a warning emoji.
Returns
-------
str
The new message.
"""
return "\N{WARNING SIGN} {}".format(text)
def info(text: str) -> str:
"""Get text prefixed with an info emoji.
Returns
-------
str
The new message.
"""
return "\N{INFORMATION SOURCE} {}".format(text)
def question(text: str) -> str:
"""Get text prefixed with a question emoji.
Returns
-------
str
The new message.
"""
return "\N{BLACK QUESTION MARK ORNAMENT} {}".format(text)
def bold(text: str) -> str:
"""Get the given text in bold.
Note: This escapes text prior to bolding.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
text = escape(text, formatting=True)
return "**{}**".format(text)
def box(text: str, lang: str = "") -> str:
"""Get the given text in a code block.
Parameters
----------
text : str
The text to be marked up.
lang : `str`, optional
The syntax highlighting language for the codeblock.
Returns
-------
str
The marked up text.
"""
ret = "```{}\n{}\n```".format(lang, text)
return ret
def inline(text: str) -> str:
"""Get the given text as inline code.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
if "`" in text:
return "``{}``".format(text)
else:
return "`{}`".format(text)
def italics(text: str) -> str:
"""Get the given text in italics.
Note: This escapes text prior to italicising
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
text = escape(text, formatting=True)
return "*{}*".format(text)
def bordered(*columns: Sequence[str], ascii_border: bool = False) -> str:
"""Get two blocks of text in a borders.
Note
----
This will only work with a monospaced font.
Parameters
----------
*columns : `sequence` of `str`
The columns of text, each being a list of lines in that column.
ascii_border : bool
Whether or not the border should be pure ASCII.
Returns
-------
str
The bordered text.
"""
borders = {
"TL": "-" if ascii_border else "┌", # Top-left
"TR": "-" if ascii_border else "┐", # Top-right
"BL": "-" if ascii_border else "└", # Bottom-left
"BR": "-" if ascii_border else "┘", # Bottom-right
"HZ": "-" if ascii_border else "─", # Horizontal
"VT": "|" if ascii_border else "│", # Vertical
}
sep = " " * 4 # Separator between boxes
widths = tuple(max(len(row) for row in column) + 9 for column in columns) # width of each col
colsdone = [False] * len(columns) # whether or not each column is done
lines = [sep.join("{TL}" + "{HZ}" * width + "{TR}" for width in widths)]
for line in itertools.zip_longest(*columns):
row = []
for colidx, column in enumerate(line):
width = widths[colidx]
done = colsdone[colidx]
if column is None:
if not done:
# bottom border of column
column = "{HZ}" * width
row.append("{BL}" + column + "{BR}")
colsdone[colidx] = True # mark column as done
else:
# leave empty
row.append(" " * (width + 2))
else:
column += " " * (width - len(column)) # append padded spaces
row.append("{VT}" + column + "{VT}")
lines.append(sep.join(row))
final_row = []
for width, done in zip(widths, colsdone):
if not done:
final_row.append("{BL}" + "{HZ}" * width + "{BR}")
else:
final_row.append(" " * (width + 2))
lines.append(sep.join(final_row))
return "\n".join(lines).format(**borders)
def pagify(
text: str,
delims: Sequence[str] = ["\n"],
*,
priority: bool = False,
escape_mass_mentions: bool = True,
shorten_by: int = 8,
page_length: int = 2000,
) -> Iterator[str]:
"""Generate multiple pages from the given text.
Note
----
This does not respect code blocks or inline code.
Parameters
----------
text : str
The content to pagify and send.
delims : `sequence` of `str`, optional
Characters where page breaks will occur. If no delimiters are found
in a page, the page will break after ``page_length`` characters.
By default this only contains the newline.
Other Parameters
----------------
priority : `bool`
Set to :code:`True` to choose the page break delimiter based on the
order of ``delims``. Otherwise, the page will always break at the
last possible delimiter.
escape_mass_mentions : `bool`
If :code:`True`, any mass mentions (here or everyone) will be
silenced.
shorten_by : `int`
How much to shorten each page by. Defaults to 8.
page_length : `int`
The maximum length of each page. Defaults to 2000.
Yields
------
`str`
Pages of the given text.
"""
in_text = text
page_length -= shorten_by
while len(in_text) > page_length:
this_page_len = page_length
if escape_mass_mentions:
this_page_len -= in_text.count("@here", 0, page_length) + in_text.count(
"@everyone", 0, page_length
)
closest_delim = (in_text.rfind(d, 1, this_page_len) for d in delims)
if priority:
closest_delim = next((x for x in closest_delim if x > 0), -1)
else:
closest_delim = max(closest_delim)
closest_delim = closest_delim if closest_delim != -1 else this_page_len
if escape_mass_mentions:
to_send = escape(in_text[:closest_delim], mass_mentions=True)
else:
to_send = in_text[:closest_delim]
if len(to_send.strip()) > 0:
yield to_send
in_text = in_text[closest_delim:]
if len(in_text.strip()) > 0:
if escape_mass_mentions:
yield escape(in_text, mass_mentions=True)
else:
yield in_text
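async def _send_pagified(ctx, long_text: str):
    """A minimal usage sketch for ``pagify``: split ``long_text`` into pages
    that fit inside a single Discord message and send each one. ``ctx`` is
    assumed to be a commands.Context-like object exposing a ``send``
    coroutine; the page length is illustrative."""
    for page in pagify(long_text, page_length=1990):
        await ctx.send(page)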
def strikethrough(text: str) -> str:
"""Get the given text with a strikethrough.
Note: This escapes text prior to applying a strikethrough
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
text = escape(text, formatting=True)
return "~~{}~~".format(text)
def underline(text: str) -> str:
"""Get the given text with an underline.
Note: This escapes text prior to underlining
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
text = escape(text, formatting=True)
return "__{}__".format(text)
def escape(text: str, *, mass_mentions: bool = False, formatting: bool = False) -> str:
"""Get text with all mass mentions or markdown escaped.
Parameters
----------
text : str
The text to be escaped.
mass_mentions : `bool`, optional
Set to :code:`True` to escape mass mentions in the text.
formatting : `bool`, optional
        Set to :code:`True` to escape any markdown formatting in the text.
Returns
-------
str
The escaped text.
"""
if mass_mentions:
text = text.replace("@everyone", "@\u200beveryone")
text = text.replace("@here", "@\u200bhere")
if formatting:
text = discord.utils.escape_markdown(text)
return text
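# Illustrative example (editor's addition): escaping user-provided text before
# echoing it back, so mass mentions are neutralised and markdown is not rendered.
# The input string is made up for illustration.
#
#     unsafe = "@everyone check **this** out"
#     safe = escape(unsafe, mass_mentions=True, formatting=True)
#     # "@everyone" gains a zero-width space; markdown escaping is delegated to
#     # discord.utils.escape_markdown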
def humanize_list(items: Sequence[str]) -> str:
"""Get comma-separted list, with the last element joined with *and*.
This uses an Oxford comma, because without one, items containing
the word *and* would make the output difficult to interpret.
Parameters
----------
items : Sequence[str]
The items of the list to join together.
Raises
------
IndexError
An empty sequence was passed
Examples
--------
.. testsetup::
from kronbot.core.utils.chat_formatting import humanize_list
.. doctest::
>>> humanize_list(['One', 'Two', 'Three'])
'One, Two, and Three'
>>> humanize_list(['One'])
'One'
"""
if len(items) == 1:
return items[0]
try:
return ", ".join(items[:-1]) + _(", and ") + items[-1]
except IndexError:
raise IndexError("Cannot humanize empty sequence") from None
def format_perms_list(perms: discord.Permissions) -> str:
"""Format a list of permission names.
This will return a humanized list of the names of all enabled
permissions in the provided `discord.Permissions` object.
Parameters
----------
perms : discord.Permissions
The permissions object with the requested permissions to list
enabled.
Returns
-------
str
The humanized list.
"""
perm_names: List[str] = []
for perm, value in perms:
if value is True:
perm_name = '"' + perm.replace("_", " ").title() + '"'
perm_names.append(perm_name)
return humanize_list(perm_names).replace("Guild", "Server")
def humanize_timedelta(
*, timedelta: Optional[datetime.timedelta] = None, seconds: Optional[SupportsInt] = None
) -> str:
"""
Get a locale aware human timedelta representation.
This works with either a timedelta object or a number of seconds.
    Fractional values will be omitted, and values less than 1 second
    return an empty string.
Parameters
----------
timedelta: Optional[datetime.timedelta]
A timedelta object
seconds: Optional[SupportsInt]
A number of seconds
Returns
-------
str
A locale aware representation of the timedelta or seconds.
Raises
------
ValueError
The function was called with neither a number of seconds nor a timedelta object
"""
try:
obj = seconds if seconds is not None else timedelta.total_seconds()
except AttributeError:
raise ValueError("You must provide either a timedelta or a number of seconds")
seconds = int(obj)
periods = [
(_("year"), _("years"), 60 * 60 * 24 * 365),
(_("month"), _("months"), 60 * 60 * 24 * 30),
(_("day"), _("days"), 60 * 60 * 24),
(_("hour"), _("hours"), 60 * 60),
(_("minute"), _("minutes"), 60),
(_("second"), _("seconds"), 1),
]
strings = []
for period_name, plural_period_name, period_seconds in periods:
if seconds >= period_seconds:
period_value, seconds = divmod(seconds, period_seconds)
if period_value == 0:
continue
unit = plural_period_name if period_value > 1 else period_name
strings.append(f"{period_value} {unit}")
return ", ".join(strings)
def humanize_number(val: Union[int, float], override_locale=None) -> str:
"""
Convert an int or float to a str with digit separators based on bot locale
Parameters
----------
val : Union[int, float]
The int/float to be formatted.
override_locale: Optional[str]
A value to override bot's regional format.
Returns
-------
str
locale aware formatted number.
"""
return format_decimal(val, locale=get_babel_regional_format(override_locale))
def text_to_file(
text: str, filename: str = "file.txt", *, spoiler: bool = False, encoding: str = "utf-8"
):
"""Prepares text to be sent as a file on Discord, without character limit.
This writes text into a bytes object that can be used for the ``file`` or ``files`` parameters
of :meth:`discord.abc.Messageable.send`.
Parameters
----------
text: str
The text to put in your file.
filename: str
The name of the file sent. Defaults to ``file.txt``.
    spoiler: bool
        Whether the attachment is a spoiler. Defaults to ``False``.
    encoding: str
        The encoding used when converting the text to bytes. Defaults to ``utf-8``.
Returns
-------
discord.File
The file containing your text.
"""
file = BytesIO(text.encode(encoding))
return discord.File(file, filename, spoiler=spoiler) | PypiClean |
/MutPy-Pynguin-0.7.1.tar.gz/MutPy-Pynguin-0.7.1/mutpy/views.py | import ast
import datetime
import inspect
import os
import traceback
from difflib import unified_diff
import jinja2
import yaml
from mutpy import codegen, termcolor, utils
class ViewNotifier:
PREFIX = 'notify_'
def __init__(self, views):
self.views = views
def add_view(self, views):
self.views.append(views)
def del_view(self, views):
self.views.remove(views)
def notify_all_views(self, notify, *args, **kwargs):
for views in self.views:
if hasattr(views, notify):
attr = getattr(views, notify)
attr(*args, **kwargs)
def __getattr__(self, name):
if name.startswith(ViewNotifier.PREFIX):
notify = name[len(ViewNotifier.PREFIX):]
return lambda *args, **kwargs: self.notify_all_views(notify, *args, **kwargs)
else:
raise AttributeError(name)
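# Illustrative example (editor's addition): any attribute starting with "notify_"
# becomes a broadcast call, so notifier.notify_end(score, duration) invokes
# end(score, duration) on every registered view that defines it. The score and
# duration variables below are placeholders for illustration only.
#
#     notifier = ViewNotifier([TextView(), YAMLReportView("report.yaml")])
#     notifier.notify_end(score, duration)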
class QuietTextView:
def __init__(self, colored_output=False):
self.colored_output = colored_output
def end(self, score, duration):
self.level_print('Mutation score {}: {}'.format(
self.time_format(duration),
self.decorate('{:.1f}%'.format(score.count()), 'blue', attrs=['bold']),
))
def level_print(self, msg, level=1, ended=True, continuation=False):
end = "\n" if ended else ""
if continuation:
print(msg, end=end)
else:
if level == 1:
prefix = self.decorate('[*]', 'blue')
elif level == 2:
prefix = self.decorate(' -', 'cyan')
print('{} {}'.format(prefix, msg), end=end)
def decorate(self, text, color=None, on_color=None, attrs=None):
if self.colored_output:
return termcolor.colored(text, color, on_color, attrs)
else:
return text
@staticmethod
def time_format(time=None):
if time is None:
return '[ - ]'
else:
return '[{:.5f} s]'.format(time)
class TextView(QuietTextView):
def __init__(self, colored_output=False, show_mutants=False):
super().__init__(colored_output)
self.show_mutants = show_mutants
def initialize(self, targets, tests):
self.level_print('Start mutation process:')
self.level_print('targets: {}'.format(', '.join(targets)), 2)
self.level_print('tests: {}'.format(', '.join(tests)), 2)
def start(self):
self.level_print('Start mutants generation and execution:')
def end(self, score, duration):
super().end(score, duration)
self.level_print('all: {}'.format(score.all_mutants), 2)
if score.all_mutants:
self.level_print('killed: {} ({:.1f}%)'.format(score.killed_mutants,
100 * score.killed_mutants / score.all_mutants), 2)
self.level_print('survived: {} ({:.1f}%)'.format(score.survived_mutants,
100 * score.survived_mutants / score.all_mutants), 2)
self.level_print('incompetent: {} ({:.1f}%)'.format(score.incompetent_mutants,
100 * score.incompetent_mutants / score.all_mutants), 2)
self.level_print('timeout: {} ({:.1f}%)'.format(score.timeout_mutants,
100 * score.timeout_mutants / score.all_mutants), 2)
if score.all_nodes:
self.level_print('Coverage: {} of {} AST nodes ({:.1f}%)'.format(
score.covered_nodes, score.all_nodes,
100 * score.covered_nodes / score.all_nodes
))
def passed(self, tests, number_of_tests):
self.level_print('{} tests passed:'.format(number_of_tests))
for test, target, time in tests:
test_name = test.__name__ + ('.' + target if target else '')
self.level_print('{} {}'.format(test_name, self.time_format(time)), 2)
def original_tests_fail(self, result):
self.level_print(self.decorate('Tests failed:', 'red', attrs=['bold']))
for fail in result.failed:
self.level_print('fail in {} - {}'.format(fail.name, fail.short_message), 2)
if result.is_incompetent():
self.level_print(str(result.get_exception()), 2)
def mutation(self, number, mutations, module, mutant):
for mutation in mutations:
self.level_print(
'[#{:>4}] {:<3} {}: '.format(number, mutation.operator.name(), module.__name__),
ended=False,
level=2,
)
if mutation != mutations[-1]:
print()
if self.show_mutants:
self.print_code(mutant, ast.parse(inspect.getsource(module)))
def cant_load(self, name, exception):
self.level_print(self.decorate('Can\'t load module: ', 'red', attrs=['bold']) + '{} ({}: {})'.format(name,
exception.__class__.__name__,
exception))
def print_code(self, mutant, original):
mutant_src = codegen.to_source(mutant)
mutant_src = codegen.add_line_numbers(mutant_src)
original_src = codegen.to_source(original)
original_src = codegen.add_line_numbers(original_src)
self._print_diff(mutant_src, original_src)
def _print_diff(self, mutant_src, original_src):
diff = self._create_diff(mutant_src, original_src)
diff = [line for line in diff if not line.startswith(('---', '+++', '@@'))]
diff = [self.decorate(line, 'blue') if line.startswith('- ') else line for line in diff]
diff = [self.decorate(line, 'green') if line.startswith('+ ') else line for line in diff]
print("\n{}\n".format('-' * 80) + "\n".join(diff) + "\n{}".format('-' * 80))
@staticmethod
def _create_diff(mutant_src, original_src):
return list(unified_diff(original_src.split('\n'), mutant_src.split('\n'), n=4, lineterm=''))
def killed(self, time, killer, *args, **kwargs):
self.level_print(self.time_format(time) + ' ' + self.decorate('killed', 'green') + ' by ' + str(killer),
continuation=True)
def survived(self, time, *args, **kwargs):
self.level_print(self.time_format(time) + ' ' + self.decorate('survived', 'red'), continuation=True)
def timeout(self, time, *args, **kwargs):
self.level_print(self.time_format(time) + ' ' + self.decorate('timeout', 'yellow'), continuation=True)
def incompetent(self, time, *args, **kwargs):
self.level_print(self.time_format(time) + ' ' + self.decorate('incompetent', 'cyan'), continuation=True)
class DebugView:
def print_exception(self, exception):
print("\n" + "".join(traceback.format_exception(None, exception, None)))
def incompetent(self, time, exception, tests_run, *args, **kwargs):
self.print_exception(exception)
def killed(self, time, killer, exception_traceback, *args, **kwargs):
print('\n' + exception_traceback)
class AccReportView:
def __init__(self):
self.mutation_info = []
def initialize(self, target, tests):
self.target = target
def passed(self, tests, number_of_tests):
self.tests = tests
self.number_of_tests = number_of_tests
def mutation(self, number, mutations, module, mutant):
mutations = [{'operator': mutation.operator.name(), 'lineno': mutation.node.lineno} for mutation in mutations]
self.current_mutation = {
'number': number,
'mutations': mutations,
'module': module,
}
def killed(self, time, killer, exception_traceback, tests_run, *args, **kwargs):
self.end_mutation(
'killed',
time=time,
killer=str(killer),
tests_run=tests_run,
exception_traceback=exception_traceback,
)
def survived(self, time, tests_run, *args, **kwargs):
self.end_mutation('survived', time=time, tests_run=tests_run)
def incompetent(self, time, exception, tests_run, *args, **kwargs):
self.end_mutation('incompetent', time=time, tests_run=tests_run)
def timeout(self, time, *args, **kwargs):
self.end_mutation('timeout', time=time)
def end_mutation(self, status, time=None, killer=None, tests_run=None, exception_traceback=None):
self.current_mutation['status'] = status
self.current_mutation['time'] = time
self.current_mutation['killer'] = killer
self.current_mutation['tests_run'] = tests_run
self.current_mutation['exception_traceback'] = exception_traceback
self.mutation_info.append(self.current_mutation)
class YAMLReportView(AccReportView):
def __init__(self, file_name):
super().__init__()
self.file_name = file_name
def end(self, score, duration):
with open(self.file_name, 'w') as report_file:
yaml.dump({
'targets': self.target,
'tests': [{'name': test.__name__, 'target': target, 'time': time} for test, target, time in self.tests],
'number_of_tests': self.number_of_tests,
'mutations': self.mutation_info,
'total_time': duration,
'time_stats': dict(utils.TimeRegister.executions),
'mutation_score': score.count(),
'coverage': {
'covered_nodes': score.covered_nodes,
'all_nodes': score.all_nodes,
}
}, report_file, default_flow_style=False)
class HTMLReportView(AccReportView):
def __init__(self, dir_name):
super().__init__()
self.dir_name = dir_name
os.makedirs(dir_name, exist_ok=True)
os.makedirs(os.path.join(dir_name, 'mutants'), exist_ok=True)
templates_path = os.path.join(os.path.dirname(__file__), 'templates')
self.env = jinja2.Environment(loader=jinja2.FileSystemLoader(searchpath=templates_path))
def mutation(self, number, mutations, module, mutant):
super().mutation(number, mutations, module, mutant)
self.current_mutation['mutant'] = mutant
def end_mutation(self, *args, **kwargs):
super().end_mutation(*args, **kwargs)
template = self.env.get_template('detail.html')
context = {
'mutant_code': codegen.to_source(self.current_mutation['mutant']),
}
context.update(self.current_mutation)
report = template.render(context)
file_path = os.path.join(self.dir_name, 'mutants', '{}.html'.format(self.current_mutation['number']))
with open(file_path, 'w') as report_file:
report_file.write(report)
def end(self, score, duration):
template = self.env.get_template('index.html')
context = {
'targets': self.target,
'tests': self.tests,
'number_of_tests': self.number_of_tests,
'score': score,
'duration': duration,
'mutations': self.mutation_info,
'date_now': datetime.datetime.now(),
}
report = template.render(context)
file_path = os.path.join(self.dir_name, 'index.html')
with open(file_path, 'w') as report_file:
report_file.write(report) | PypiClean |
/Interplanetary_Invaders-0.7-py3-none-any.whl/interplanetary_invaders/scripts/vehicles.py | import pygame, math
from math import radians, sin, cos
from pygame.math import Vector2
from interplanetary_invaders.scripts.lasers import *
from interplanetary_invaders.scripts.sound import Sound
from interplanetary_invaders.scripts.utils import fix_path
from interplanetary_invaders.scripts import joystick
pygame.init()
class Zapper:
"""Zapper (Player) class"""
def __init__(self, pos, images, mission, dry = False):
self.dry = dry
self.pos = pos
self.images = images
self.speed = 1 # Acceleration multiplier
self.acceleration = 1
self.fire_per_sec = 4.5
self.completeness = 1
self.title = "Alien Zapper 1"
self.name = "zapper"
self.time_passed = 0
self.lasers = []
self.size = (64, 64)
self.boom_size = (200, 200)
self.rect = self.get_rect()
self.health = 1
self.shield = 0
self.max_health = 1
self.max_shield = 1
self.hover_alt = 0
self.min_alt = 0
self.target_alt = 0
self.standard_alt = 0
self.alt_change_speed = 5
self.hitBy = []
self.items = []
self.current_items = {}
self.fireObject = Laser
self.dead = False
self.kill = False
self.frame = 1
self.frame_rate = 1 / 25
self.frame_time = 0
self.friction = 1
self.velocity = pygame.Vector2((0, 0))
self.max_velocity = 6
self.target_x_velocity = 0
self.mission = mission
self.mass = 1.5 # Mass in kilograms
self.death_frames = 25
self.max_temp = 300 # Works up to 300 deg. F
self.canFire = True
self.fire_from = "center"
self.beam = False
self.rotation = 0
self.rotation_speed = 90
self.max_rotation = 60
self.add_temp = 0
self.rotate = False
self.whimp = False
self.regen = False
self.hover = False
self.extra_dip = 0
self.send_to_ground()
self.regen_time = 0
self.just_fired = False
self.death_sound = None
self.weapon_power = 1
self.always_beam = False
self.general_acc = 20
self.im_back = False
self.invincible = False
self.invincible_time = 0
self.even_frame = True
self.damage_sound = Sound("audio/takeDamage.wav", True)
self.fire_sound_pitches = (.95, 1, 1.05)
def send_to_ground(self):
if self.dry:
return
rect = self.get_rect()
if not self.hover:
rect.bottom = self.mission.ground
else:
rect.bottom = self.hover_alt
self.standard_alt = rect.bottom
self.target_alt = self.standard_alt
self.min_alt = self.mission.ground
self.pos[1] = rect.top
if not self.hover and self.mission.is_bottomless():
self.dead = True
self.health = 0
self.shield = 0
if self.hover and self.mission.is_bottomless():
self.min_alt += self.extra_dip
def get_rect(self):
"""Returns a pygame.Rect object representing the player"""
rect = pygame.Rect((0, 0), self.size)
rect.topleft = self.pos
return rect
def make_invincible(self, no_sound=False):
if self.dead:
return
self.invincible = True
self.invincible_time = .75
if not no_sound:
self.damage_sound.play()
def draw(self, surf):
"""Render the Zapper"""
if not self.dead:
if self.invincible and not self.even_frame:
return
if self.rotate:
line_len = 50
start_pos = Vector2(self.fire_from) + self.pos
end_pos = start_pos - Vector2(cos(radians(self.rotation + 90)) * line_len, sin(radians(self.rotation + 90)) * line_len)
pygame.draw.line(surf, (255, 0, 0), start_pos, end_pos, 1)
name = f"{self.name}{int(6 * (self.completeness / 1))}"
if name in self.images:
self.image=pygame.transform.scale(self.images[name], self.size)
if self.rotate and self.completeness == 1 or not name in self.images:
lr = ""
if self.rotation >= 45:
lr = "R"
if self.rotation <= -45:
lr = "L"
surf.blit(pygame.transform.scale(self.images[self.name + lr], self.size), self.pos)
else:
surf.blit(self.image, self.pos)
if self.dead:
rect = self.get_rect()
rect.size = self.boom_size
rect.center = self.get_rect().center
surf.blit(pygame.transform.scale(self.images[f"{self.name}Die{self.frame}"], self.boom_size), rect)
def fire(self):
self.completeness = 0
center = Vector2(self.get_rect().topleft)
if self.fire_from != "center":
center += Vector2((self.fire_from))
else:
center += (Vector2(self.size) / 2)
obj = self.fireObject(center, self.images, self.rotation)
obj.damage *= self.weapon_power
if self.whimp:
obj.damage /= 10
self.lasers.append(obj)
if not self.whimp:
pitch = random.choice(self.fire_sound_pitches)
obj.fire_sound.__init__(obj.fire_sound.original_filename, pitch=pitch)
obj.fire_sound.play()
return obj
def events(self, event):
"""Handle user events for Zapper"""
if (event.type == pygame.KEYDOWN or joystick.WasEvent()) and not self.dead:
if (event.key in (pygame.K_SPACE, pygame.K_UP, pygame.K_w) or joystick.JustPressedX()) and self.completeness == 1 and self.canFire:
obj = self.fire()
self.just_fired = obj
def update(self, time_passed):
"""Update Zapper"""
self.even_frame = not self.even_frame
if self.invincible:
self.invincible_time -= time_passed
if self.invincible_time < 0:
self.invincible = False
for item in self.current_items:
self.current_items[item].total_time += time_passed
self.just_fired = False
if self.dead and self.health > 0:
self.im_back = True
self.dead = False
self.frame = 1
if self.regen:
self.regen_time += time_passed
t = 1
if self.shield == 0:
t = 2
if self.regen_time >= t:
self.regen_time = 0
self.shield += .1
if self.shield > self.max_shield:
self.shield = self.max_shield
pressed = pygame.key.get_pressed()
if (pressed[pygame.K_SPACE] or pressed[pygame.K_w] or pressed[pygame.K_UP] or joystick.CurrentState.X) and self.completeness == 1 and self.canFire and self.beam and not self.dead:
obj = self.fire()
if not self.whimp:
self.just_fired = obj
if pressed[pygame.K_q] and self.rotate:
self.rotation -= self.rotation_speed * time_passed
if joystick.CurrentState.LT_val >= .05 and self.rotate:
self.rotation -= self.rotation_speed * time_passed * joystick.CurrentState.LT_val
if pressed[pygame.K_e] and self.rotate:
self.rotation += self.rotation_speed * time_passed
if joystick.CurrentState.RT_val >= .05 and self.rotate:
self.rotation += self.rotation_speed * time_passed * joystick.CurrentState.RT_val
if self.rotation < -self.max_rotation:
self.rotation = -self.max_rotation
if self.rotation > self.max_rotation:
self.rotation = self.max_rotation
if self.dead:
if self.frame_rate <= self.frame_time:
self.frame += 1
self.frame_time = 0
if self.frame > self.death_frames:
self.kill = True
self.frame = 25
self.frame_time += time_passed
self.time_passed = time_passed
if self.completeness < 1:
self.completeness += time_passed * self.fire_per_sec
if self.completeness > 1:
self.completeness = 1
if not self.beam:
self.image = pygame.transform.scale(self.images[\
f"{self.name}{int(6 * (self.completeness / 1))}"], self.size)
acc = True
if (pressed[pygame.K_LEFT] or pressed[pygame.K_a] or joystick.CurrentState.joystick_left) and not self.dead:
self.target_x_velocity = -self.max_velocity
elif (pressed[pygame.K_RIGHT] or pressed[pygame.K_d] or joystick.CurrentState.joystick_right) and not self.dead:
self.target_x_velocity = self.max_velocity
else:
acc = False
self.target_x_velocity = 0
if acc:
self.velocity.x -= (self.velocity.x - self.target_x_velocity) * self.speed * self.friction * (1 if self.hover else self.mission.friction * self.mission.traction) * self.acceleration * self.general_acc * self.time_passed
if not acc:
self.velocity.x -= (self.velocity.x - self.target_x_velocity) * self.friction * (1 if self.hover else self.mission.friction) * self.general_acc * self.time_passed
if self.velocity.x > self.max_velocity:
self.velocity.x = self.max_velocity
if self.velocity.x < -self.max_velocity:
self.velocity.x = -self.max_velocity
if abs(self.velocity.x) < .01 and not acc:
self.velocity.x = 0
if self.pos[0] <= 0 and self.velocity.x < 0:
self.velocity.x = abs(self.velocity.x) * .2
if self.rect.right >= 800 and self.velocity.x > 0:
self.velocity.x = -abs(self.velocity.x) * .2
# if self.pos[0] < 0:
# self.pos[0] = 0
# self.velocity[0] = 0
# if self.pos[0] > 736:
# self.pos[0] = 736
# self.velocity[0] = 0
if self.mission.temperature + self.add_temp > self.max_temp and not self.dead:
remove = ((self.mission.temperature + self.add_temp - self.max_temp) / 1000) * self.time_passed
if self.shield > 0:
self.shield -= remove
else:
self.health -= remove
if self.shield < 0:
self.shield = 0
self.pos[0] += self.velocity[0]
self.pos[1] += self.velocity[1]
self.rect = self.get_rect()
if not self.dead:
self.rect.bottom -= ((self.rect.bottom - self.target_alt) * self.alt_change_speed * self.time_passed)
self.pos[1] = self.rect.top
self.rect = self.get_rect()
def goToGround(self):
self.target_alt = self.min_alt
def goBackUp(self):
self.target_alt = self.standard_alt
class VenusCrawler(Zapper):
def __init__(self, pos, images, mission, dry = False):
super().__init__(pos, images, mission, dry)
self.title = "Venus Crawler"
self.name = "venus_crawler"
self.death_frames = 15
self.mass = 3
self.speed = 20
self.health = 2
self.friction = 1.5
self.max_health = 2
self.max_shield = 2
self.max_temp = 950 # Works up to 950 deg. F
self.fire_from = (22, 32)
self.fire_per_sec = 5
self.send_to_ground()
class Curiosity(Zapper):
def __init__(self, pos, images, mission, dry = False):
super().__init__(pos, images, mission, dry)
self.title = "Curiosity"
self.name = "curiosity"
self.death_frames = 15
self.size = (62, 38)
self.boom_size = (256, 256)
self.fire_from = (40, 6)
self.fire_per_sec = 50
self.health = .5
self.speed *= 1.3
self.max_health = .5
self.shield = .5
self.max_shield = .5
self.rect = self.get_rect()
self.always_beam = True
self.beam = True
self.rotate = True
self.whimp = True
self.send_to_ground()
self.mass = 2
class JupiterHover(Zapper):
def __init__(self, pos, images, mission, dry = False):
super().__init__(pos, images, mission, dry)
self.title = "Jupiter Hovercraft"
self.name = "jupiter_hover"
self.fire_per_sec = 6.5
self.death_frames = 21
self.size = (32, 32)
self.boom_size = (256, 256)
self.fire_from = (24, 5)
self.health = .75
self.max_health = .75
self.shield = 0
self.max_shield = 2
self.rect = self.get_rect()
self.hover = True
self.rotate = True
self.mass = .75
self.max_temp = 500
self.hover_alt = 430
self.frame_rate = 1/35
self.extra_dip = 10
self.speed *= .8
self.friction = .5
self.send_to_ground()
self.weapon_power = .75
self.death_sound = Sound(fix_path("audio/jupiter_hover_explode.wav"), True) | PypiClean |
/Flask-CKEditor-0.4.6.tar.gz/Flask-CKEditor-0.4.6/flask_ckeditor/static/standard/lang/eu.js | /*
Copyright (c) 2003-2020, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or https://ckeditor.com/license
*/
CKEDITOR.lang['eu']={"editor":"Testu aberastuaren editorea","editorPanel":"Testu aberastuaren editorearen panela","common":{"editorHelp":"Sakatu ALT 0 laguntza jasotzeko","browseServer":"Arakatu zerbitzaria","url":"URLa","protocol":"Protokoloa","upload":"Kargatu","uploadSubmit":"Bidali zerbitzarira","image":"Irudia","flash":"Flash","form":"Formularioa","checkbox":"Kontrol-laukia","radio":"Aukera-botoia","textField":"Testu-eremua","textarea":"Testu-area","hiddenField":"Ezkutuko eremua","button":"Botoia","select":"Hautespen-eremua","imageButton":"Irudi-botoia","notSet":"<ezarri gabe>","id":"Id","name":"Izena","langDir":"Hizkuntzaren norabidea","langDirLtr":"Ezkerretik eskuinera (LTR)","langDirRtl":"Eskuinetik ezkerrera (RTL)","langCode":"Hizkuntzaren kodea","longDescr":"URLaren deskribapen luzea","cssClass":"Estilo-orriko klaseak","advisoryTitle":"Aholkatutako izenburua","cssStyle":"Estiloa","ok":"Ados","cancel":"Utzi","close":"Itxi","preview":"Aurrebista","resize":"Aldatu tamainaz","generalTab":"Orokorra","advancedTab":"Aurreratua","validateNumberFailed":"Balio hau ez da zenbaki bat.","confirmNewPage":"Eduki honetan gorde gabe dauden aldaketak galduko dira. Ziur zaude orri berri bat kargatu nahi duzula?","confirmCancel":"Aukera batzuk aldatu dituzu. Ziur zaude elkarrizketa-koadroa itxi nahi duzula?","options":"Aukerak","target":"Helburua","targetNew":"Leiho berria (_blank)","targetTop":"Goieneko leihoan (_top)","targetSelf":"Leiho berean (_self)","targetParent":"Leiho gurasoan (_parent)","langDirLTR":"Ezkerretik eskuinera (LTR)","langDirRTL":"Eskuinetik ezkerrera (RTL)","styles":"Estiloa","cssClasses":"Estilo-orriko klaseak","width":"Zabalera","height":"Altuera","align":"Lerrokatzea","left":"Ezkerrean","right":"Eskuinean","center":"Erdian","justify":"Justifikatu","alignLeft":"Lerrokatu ezkerrean","alignRight":"Lerrokatu eskuinean","alignCenter":"Align Center","alignTop":"Goian","alignMiddle":"Erdian","alignBottom":"Behean","alignNone":"Bat ere ez","invalidValue":"Balio desegokia.","invalidHeight":"Altuera zenbaki bat izan behar da.","invalidWidth":"Zabalera zenbaki bat izan behar da.","invalidLength":"Value specified for the \"%1\" field must be a positive number with or without a valid measurement unit (%2).","invalidCssLength":"\"%1\" eremurako zehaztutako balioak zenbaki positibo bat izan behar du, CSS neurri unitate batekin edo gabe (px, %, in, cm, mm, em, ex, pt edo pc).","invalidHtmlLength":"\"%1\" eremurako zehaztutako balioak zenbaki positibo bat izan behar du, HTML neurri unitate batekin edo gabe (px edo %).","invalidInlineStyle":"Lineako estiloan zehaztutako balioak \"izen : balio\" formatuko tupla bat edo gehiago izan behar dira, komaz bereiztuak.","cssLengthTooltip":"Sartu zenbaki bat edo zenbaki bat baliozko CSS unitate batekin (px, %, in, cm, mm, em, ex, pt, edo pc).","unavailable":"%1<span class=\"cke_accessibility\">, erabilezina</span>","keyboard":{"8":"Atzera tekla","13":"Sartu","16":"Maius","17":"Ktrl","18":"Alt","32":"Zuriunea","35":"Buka","36":"Etxea","46":"Ezabatu","112":"F1","113":"F2","114":"F3","115":"F4","116":"F5","117":"F6","118":"F7","119":"F8","120":"F9","121":"F10","122":"F11","123":"F12","124":"F13","125":"F14","126":"F15","127":"F16","128":"F17","129":"F18","130":"F19","131":"F20","132":"F21","133":"F22","134":"F23","135":"F24","224":"Komandoa"},"keyboardShortcut":"Laster-tekla","optionDefault":"Lehenetsia"},"about":{"copy":"Copyright © $1. 
Eskubide guztiak erreserbaturik.","dlgTitle":"CKEditor 4ri buruz","moreInfo":"Lizentziari buruzko informazioa gure webgunean:"},"basicstyles":{"bold":"Lodia","italic":"Etzana","strike":"Marratua","subscript":"Azpi-indizea","superscript":"Goi-indizea","underline":"Azpimarratu"},"blockquote":{"toolbar":"Aipamen blokea"},"notification":{"closed":"Jakinarazpena itxita."},"toolbar":{"toolbarCollapse":"Tolestu tresna-barra","toolbarExpand":"Zabaldu tresna-barra","toolbarGroups":{"document":"Dokumentua","clipboard":"Arbela/Desegin","editing":"Editatu","forms":"Formularioak","basicstyles":"Oinarrizko estiloak","paragraph":"Paragrafoa","links":"Estekak","insert":"Txertatu","styles":"Estiloak","colors":"Koloreak","tools":"Tresnak"},"toolbars":"Editorearen tresna-barrak"},"clipboard":{"copy":"Kopiatu","copyError":"Zure web nabigatzailearen segurtasun ezarpenek ez dute baimentzen testuak automatikoki kopiatzea. Mesedez teklatua erabil ezazu (Ctrl/Cmd+C).","cut":"Ebaki","cutError":"Zure web nabigatzailearen segurtasun ezarpenek ez dute baimentzen testuak automatikoki moztea. Mesedez teklatua erabil ezazu (Ctrl/Cmd+X).","paste":"Itsatsi","pasteNotification":"Press %1 to paste. Your browser doesn‘t support pasting with the toolbar button or context menu option.","pasteArea":"Itsasteko area","pasteMsg":"Paste your content inside the area below and press OK."},"contextmenu":{"options":"Testuinguru-menuaren aukerak"},"elementspath":{"eleLabel":"Elementuen bidea","eleTitle":"%1 elementua"},"filetools":{"loadError":"Errorea gertatu da fitxategia irakurtzean.","networkError":"Sareko errorea gertatu da fitxategia kargatzean.","httpError404":"HTTP errorea gertatu da fitxategia kargatzean (404: Fitxategia ez da aurkitu).","httpError403":"HTTP errorea gertatu da fitxategia kargatzean (403: Debekatuta).","httpError":"HTTP errorea gertatu da fitxategia kargatzean (errore-egoera: %1).","noUrlError":"Kargatzeko URLa definitu gabe.","responseError":"Zerbitzariaren erantzun okerra."},"format":{"label":"Formatua","panelTitle":"Paragrafoaren formatua","tag_address":"Helbidea","tag_div":"Normala (DIV)","tag_h1":"Izenburua 1","tag_h2":"Izenburua 2","tag_h3":"Izenburua 3","tag_h4":"Izenburua 4","tag_h5":"Izenburua 5","tag_h6":"Izenburua 6","tag_p":"Normala","tag_pre":"Formatuduna"},"horizontalrule":{"toolbar":"Txertatu marra horizontala"},"image":{"alt":"Ordezko testua","border":"Ertza","btnUpload":"Bidali zerbitzarira","button2Img":"Hautatutako irudi-botoia irudi arrunt bihurtu nahi duzu?","hSpace":"HSpace","img2Button":"Hautatutako irudia irudi-botoi bihurtu nahi duzu?","infoTab":"Irudiaren informazioa","linkTab":"Esteka","lockRatio":"Blokeatu erlazioa","menu":"Irudiaren propietateak","resetSize":"Berrezarri tamaina","title":"Irudiaren propietateak","titleButton":"Irudi-botoiaren propietateak","upload":"Kargatu","urlMissing":"Irudiaren iturburuaren URLa falta da.","vSpace":"VSpace","validateBorder":"Ertza zenbaki oso bat izan behar da.","validateHSpace":"HSpace zenbaki oso bat izan behar da.","validateVSpace":"VSpace zenbaki oso bat izan behar da."},"indent":{"indent":"Handitu koska","outdent":"Txikitu koska"},"fakeobjects":{"anchor":"Aingura","flash":"Flash animazioa","hiddenfield":"Ezkutuko eremua","iframe":"IFrame-a","unknown":"Objektu ezezaguna"},"link":{"acccessKey":"Sarbide-tekla","advanced":"Aurreratua","advisoryContentType":"Aholkatutako eduki-mota","advisoryTitle":"Aholkatutako izenburua","anchor":{"toolbar":"Aingura","menu":"Editatu aingura","title":"Ainguraren propietateak","name":"Ainguraren 
izena","errorName":"Idatzi ainguraren izena","remove":"Kendu aingura"},"anchorId":"Elementuaren Id-aren arabera","anchorName":"Aingura-izenaren arabera","charset":"Estekatutako baliabide karaktere-jokoa","cssClasses":"Estilo-orriko klaseak","download":"Behartu deskarga","displayText":"Bistaratu testua","emailAddress":"E-posta helbidea","emailBody":"Mezuaren gorputza","emailSubject":"Mezuaren gaia","id":"Id","info":"Estekaren informazioa","langCode":"Hizkuntzaren kodea","langDir":"Hizkuntzaren norabidea","langDirLTR":"Ezkerretik eskuinera (LTR)","langDirRTL":"Eskuinetik ezkerrera (RTL)","menu":"Editatu esteka","name":"Izena","noAnchors":"(Ez dago aingurarik erabilgarri dokumentuan)","noEmail":"Mesedez idatzi e-posta helbidea","noUrl":"Mesedez idatzi estekaren URLa","noTel":"Please type the phone number","other":"<bestelakoa>","phoneNumber":"Phone number","popupDependent":"Menpekoa (Netscape)","popupFeatures":"Laster-leihoaren ezaugarriak","popupFullScreen":"Pantaila osoa (IE)","popupLeft":"Ezkerreko posizioa","popupLocationBar":"Kokaleku-barra","popupMenuBar":"Menu-barra","popupResizable":"Tamaina aldakorra","popupScrollBars":"Korritze-barrak","popupStatusBar":"Egoera-barra","popupToolbar":"Tresna-barra","popupTop":"Goiko posizioa","rel":"Erlazioa","selectAnchor":"Hautatu aingura","styles":"Estiloa","tabIndex":"Tabulazio indizea","target":"Helburua","targetFrame":"<frame>","targetFrameName":"Helburuko markoaren izena","targetPopup":"<laster-leihoa>","targetPopupName":"Laster-leihoaren izena","title":"Esteka","toAnchor":"Estekatu testuko aingurara","toEmail":"E-posta","toUrl":"URLa","toPhone":"Phone","toolbar":"Esteka","type":"Esteka-mota","unlink":"Kendu esteka","upload":"Kargatu"},"list":{"bulletedlist":"Buletdun Zerrenda","numberedlist":"Zenbakidun Zerrenda"},"magicline":{"title":"Txertatu paragrafoa hemen"},"maximize":{"maximize":"Maximizatu","minimize":"Minimizatu"},"pastetext":{"button":"Itsatsi testu arrunta bezala","pasteNotification":"Press %1 to paste. Your browser doesn‘t support pasting with the toolbar button or context menu option.","title":"Itsatsi testu arrunta bezala"},"pastefromword":{"confirmCleanup":"Itsatsi nahi duzun testua Word-etik kopiatua dela dirudi. 
Itsatsi baino lehen garbitu nahi duzu?","error":"Barne-errore bat dela eta ezin izan da itsatsitako testua garbitu","title":"Itsatsi Word-etik","toolbar":"Itsatsi Word-etik"},"removeformat":{"toolbar":"Kendu formatua"},"sourcearea":{"toolbar":"Iturburua"},"specialchar":{"options":"Karaktere berezien aukerak","title":"Hautatu karaktere berezia","toolbar":"Txertatu karaktere berezia"},"scayt":{"btn_about":"SCAYTi buruz","btn_dictionaries":"Hiztegiak","btn_disable":"Desgaitu SCAYT","btn_enable":"Gaitu SCAYT","btn_langs":"Hizkuntzak","btn_options":"Aukerak","text_title":"Ortografia Zuzenketa Idatzi Ahala (SCAYT)"},"stylescombo":{"label":"Estiloak","panelTitle":"Formatu estiloak","panelTitle1":"Bloke estiloak","panelTitle2":"Lineako estiloak","panelTitle3":"Objektu estiloak"},"table":{"border":"Ertzaren zabalera","caption":"Epigrafea","cell":{"menu":"Gelaxka","insertBefore":"Txertatu gelaxka aurretik","insertAfter":"Txertatu gelaxka ondoren","deleteCell":"Ezabatu gelaxkak","merge":"Batu gelaxkak","mergeRight":"Batu eskuinetara","mergeDown":"Batu behera","splitHorizontal":"Banatu gelaxka horizontalki","splitVertical":"Banatu gelaxka bertikalki","title":"Gelaxkaren propietateak","cellType":"Gelaxka-mota","rowSpan":"Errenkaden hedadura","colSpan":"Zutabeen hedadura","wordWrap":"Itzulbira","hAlign":"Lerrokatze horizontala","vAlign":"Lerrokatze bertikala","alignBaseline":"Oinarri-lerroan","bgColor":"Atzeko planoaren kolorea","borderColor":"Ertzaren kolorea","data":"Data","header":"Goiburua","yes":"Bai","no":"Ez","invalidWidth":"Gelaxkaren zabalera zenbaki bat izan behar da.","invalidHeight":"Gelaxkaren altuera zenbaki bat izan behar da.","invalidRowSpan":"Errenkaden hedadura zenbaki osoa izan behar da.","invalidColSpan":"Zutabeen hedadura zenbaki osoa izan behar da.","chooseColor":"Aukeratu"},"cellPad":"Gelaxken betegarria","cellSpace":"Gelaxka arteko tartea","column":{"menu":"Zutabea","insertBefore":"Txertatu zutabea aurretik","insertAfter":"Txertatu zutabea ondoren","deleteColumn":"Ezabatu zutabeak"},"columns":"Zutabeak","deleteTable":"Ezabatu taula","headers":"Goiburuak","headersBoth":"Biak","headersColumn":"Lehen zutabea","headersNone":"Bat ere ez","headersRow":"Lehen errenkada","heightUnit":"height unit","invalidBorder":"Ertzaren tamaina zenbaki bat izan behar da.","invalidCellPadding":"Gelaxken betegarria zenbaki bat izan behar da.","invalidCellSpacing":"Gelaxka arteko tartea zenbaki bat izan behar da.","invalidCols":"Zutabe kopurua 0 baino handiagoa den zenbakia izan behar da.","invalidHeight":"Taularen altuera zenbaki bat izan behar da.","invalidRows":"Errenkada kopurua 0 baino handiagoa den zenbakia izan behar da.","invalidWidth":"Taularen zabalera zenbaki bat izan behar da.","menu":"Taularen propietateak","row":{"menu":"Errenkada","insertBefore":"Txertatu errenkada aurretik","insertAfter":"Txertatu errenkada ondoren","deleteRow":"Ezabatu errenkadak"},"rows":"Errenkadak","summary":"Laburpena","title":"Taularen propietateak","toolbar":"Taula","widthPc":"ehuneko","widthPx":"pixel","widthUnit":"zabalera unitatea"},"undo":{"redo":"Berregin","undo":"Desegin"},"widget":{"move":"Klikatu eta arrastatu lekuz aldatzeko","label":"%1 widget"},"uploadwidget":{"abort":"Karga erabiltzaileak bertan behera utzita.","doneOne":"Fitxategia behar bezala kargatu da.","doneMany":"Behar bezala kargatu dira %1 fitxategi.","uploadOne":"Fitxategia kargatzen ({percentage}%)...","uploadMany":"Fitxategiak kargatzen, {current} / {max} eginda ({percentage}%)..."},"wsc":{"btnIgnore":"Ezikusi","btnIgnoreAll":"Denak 
Ezikusi","btnReplace":"Ordezkatu","btnReplaceAll":"Denak Ordezkatu","btnUndo":"Desegin","changeTo":"Honekin ordezkatu","errorLoading":"Errorea gertatu da aplikazioa zerbitzaritik kargatzean: %s.","ieSpellDownload":"Zuzentzaile ortografikoa ez dago instalatuta. Deskargatu nahi duzu?","manyChanges":"Zuzenketa ortografikoa bukatuta: %1 hitz aldatu dira","noChanges":"Zuzenketa ortografikoa bukatuta: Ez da ezer aldatu","noMispell":"Zuzenketa ortografikoa bukatuta: Akatsik ez","noSuggestions":"- Iradokizunik ez -","notAvailable":"Barkatu baina momentu honetan zerbitzua ez dago erabilgarri.","notInDic":"Ez dago hiztegian","oneChange":"Zuzenketa ortografikoa bukatuta: Hitz bat aldatu da","progress":"Zuzenketa ortografikoa martxan...","title":"Ortografia zuzenketa","toolbar":"Ortografia"}}; | PypiClean |
/Dts-OpenFisca-Core-34.8.0.tar.gz/Dts-OpenFisca-Core-34.8.0/openfisca_core/data_storage.py |
import shutil
import os
import numpy as np
from openfisca_core import periods
from openfisca_core.periods import ETERNITY
from openfisca_core.indexed_enums import EnumArray
class InMemoryStorage(object):
"""
Low-level class responsible for storing and retrieving calculated vectors in memory
"""
def __init__(self, is_eternal = False):
self._arrays = {}
self.is_eternal = is_eternal
def get(self, period):
if self.is_eternal:
period = periods.period(ETERNITY)
period = periods.period(period)
values = self._arrays.get(period)
if values is None:
return None
return values
def put(self, value, period):
if self.is_eternal:
period = periods.period(ETERNITY)
period = periods.period(period)
self._arrays[period] = value
def delete(self, period = None):
if period is None:
self._arrays = {}
return
if self.is_eternal:
period = periods.period(ETERNITY)
period = periods.period(period)
self._arrays = {
period_item: value
for period_item, value in self._arrays.items()
if not period.contains(period_item)
}
def get_known_periods(self):
return self._arrays.keys()
def get_memory_usage(self):
if not self._arrays:
return dict(
nb_arrays = 0,
total_nb_bytes = 0,
cell_size = np.nan,
)
nb_arrays = len(self._arrays)
array = next(iter(self._arrays.values()))
return dict(
nb_arrays = nb_arrays,
total_nb_bytes = array.nbytes * nb_arrays,
cell_size = array.itemsize,
)
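# Illustrative example (editor's addition): storing and retrieving a vector for a
# monthly period. The "2019-01" period string is an assumption about the usual
# OpenFisca period format.
#
#     storage = InMemoryStorage()
#     storage.put(np.asarray([1200.0, 0.0]), periods.period("2019-01"))
#     storage.get(periods.period("2019-01"))   # -> array([1200., 0.])
#     storage.delete(periods.period("2019"))   # drops every period contained in 2019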
class OnDiskStorage(object):
"""
Low-level class responsible for storing and retrieving calculated vectors on disk
"""
def __init__(self, storage_dir, is_eternal = False, preserve_storage_dir = False):
self._files = {}
self._enums = {}
self.is_eternal = is_eternal
self.preserve_storage_dir = preserve_storage_dir
self.storage_dir = storage_dir
def _decode_file(self, file):
enum = self._enums.get(file)
if enum is not None:
return EnumArray(np.load(file), enum)
else:
return np.load(file)
def get(self, period):
if self.is_eternal:
period = periods.period(ETERNITY)
period = periods.period(period)
values = self._files.get(period)
if values is None:
return None
return self._decode_file(values)
def put(self, value, period):
if self.is_eternal:
period = periods.period(ETERNITY)
period = periods.period(period)
filename = str(period)
path = os.path.join(self.storage_dir, filename) + '.npy'
if isinstance(value, EnumArray):
self._enums[path] = value.possible_values
value = value.view(np.ndarray)
np.save(path, value)
self._files[period] = path
def delete(self, period = None):
if period is None:
self._files = {}
return
if self.is_eternal:
period = periods.period(ETERNITY)
period = periods.period(period)
if period is not None:
self._files = {
period_item: value
for period_item, value in self._files.items()
if not period.contains(period_item)
}
def get_known_periods(self):
return self._files.keys()
def restore(self):
self._files = files = {}
# Restore self._files from content of storage_dir.
for filename in os.listdir(self.storage_dir):
if not filename.endswith('.npy'):
continue
path = os.path.join(self.storage_dir, filename)
filename_core = filename.rsplit('.', 1)[0]
period = periods.period(filename_core)
files[period] = path
def __del__(self):
if self.preserve_storage_dir:
return
shutil.rmtree(self.storage_dir) # Remove the holder temporary files
# If the simulation temporary directory is empty, remove it
parent_dir = os.path.abspath(os.path.join(self.storage_dir, os.pardir))
if not os.listdir(parent_dir):
shutil.rmtree(parent_dir) | PypiClean |
/Adafruit_Blinka-8.20.1-py3-none-any.whl/adafruit_blinka/board/khadas/khadasvim3.py | """Pin definitions for the Khadas VIM3."""
from adafruit_blinka.microcontroller.amlogic.a311d import pin
GPIOAO_0 = pin.GPIO496
GPIOAO_1 = pin.GPIO497
GPIOAO_2 = pin.GPIO498
GPIOAO_3 = pin.GPIO499
GPIOAO_4 = pin.GPIO500
GPIOAO_5 = pin.GPIO501
GPIOAO_6 = pin.GPIO502
GPIOAO_7 = pin.GPIO503
GPIOAO_8 = pin.GPIO504
GPIOAO_9 = pin.GPIO505
GPIOAO_10 = pin.GPIO506
GPIOAO_11 = pin.GPIO507
GPIOE_0 = pin.GPIO508
GPIOE_1 = pin.GPIO509
GPIOE_2 = pin.GPIO510
GPIOZ_0 = pin.GPIO427
GPIOZ_1 = pin.GPIO428
GPIOZ_2 = pin.GPIO429
GPIOZ_3 = pin.GPIO430
GPIOZ_4 = pin.GPIO431
GPIOZ_5 = pin.GPIO432
GPIOZ_6 = pin.GPIO433
GPIOZ_7 = pin.GPIO434
GPIOZ_8 = pin.GPIO435
GPIOZ_9 = pin.GPIO436
GPIOZ_10 = pin.GPIO437
GPIOZ_11 = pin.GPIO438
GPIOZ_12 = pin.GPIO439
GPIOZ_13 = pin.GPIO440
GPIOZ_14 = pin.GPIO441
GPIOZ_15 = pin.GPIO442
GPIOH_0 = pin.GPIO443
GPIOH_1 = pin.GPIO444
GPIOH_2 = pin.GPIO445
GPIOH_3 = pin.GPIO446
GPIOH_4 = pin.GPIO447
GPIOH_5 = pin.GPIO448
GPIOH_6 = pin.GPIO449
GPIOH_7 = pin.GPIO450
GPIOH_8 = pin.GPIO451
BOOT_0 = pin.GPIO452
BOOT_1 = pin.GPIO453
BOOT_2 = pin.GPIO454
BOOT_3 = pin.GPIO455
BOOT_4 = pin.GPIO456
BOOT_5 = pin.GPIO457
BOOT_6 = pin.GPIO458
BOOT_7 = pin.GPIO459
BOOT_8 = pin.GPIO460
BOOT_9 = pin.GPIO461
BOOT_10 = pin.GPIO462
BOOT_11 = pin.GPIO463
BOOT_12 = pin.GPIO464
BOOT_13 = pin.GPIO465
BOOT_14 = pin.GPIO466
BOOT_15 = pin.GPIO467
GPIOC_0 = pin.GPIO468
GPIOC_1 = pin.GPIO469
GPIOC_2 = pin.GPIO470
GPIOC_3 = pin.GPIO471
GPIOC_4 = pin.GPIO472
GPIOC_5 = pin.GPIO473
GPIOC_6 = pin.GPIO474
GPIOC_7 = pin.GPIO475
GPIOA_0 = pin.GPIO460
GPIOA_1 = pin.GPIO461
GPIOA_2 = pin.GPIO462
GPIOA_3 = pin.GPIO463
GPIOA_4 = pin.GPIO464
GPIOA_5 = pin.GPIO465
GPIOA_6 = pin.GPIO466
GPIOA_7 = pin.GPIO467
GPIOA_8 = pin.GPIO468
GPIOA_9 = pin.GPIO469
GPIOA_10 = pin.GPIO470
GPIOA_11 = pin.GPIO471
GPIOA_12 = pin.GPIO472
GPIOA_13 = pin.GPIO473
GPIOA_14 = pin.GPIO474
GPIOA_15 = pin.GPIO475
GPIOX_0 = pin.GPIO476
GPIOX_1 = pin.GPIO477
GPIOX_2 = pin.GPIO478
GPIOX_3 = pin.GPIO479
GPIOX_4 = pin.GPIO480
GPIOX_5 = pin.GPIO481
GPIOX_6 = pin.GPIO482
GPIOX_7 = pin.GPIO483
GPIOX_8 = pin.GPIO484
GPIOX_9 = pin.GPIO485
GPIOX_10 = pin.GPIO486
GPIOX_11 = pin.GPIO487
GPIOX_12 = pin.GPIO488
GPIOX_13 = pin.GPIO489
GPIOX_14 = pin.GPIO490
GPIOX_15 = pin.GPIO491
GPIOX_16 = pin.GPIO492
GPIOX_17 = pin.GPIO493
GPIOX_18 = pin.GPIO494
GPIOX_19 = pin.GPIO495
SCL = pin.GPIOX_18
SDA = pin.GPIOX_17
SCLK = pin.SPI0_SCLK
MCLK0 = pin.SPI0_MCLK0
MISO = pin.SPI0_SDO
MOSI = pin.SPI0_SDI
D0 = GPIOAO_10 # PIN_13
D1 = GPIOH_6 # PIN_15
D2 = GPIOH_7 # PIN_16
D3 = GPIOAO_1 # PIN_18
D4 = GPIOAO_2 # PIN_19
D5 = GPIOA_15 # PIN_22
D6 = GPIOA_14 # PIN_23
D7 = GPIOAO_2 # PIN_25
D8 = GPIOAO_3 # PIN_26
D9 = GPIOA_1 # PIN_29
D10 = GPIOA_0 # PIN_30
D11 = GPIOA_3 # PIN_31
D12 = GPIOA_2 # PIN_32
D13 = GPIOA_4 # PIN_33
D14 = GPIOH_5 # PIN_35
D15 = GPIOH_4 # PIN_37
D16 = GPIOZ_15 # PIN_39 | PypiClean |
/MMutils-0.0.4-py3-none-any.whl/mmutils/str_utils/Mstr.py | import re
def split_(ori_str, start_str, end_str):
""" 截取字符串
创建时间:2021.10.28
:param ori_str: 需要截取的原始字符串
:param start_str: 开始的字符串,通过这个来获取开始截取的索引
:param end_str: 结束的字符串, 通过这个来获取结束截取的索引
:return: 返回截取后的字符串
"""
    start_index = ori_str.find(start_str) + len(start_str)  # index where extraction begins
    end_index = ori_str.find(end_str)  # index where extraction ends
    split_str = ori_str[start_index: end_index]  # extract the substring
return split_str
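# Illustrative example (editor's addition): extracting the text between two
# markers. Only the first occurrence of each marker is used.
#
#     split_("id=<123>;", "<", ">")   # -> "123"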
def filter_(ori_str, rep_str=None, to_str=None, mode=None, ):
""" 过滤 / 替换 函数的综合
创建时间:2021.10.29 20:53
:param ori_str: 原始内容
:param rep_str: 需要替换的字符,默认为 None
:param to_str: 替换成的字符, 默认为 None
:param mode: 替换模式,目前有 csv、filename,默认为None
:return: 返回替换后的内容
"""
if mode == 'csv':
return filter_csv(ori_str)
elif mode == 'filename':
return filter_filename(ori_str)
else:
return filter_str(ori_str, rep_str, to_str)
def filter_str(ori_str, rep_str, to_str):
""" 单次 /多次 过滤 / 替换 字符串
创建时间:2021.10.29 20:50
更新时间:2021.11.18 9:55
:param ori_str: 原始内容
:param rep_str: 要替换的字符(串),用 | 符号隔开,可以多次替换
:param to_str: 替换成的字符(串),如果用 | 符号隔开,表示对应 rep_str修改
:return: 返回替换后的内容
"""
if rep_str is None or to_str is None:
        raise ValueError('Both rep_str and to_str must be provided')
is_split = False if rep_str.find('|') == -1 else True
if is_split:
rep_str_list = rep_str.split('|')
for rep_str_index in range(len(rep_str_list)):
is_split = False if to_str.find('|') == -1 else True
if is_split:
to_str_list = to_str.split('|')
try:
ori_str = ori_str.replace(rep_str_list[rep_str_index], to_str_list[rep_str_index])
except IndexError:
                    # On an IndexError, fall back to the last replacement value
ori_str = ori_str.replace(rep_str_list[rep_str_index], to_str_list[-1])
else:
ori_str = ori_str.replace(rep_str_list[rep_str_index], to_str)
return ori_str
else:
return ori_str.replace(rep_str, to_str)
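# Illustrative example (editor's addition): replacing several substrings in one
# call by separating them with "|".
#
#     filter_str("a-b_c", rep_str="-|_", to_str="+")     # -> "a+b+c"
#     filter_str("a-b_c", rep_str="-|_", to_str="+|=")   # -> "a+b=c"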
def filter_csv(ori_str, rep_str=',', to_str=','):
""" 过滤追加到 csv文件中的字符串内容中的指定字符(串)为指定字符(串)
创建时间:2021.10.29 20:45
:param ori_str: 原始内容
:param rep_str: 需要替换的字符(串), 默认为 ,(英文)
:param to_str: 替换成的字符(串),默认为 ,(中文)
:return: 返回替换后的内容
"""
return ori_str.replace(rep_str, to_str)
def filter_filename(filename):
""" 去除非法文件名
创建时间:2021.10.28
:param filename: 原始的文件名
:return: 返回过滤后的文件名
"""
return re.sub('[<>/\\\\|:"*?]', "_", filename) | PypiClean |
/HY_sdk-0.2.56-py3-none-any.whl/hivisionai/hycv/vision.py | import cv2
from PIL import Image
import numpy as np
"""
Ideas for upcoming development:
- upload.sh: add automatic folder deletion
- mtcnn: add a 68-point facial landmark detection module
"""
def ChangeImageDPI(input_path, output_path, dpi=300):
"""
    Change the DPI of the input image.
    input_path: path to the input image
    output_path: path to the output image
    dpi: print resolution
"""
image = Image.open(input_path)
image.save(output_path, dpi=(dpi, dpi))
# print(1)
print("Your Image's DPI have been changed. The last DPI = ({},{}) ".format(dpi,dpi))
def IDphotos_cut(x1, y1, x2, y2, img):
"""
    Perform a sliding crop on an image.
    Input: an image img and a crop box given by (x1, y1, x2, y2)
    Output: the cropped image; wherever the crop box extends past the image, the missing area is filled with a zero matrix
    ------------------------------------
    x1: x coordinate of the crop box's top-left corner
    y1: y coordinate of the crop box's top-left corner
    x2: x coordinate of the crop box's bottom-right corner
    y2: y coordinate of the crop box's bottom-right corner
    crop_size: size of the crop box
    img: the image to crop (numpy.array)
    output_path: output path for the cropped image
------------------------------------
"""
crop_size = (y2-y1, x2-x1)
"""
------------------------------------
    temp_x_1: portion of the crop box extending past the left edge of the image
    temp_y_1: portion of the crop box extending past the top edge of the image
    temp_x_2: portion of the crop box extending past the right edge of the image
    temp_y_2: portion of the crop box extending past the bottom edge of the image
------------------------------------
"""
temp_x_1 = 0
temp_y_1 = 0
temp_x_2 = 0
temp_y_2 = 0
if y1 < 0:
temp_y_1 = abs(y1)
y1 = 0
if y2 > img.shape[0]:
temp_y_2 = y2
y2 = img.shape[0]
temp_y_2 = temp_y_2 - y2
if x1 < 0:
temp_x_1 = abs(x1)
x1 = 0
if x2 > img.shape[1]:
temp_x_2 = x2
x2 = img.shape[1]
temp_x_2 = temp_x_2 - x2
    # create a fully transparent background
print("crop_size:", crop_size)
background_bgr = np.full((crop_size[0], crop_size[1]), 255, dtype=np.uint8)
background_a = np.full((crop_size[0], crop_size[1]), 0, dtype=np.uint8)
background = cv2.merge((background_bgr, background_bgr, background_bgr, background_a))
background[temp_y_1: crop_size[0] - temp_y_2, temp_x_1: crop_size[1] - temp_x_2] = img[y1:y2, x1:x2]
return background
def resize_image_esp(input_image, esp=2000):
"""
    Input:
    input_image: numpy image
    esp: maximum allowed side length
"""
    # resize step => shrinks the image so that its longest side is at most esp pixels (aspect ratio preserved)
width = input_image.shape[0]
length = input_image.shape[1]
max_num = max(width, length)
if max_num > esp:
print("Image resizing...")
if width == max_num:
length = int((esp / width) * length)
width = esp
else:
width = int((esp / length) * width)
length = esp
print(length, width)
im_resize = cv2.resize(input_image, (length, width), interpolation=cv2.INTER_AREA)
return im_resize
else:
return input_image
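# Illustrative example (editor's addition): a 4000x3000 image is scaled so that
# its longest side becomes 2000 pixels while keeping the aspect ratio.
#
#     img = np.zeros((3000, 4000, 3), dtype=np.uint8)
#     resize_image_esp(img, esp=2000).shape   # -> (1500, 2000, 3)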
def detect_distance(value, crop_heigh, max=0.06, min=0.04):
"""
    Check whether the distance between the top of the head and the top of the photo is within an acceptable range.
    Input: the gap measured from the top of the photo
    Output: (status, move_value)
    status=0 no adjustment needed
    status=1 the face should move up (the crop box moves down)
    status=2 the face should move down (the crop box moves up)
    ---------------------------------------
    value: distance between the top of the head and the top of the photo
    crop_heigh: height of the crop box
    max: maximum allowed distance
    min: minimum allowed distance
---------------------------------------
"""
    value = value/crop_heigh  # fraction of the crop height above the top of the head
print("value:", value)
if min <= value <= max:
return 0, 0
elif value > max:
        # the fraction above the head is greater than max
move_value = value - max
move_value = int(move_value * crop_heigh)
# print("上移{}".format(move_value))
return 1, move_value
else:
        # the fraction above the head is less than min
move_value = min - value
move_value = int(move_value * crop_heigh)
# print("下移{}".format(move_value))
return 2, move_value
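# Illustrative example (editor's addition): with a 1000-pixel crop box, a
# 100-pixel gap above the head is 10% of the height, which exceeds the 6%
# maximum, so the crop box should move down by the 40-pixel excess.
#
#     detect_distance(100, 1000)   # -> (1, 40)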
def draw_picture_dots(image, dots, pen_size=10, pen_color=(0, 0, 255)):
"""
    Draw dots on an image.
    image: OpenCV image matrix
    dots: a collection of points, e.g. [(100, 100), (150, 100)]
    pen_size: size of the drawing pen
    pen_color: color of the drawing pen
"""
if isinstance(dots, dict):
dots = [v for u, v in dots.items()]
image = image.copy()
for x, y in dots:
cv2.circle(image, (int(x), int(y)), pen_size, pen_color, -1)
return image
def draw_picture_rectangle(image, bbox, pen_size=2, pen_color=(0, 0, 255)):
image = image.copy()
x1 = int(bbox[0])
y1 = int(bbox[1])
x2 = int(bbox[2])
y2 = int(bbox[3])
cv2.rectangle(image, (x1,y1), (x2, y2), pen_color, pen_size)
return image
def add_background(input_image, bgr=(0, 0, 0), new_background=None):
height, width = input_image.shape[0], input_image.shape[1]
b, g, r, a = cv2.split(input_image)
if new_background is None:
        # solid-color fill
b2 = np.full([height, width], bgr[0], dtype=int)
g2 = np.full([height, width], bgr[1], dtype=int)
r2 = np.full([height, width], bgr[2], dtype=int)
a_cal = a / 255
output = cv2.merge(((b - b2) * a_cal + b2, (g - g2) * a_cal + g2, (r - r2) * a_cal + r2))
else:
        # fill using the provided background image
background = new_background
background = cv2.resize(background, (width, height))
b2, g2, r2 = cv2.split(background)
a_cal = a / 255
output = cv2.merge(((b - b2) * a_cal + b2, (g - g2) * a_cal + g2, (r - r2) * a_cal + r2))
return output
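# Illustrative example (editor's addition): compositing a BGRA cut-out onto a
# plain red background (OpenCV uses BGR channel order). The file name below is
# hypothetical.
#
#     portrait = cv2.imread("cutout.png", cv2.IMREAD_UNCHANGED)   # 4-channel BGRA
#     composed = add_background(portrait, bgr=(0, 0, 255))
#     cv2.imwrite("on_red.jpg", composed)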
def cover_image(image, background, x, y, mode=1):
"""
mode = 1: directly cover
mode = 2: cv2.add
mode = 3: bgra cover
"""
image = image.copy()
background = background.copy()
height1, width1 = background.shape[0], background.shape[1]
height2, width2 = image.shape[0], image.shape[1]
wuqiong_bg_y = height1 + 1
wuqiong_bg_x = width1 + 1
wuqiong_img_y = height2 + 1
wuqiong_img_x = width2 + 1
def cover_mode(image, background, imgy1=0, imgy2=-1, imgx1=0, imgx2=-1, bgy1=0, bgy2=-1, bgx1=0, bgx2=-1, mode=1):
if mode == 1:
background[bgy1:bgy2, bgx1:bgx2] = image[imgy1:imgy2, imgx1:imgx2]
elif mode == 2:
background[bgy1:bgy2, bgx1:bgx2] = cv2.add(background[bgy1:bgy2, bgx1:bgx2], image[imgy1:imgy2, imgx1:imgx2])
elif mode == 3:
b, g, r, a = cv2.split(image[imgy1:imgy2, imgx1:imgx2])
b2, g2, r2, a2 = cv2.split(background[bgy1:bgy2, bgx1:bgx2])
background[bgy1:bgy2, bgx1:bgx2, 0] = b * (a / 255) + b2 * (1 - a / 255)
background[bgy1:bgy2, bgx1:bgx2, 1] = g * (a / 255) + g2 * (1 - a / 255)
background[bgy1:bgy2, bgx1:bgx2, 2] = r * (a / 255) + r2 * (1 - a / 255)
background[bgy1:bgy2, bgx1:bgx2, 3] = cv2.add(a, a2)
return background
if x >= 0 and y >= 0:
x2 = x + width2
y2 = y + height2
if x2 <= width1 and y2 <= height1:
background = cover_mode(image, background,0,wuqiong_img_y,0,wuqiong_img_x,y,y2,x,x2,mode)
elif x2 > width1 and y2 <= height1:
# background[y:y2, x:] = image[:, :width1 - x]
background = cover_mode(image, background, 0, wuqiong_img_y, 0, width1-x, y, y2, x, wuqiong_bg_x,mode)
elif x2 <= width1 and y2 > height1:
# background[y:, x:x2] = image[:height1 - y, :]
background = cover_mode(image, background, 0, height1-y, 0, wuqiong_img_x, y, wuqiong_bg_y, x, x2,mode)
else:
# background[y:, x:] = image[:height1 - y, :width1 - x]
background = cover_mode(image, background, 0, height1-y, 0, width1-x, y, wuqiong_bg_y, x, wuqiong_bg_x,mode)
elif x < 0 and y >= 0:
x2 = x + width2
y2 = y + height2
if x2 <= width1 and y2 <= height1:
# background[y:y2, :x + width2] = image[:, abs(x):]
background = cover_mode(image, background, 0, wuqiong_img_y, abs(x), wuqiong_img_x, y, y2, 0, x+width2,mode)
elif x2 > width1 and y2 <= height1:
background = cover_mode(image, background, 0, wuqiong_img_y, abs(x), width1+abs(x), y, y2, 0, wuqiong_bg_x,mode)
elif x2 <= 0:
pass
elif x2 <= width1 and y2 > height1:
background = cover_mode(image, background, 0, height1-y, abs(x), wuqiong_img_x, y, wuqiong_bg_y, 0, x2, mode)
else:
# background[y:, :] = image[:height1 - y, abs(x):width1 + abs(x)]
background = cover_mode(image, background, 0, height1-y, abs(x), width1+abs(x), y, wuqiong_bg_y, 0, wuqiong_bg_x,mode)
elif x >= 0 and y < 0:
x2 = x + width2
y2 = y + height2
if y2 <= 0:
pass
if x2 <= width1 and y2 <= height1:
# background[:y2, x:x2] = image[abs(y):, :]
background = cover_mode(image, background, abs(y), wuqiong_img_y, 0, wuqiong_img_x, 0, y2, x, x2,mode)
elif x2 > width1 and y2 <= height1:
# background[:y2, x:] = image[abs(y):, :width1 - x]
background = cover_mode(image, background, abs(y), wuqiong_img_y, 0, width1-x, 0, y2, x, wuqiong_bg_x,mode)
elif x2 <= width1 and y2 > height1:
# background[:, x:x2] = image[abs(y):height1 + abs(y), :]
background = cover_mode(image, background, abs(y), height1+abs(y), 0, wuqiong_img_x, 0, wuqiong_bg_y, x, x2,mode)
else:
# background[:, x:] = image[abs(y):height1 + abs(y), :width1 - abs(x)]
background = cover_mode(image, background, abs(y), height1+abs(y), 0, width1-abs(x), 0, wuqiong_bg_x, x, wuqiong_bg_x,mode)
else:
x2 = x + width2
y2 = y + height2
if y2 <= 0 or x2 <= 0:
pass
if x2 <= width1 and y2 <= height1:
# background[:y2, :x2] = image[abs(y):, abs(x):]
background = cover_mode(image, background, abs(y), wuqiong_img_y, abs(x), wuqiong_img_x, 0, y2, 0, x2,mode)
elif x2 > width1 and y2 <= height1:
# background[:y2, :] = image[abs(y):, abs(x):width1 + abs(x)]
background = cover_mode(image, background, abs(y), wuqiong_img_y, abs(x), width1+abs(x), 0, y2, 0, wuqiong_bg_x,mode)
elif x2 <= width1 and y2 > height1:
# background[:, :x2] = image[abs(y):height1 + abs(y), abs(x):]
background = cover_mode(image, background, abs(y), height1+abs(y), abs(x), wuqiong_img_x, 0, wuqiong_bg_y, 0, x2,mode)
else:
# background[:, :] = image[abs(y):height1 - abs(y), abs(x):width1 + abs(x)]
background = cover_mode(image, background, abs(y), height1-abs(y), abs(x), width1+abs(x), 0, wuqiong_bg_y, 0, wuqiong_bg_x,mode)
return background
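# Illustrative example (editor's addition): pasting a 100x100 BGRA sticker onto a
# larger canvas at (50, 80) using alpha-aware blending (mode=3). Both arrays are
# synthetic and used only for illustration.
#
#     canvas = np.zeros((500, 500, 4), dtype=np.uint8)
#     sticker = np.full((100, 100, 4), 255, dtype=np.uint8)
#     result = cover_image(sticker, canvas, 50, 80, mode=3)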
def image2bgr(input_image):
if len(input_image.shape) == 2:
input_image = input_image[:, :, None]
if input_image.shape[2] == 1:
result_image = np.repeat(input_image, 3, axis=2)
elif input_image.shape[2] == 4:
result_image = input_image[:, :, 0:3]
else:
result_image = input_image
return result_image
if __name__ == "__main__":
image = cv2.imread("./03.png", -1)
result_image = add_background(image, bgr=(255, 255, 255))
cv2.imwrite("test.jpg", result_image) | PypiClean |
/MoonNectar-0.6.0.tar.gz/MoonNectar-0.6.0/mnectar/util/decorator.py |
import functools
import inspect
import platform
from copy import copy
class Decorator:
"""
Decorator base class capable of decorating a function or class method, with or
without additional default arguments.
The default implementation does nothing and is designed to be used as part of a base
class for a new decorator.
Example:
>>> class mul(Decorator):
... def __init__(self, func=None, default=2):
... super().__init__(func)
... self.default = default
... def _call_wrapped(self, *args, **kwargs):
... args = tuple(_*2 if type(_) == int else _ for _ in args)
... kwargs = {key:val*self.default for key,val in kwargs.items()}
... super()._call_wrapped(*args, **kwargs)
... class Foo:
... @mul
... def mul2(self, val1, val2=None):
... print(f"Foo.mul2: {val1} ... {val2}")
... @mul(default=3)
... def mul3(self, val1, val2=None):
... print(f"Foo.mul3: {val1} ... {val2}")
... obj=Foo()
... obj.mul2(123)
... obj.mul2(123, 456)
... obj.mul3(3, 9)
Foo.mul2: 246 ... None
Foo.mul2: 246 ... 912
Foo.mul3: 6 ... 18
"""
def __init__(self, func=None):
"""
Override as necessary to add additional keyword arguments
"""
self.__self__ = None
if func is None:
self.__with_args = True
self.__wrapped__ = None
else:
self.__with_args = False
self.__wrapped__ = func
self._wrap_func(self, func)
def _call_wrapped(self, *args, **kwargs):
"""
Calls the wrapped function/method. Override this method to customize how the
wrapped function is called.
"""
return self.__wrapped__(*args, **kwargs)
def _wrap_func(self, obj, func):
"""
Wrap the function or class method to update __doc__ and other similar
attributes. Override this method to customize how the function is wrapped.
"""
# update __doc__ and similar attributes
functools.update_wrapper(obj, func)
return self
def __call__(self, *args, **kwargs):
if self.__with_args:
if self.__wrapped__ is None:
assert len(args) == 1
assert len(kwargs) == 0
return self._wrap_func(self, args[0])
else:
return self.__call_wrapped_function(*args, **kwargs)
else:
return self.__call_wrapped_function(*args, **kwargs)
def __get_wrapped_args(self, *args):
# if bound to an object, pass it as the first argument
if self.__self__ is not None:
args = (self.__self__,) + args
return args
def __call_wrapped_function(self, *args, **kwargs):
args = self.__get_wrapped_args(*args)
return self._call_wrapped(*args, **kwargs)
def __set_name__(self, owner, name):
self._name = name
def _bind(self, instance, owner):
"""
Override this method to customize the property binding process
"""
# create a bound copy of this object
bound = copy(self)
bound.__self__ = instance
if self.__with_args:
bound._wrap_func(self, self.__wrapped__)
else:
bound._wrap_func(bound, self.__wrapped__)
return bound
def __get__(self, instance, owner):
if instance is None:
return self
else:
bound = self._bind(instance, owner)
# add the bound decorator to the object's dict so that
# __get__ won't be called a 2nd time
setattr(instance, self.__wrapped__.__name__, bound)
return bound
class oscheck(Decorator):
def __init__(self, function=None, target=None, return_value=None, return_factory=None):
"""
Decorator which executes the specified class method only if matching the specified
operating system.
        :param function: The function or method being decorated.
        :param target: The required platform.system() value (e.g. "Linux", "Darwin", "Windows").
        :param return_value: The return value to use if the check fails.
        :param return_factory: The return value factory to use if the check fails (called with NO arguments)
"""
super().__init__(function)
if return_value and return_factory:
raise ValueError("Canot specify both 'return_value' and 'return_factory'!")
elif target is None:
raise ValueError("Missing required keyword 'target'")
else:
self.target = target
self.return_value = return_value
self.return_factory = return_factory
def _call_wrapped(self, *arg, **kw):
if platform.system() == self.target:
return super()._call_wrapped(*arg, **kw)
elif self.return_factory is not None:
return self.return_factory()
else:
return self.return_value
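# Illustrative usage sketch for ``oscheck`` (the class and method names below are
# hypothetical, not part of this module):
#
#     class Paths:
#         @oscheck(target="Darwin", return_value="/tmp")
#         def cache_dir(self):
#             return "~/Library/Caches"
#
# On macOS ("Darwin") the wrapped method runs normally; on any other platform the
# decorator short-circuits and returns "/tmp" (or the result of calling
# return_factory() if that keyword was given instead of return_value).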
class classproperty:
"""
Simple class property decorator
"""
def __init__(self, getter):
self.getter = getter
def __get__(self, instance, owner):
return self.getter(owner) | PypiClean |
/NNGT-2.7.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl/nngt/geometry/plot.py | import numpy as np
from matplotlib.patches import PathPatch
from matplotlib.path import Path
def plot_shape(shape, axis=None, m='', mc="#999999", fc="#8888ff",
ec="#444444", alpha=0.5, brightness="height", show_contour=True,
show=True, **kwargs):
'''
Plot a shape (you should set the `axis` aspect to 1 to respect the
proportions).
Parameters
----------
shape : :class:`Shape`
Shape to plot.
axis : :class:`matplotlib.axes.Axes` instance, optional (default: None)
Axis on which the shape should be plotted. By default, a new figure
is created.
m : str, optional (default: invisible)
Marker to plot the shape's vertices, matplotlib syntax.
mc : str, optional (default: "#999999")
Color of the markers.
fc : str, optional (default: "#8888ff")
Color of the shape's interior.
ec : str, optional (default: "#444444")
Color of the shape's edges.
alpha : float, optional (default: 0.5)
Opacity of the shape's interior.
brightness : str, optional (default: height)
Show how different other areas are from the 'default_area' (lower
values are darker, higher values are lighter).
Difference can concern the 'height', or any of the `properties` of the
:class:`Area` objects.
show_contour : bool, optional (default: True)
Whether the shapes should be drawn with a contour.
show : bool, optional (default: True)
Whether the plot should be displayed immediately.
    **kwargs: keyword arguments for :class:`matplotlib.patches.PathPatch`
'''
# import
import matplotlib.pyplot as plt
MultiPolygon = None
try:
from shapely.geometry import (MultiPolygon, Polygon, LineString,
MultiLineString)
except ImportError:
pass
if axis is None:
fig, axis = plt.subplots()
zorder = kwargs.get("zorder", 0)
if "zorder" in kwargs:
del kwargs["zorder"]
# plot the main shape
if isinstance(shape, MultiPolygon):
for p in shape.geoms:
plot_shape(p, axis=axis, m=m, mc=mc, fc=fc, ec=ec, alpha=alpha,
brightness=brightness, show=False,
show_contour=show_contour, **kwargs)
elif isinstance(shape, Polygon) and shape.exterior.coords:
if show_contour:
_plot_coords(axis, shape.exterior, m, mc, ec)
for path in shape.interiors:
_plot_coords(axis, path.coords, m, mc, ec)
patch = _make_patch(shape, color=fc, alpha=alpha, zorder=zorder,
**kwargs)
axis.add_patch(patch)
# take care of the areas
if hasattr(shape, "areas"):
def_area = shape.areas["default_area"]
# get the highest and lowest properties
mean = _get_prop(def_area, brightness)
low, high = np.inf, -np.inf
for name, area in shape.areas.items():
if name != "default_area":
prop = _get_prop(area, brightness)
if prop < low:
low = prop
if prop > high:
high = prop
# plot the areas
for name, area in shape.areas.items():
if name != "default_area":
prop = _get_prop(area, brightness)
color = fc
local_alpha = 0
if prop < mean:
color = "black"
local_alpha = alpha * (prop - mean) / (low - mean)
elif prop > mean:
color = "white"
local_alpha = alpha * (prop - mean) / (high - mean)
# contour
_plot_coords(axis, area.exterior, m, mc, ec)
                for path in area.interiors:
_plot_coords(axis, path.coords, m, mc, ec)
# content
patch = _make_patch(
area, color=color, alpha=local_alpha, zorder=zorder,
**kwargs)
axis.add_patch(patch)
elif isinstance(shape, (LineString, MultiLineString)):
lines = [shape] if isinstance(shape, LineString) else shape.geoms
for line in lines:
_plot_coords(axis, line.coords, m, mc, ec)
axis.set_aspect(1)
if show:
plt.show()
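# Illustrative usage sketch (assumes a Shape instance, e.g. built with one of the
# nngt.geometry factory helpers such as Shape.rectangle; names and values below
# are only examples, not part of this module):
#
#     import matplotlib.pyplot as plt
#     from nngt.geometry import Shape
#
#     culture = Shape.rectangle(40., 20.)
#     fig, ax = plt.subplots()
#     plot_shape(culture, axis=ax, fc="#8888ff", alpha=0.3, show=False)
#     plt.show()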
def _make_patch(shape, **kwargs):
'''
Construct a matplotlib patch from a geometric object
Parameters
----------
shape: :class:`NetGrowth.geometry.Shape`
may be a Shapely or GeoJSON-like object with or without holes.
    kwargs: keyword arguments for :class:`matplotlib.patches.PathPatch`
Returns
-------
an instance of :class:`matplotlib.patches.PathPatch`.
Example
-------
(using Shapely Point and a matplotlib axes):
>>> b = Point(0, 0).buffer(1.0)
>>> patch = PolygonPatch(b, fc='blue', ec='blue', alpha=0.5)
>>> axis.add_patch(patch)
Modified from `descartes` by Sean Gillies (BSD license).
'''
vertices = np.concatenate(
[np.asarray(shape.exterior.coords)[:, :2]] +
[np.asarray(h.coords)[:, :2] for h in shape.interiors])
instructions = np.concatenate(
[_path_instructions(shape.exterior)] +
[_path_instructions(h) for h in shape.interiors])
path = Path(vertices, instructions)
return PathPatch(path, **kwargs)
def _path_instructions(ob):
'''
Give instructions to build path from vertices.
'''
# The codes will be all "LINETO" commands, except for "MOVETO"s at the
# beginning of each subpath
n = len(ob.coords)
vals = np.ones(n, dtype=Path.code_type) * Path.LINETO
vals[0] = Path.MOVETO
return vals
def _plot_coords(ax, ob, m, mc, ec):
if hasattr(ob, 'coords') and isinstance(ob.coords, list):
for coord in ob.coords:
x, y = ob.xy
ax.plot(x, y, marker=m, ls='-', c=ec, markerfacecolor=mc, zorder=1)
else:
x, y = ob.xy
ax.plot(x, y, marker=m, ls='-', c=ec, markerfacecolor=mc, zorder=1)
def _get_prop(area, brightness):
is_height = (brightness == "height")
return area.height if is_height else area.properties[brightness] | PypiClean |
/KayleeVC-0.1.1.tar.gz/KayleeVC-0.1.1/kayleevc/kaylee.py |
import sys
import signal
import os.path
import subprocess
from gi.repository import GObject, GLib
from kayleevc.recognizer import Recognizer
from kayleevc.util import *
from kayleevc.numbers import NumberParser
class Kaylee:
def __init__(self):
self.ui = None
self.options = {}
ui_continuous_listen = False
self.continuous_listen = False
# Load configuration
self.config = Config()
self.options = vars(self.config.options)
self.commands = self.options['commands']
# Create number parser for later use
self.number_parser = NumberParser()
# Create a hasher
self.hasher = Hasher(self.config)
# Create the strings file
self.update_voice_commands_if_changed()
if self.options['interface']:
if self.options['interface'] == "g":
from kayleevc.gui import GTKInterface as UI
elif self.options['interface'] == "gt":
from kayleevc.gui import GTKTrayInterface as UI
else:
print("no GUI defined")
sys.exit()
self.ui = UI(self.options, self.options['continuous'])
self.ui.connect("command", self.process_command)
# Can we load the icon resource?
icon = self.load_resource("icon.png")
if icon:
self.ui.set_icon_active_asset(icon)
# Can we load the icon_inactive resource?
icon_inactive = self.load_resource("icon_inactive.png")
if icon_inactive:
self.ui.set_icon_inactive_asset(icon_inactive)
if self.options['history']:
self.history = []
# Update the language if necessary
self.language_updater = LanguageUpdater(self.config)
self.language_updater.update_language_if_changed()
# Create the recognizer
self.recognizer = Recognizer(self.config)
self.recognizer.connect('finished', self.recognizer_finished)
def update_voice_commands_if_changed(self):
"""Use hashes to test if the voice commands have changed"""
stored_hash = self.hasher['voice_commands']
# Calculate the hash the voice commands have right now
hasher = self.hasher.get_hash_object()
for voice_cmd in self.commands.keys():
hasher.update(voice_cmd.encode('utf-8'))
# Add a separator to avoid odd behavior
hasher.update('\n'.encode('utf-8'))
new_hash = hasher.hexdigest()
if new_hash != stored_hash:
self.create_strings_file()
self.hasher['voice_commands'] = new_hash
self.hasher.store()
def create_strings_file(self):
# Open the strings file
with open(self.config.strings_file, 'w') as strings:
# Add command words to the corpus
for voice_cmd in sorted(self.commands.keys()):
strings.write(voice_cmd.strip().replace('%d', '') + "\n")
# Add number words to the corpus
for word in self.number_parser.number_words:
strings.write(word + "\n")
def log_history(self, text):
if self.options['history']:
self.history.append(text)
if len(self.history) > self.options['history']:
# Pop off the first item
self.history.pop(0)
# Open and truncate the history file
with open(self.config.history_file, 'w') as hfile:
for line in self.history:
hfile.write(line + '\n')
def run_command(self, cmd):
"""Print the command, then run it"""
print(cmd)
subprocess.call(cmd, shell=True)
def recognizer_finished(self, recognizer, text):
t = text.lower()
numt, nums = self.number_parser.parse_all_numbers(t)
# Is there a matching command?
if t in self.commands:
# Run the valid_sentence_command if it's set
if self.options['valid_sentence_command']:
subprocess.call(self.options['valid_sentence_command'],
shell=True)
cmd = self.commands[t]
# Should we be passing words?
if self.options['pass_words']:
cmd += " " + t
self.run_command(cmd)
self.log_history(text)
elif numt in self.commands:
# Run the valid_sentence_command if it's set
if self.options['valid_sentence_command']:
subprocess.call(self.options['valid_sentence_command'],
shell=True)
cmd = self.commands[numt]
cmd = cmd.format(*nums)
# Should we be passing words?
if self.options['pass_words']:
cmd += " " + t
self.run_command(cmd)
self.log_history(text)
else:
# Run the invalid_sentence_command if it's set
if self.options['invalid_sentence_command']:
subprocess.call(self.options['invalid_sentence_command'],
shell=True)
print("no matching command {0}".format(t))
# If there is a UI and we are not continuous listen
if self.ui:
if not self.continuous_listen:
# Stop listening
self.recognizer.pause()
# Let the UI know that there is a finish
self.ui.finished(t)
def run(self):
if self.ui:
self.ui.run()
else:
self.recognizer.listen()
def quit(self):
sys.exit()
def process_command(self, UI, command):
print(command)
if command == "listen":
self.recognizer.listen()
elif command == "stop":
self.recognizer.pause()
elif command == "continuous_listen":
self.continuous_listen = True
self.recognizer.listen()
elif command == "continuous_stop":
self.continuous_listen = False
self.recognizer.pause()
elif command == "quit":
self.quit()
def load_resource(self, string):
local_data = os.path.join(os.path.dirname(__file__), '..', 'data')
paths = ["/usr/share/kaylee/", "/usr/local/share/kaylee", local_data]
for path in paths:
resource = os.path.join(path, string)
if os.path.exists(resource):
return resource
# If we get this far, no resource was found
return False
def run():
# Make our kaylee object
kaylee = Kaylee()
# Init gobject threads
GObject.threads_init()
# We want a main loop
main_loop = GObject.MainLoop()
# Handle sigint
signal.signal(signal.SIGINT, signal.SIG_DFL)
# Run the kaylee
kaylee.run()
# Start the main loop
try:
main_loop.run()
except:
print("time to quit")
main_loop.quit()
sys.exit() | PypiClean |
/Morfessor%20FlatCat-1.0.8.tar.gz/Morfessor FlatCat-1.0.8/scripts/flatcat-restitch.py |
from __future__ import unicode_literals
import argparse
import collections
import locale
import re
import string
import sys
import flatcat
from flatcat.exception import ArgumentException
from flatcat import utils
PY3 = sys.version_info.major == 3
LICENSE = """
Copyright (c) 2015, Stig-Arne Gronroos
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
def get_argparser():
parser = argparse.ArgumentParser(
prog='flatcat-advanced-segment',
description="""
Morfessor FlatCat advanced segmentation and reformatting
""",
formatter_class=argparse.RawDescriptionHelpFormatter,
add_help=False)
add_arg = parser.add_argument
add_arg('infile', metavar='<infile>',
help='The input file. The type will be sniffed automatically, '
'or can be specified manually.')
add_arg('outfile', metavar='<outfile>',
help='The output file. The type is defined by preset '
'or can be specified manually.')
add_arg('-e', '--encoding', dest='encoding', metavar='<encoding>',
help='Encoding of input and output files (if none is given, '
'both the local encoding and UTF-8 are tried).')
add_arg('--input-format', dest='input_format', type=str,
default=None, metavar='<id>',
help='Input format')
return parser
RE_BOTH_SIDES = re.compile(r'\+ \+')
RE_RIGHT_ONLY = re.compile(r'(?<!\+) \+')
RE_LEFT_ONLY = re.compile(r'\+ (?!\+)')
COMPOUND = r' +@+ '
COMPOUND_BOTH = r'@ @'
COMPOUND_LEFT = r'@ '
def restitcher(fmt, line):
if fmt == 'both_sides':
#ala+ +kive+ +n+ +kolo+ +on
line = RE_RIGHT_ONLY.sub(' ', line)
line = RE_LEFT_ONLY.sub( ' ', line)
line = RE_BOTH_SIDES.sub('', line)
return line
elif fmt == 'right_only':
line = RE_RIGHT_ONLY.sub('', line)
return line
elif fmt == 'compound_symbol':
#ala+ kive +n <c> kolo +on
line = line.replace(COMPOUND, '')
line = RE_RIGHT_ONLY.sub('', line)
line = RE_LEFT_ONLY.sub( '', line)
return line
elif fmt == 'compound_both_sides':
#ala+ +kive+ +n> <kolo+ +on
line = line.replace(COMPOUND_BOTH, '')
line = RE_RIGHT_ONLY.sub(' ', line)
line = RE_LEFT_ONLY.sub( ' ', line)
line = RE_BOTH_SIDES.sub('', line)
return line
elif (fmt == 'compound_affix'
or fmt == 'compound_modifier_affix'):
#ala+ kive +n> kolo +on
line = line.replace(COMPOUND_LEFT, '')
line = RE_RIGHT_ONLY.sub('', line)
line = RE_LEFT_ONLY.sub( '', line)
line = RE_BOTH_SIDES.sub('', line)
return line
elif fmt == 'advanced':
line = RE_RIGHT_ONLY.sub('', line)
line = RE_LEFT_ONLY.sub( '', line)
line = RE_BOTH_SIDES.sub('', line)
return line
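# Illustrative example: undoing a "both_sides" segmentation (the format shown in
# the comment above):
#
#     >>> restitcher('both_sides', 'ala+ +kive+ +n+ +kolo+ +on')
#     'alakivenkoloon'
#
# Word-internal "+ +" boundaries are joined without a space, while boundaries
# where only one side carries a marker collapse to a single space.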
def main(args):
io = flatcat.io.FlatcatIO(encoding=args.encoding)
with io._open_text_file_write(args.outfile) as fobj:
pipe = io._read_text_file(args.infile)
if args.outfile != '-':
pipe = utils._generator_progress(pipe)
pipe = (restitcher(args.input_format, line)
for line in pipe)
for line in pipe:
fobj.write(line)
fobj.write('\n')
if __name__ == "__main__":
parser = get_argparser()
try:
args = parser.parse_args(sys.argv[1:])
main(args)
except ArgumentException as e:
parser.error(e)
except Exception as e:
raise | PypiClean |
/Electrum-CHI-3.3.8.tar.gz/Electrum-CHI-3.3.8/packages/six.py | from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <[email protected]>"
__version__ = "1.12.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
try:
# This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
with Python 2.5 and all existing versions of Python3
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
MovedAttribute("getoutput", "commands", "subprocess"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
_moved_attributes += [
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("splitvalue", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
def create_unbound_method(func, cls):
return func
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
def create_unbound_method(func, cls):
return types.MethodType(func, None, cls)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return d.iterkeys(**kw)
def itervalues(d, **kw):
return d.itervalues(**kw)
def iteritems(d, **kw):
return d.iteritems(**kw)
def iterlists(d, **kw):
return d.iterlists(**kw)
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
import struct
int2byte = struct.Struct(">B").pack
del struct
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
try:
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
finally:
value = None
tb = None
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
try:
raise tp, value, tb
finally:
tb = None
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
try:
if from_value is None:
raise value
raise value from from_value
finally:
value = None
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
try:
raise value from from_value
finally:
value = None
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped, assigned, updated)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(type):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
@classmethod
def __prepare__(cls, name, this_bases):
return meta.__prepare__(name, bases)
return type.__new__(metaclass, 'temporary_class', (), {})
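# Illustrative example: declaring a class whose metaclass is applied on both
# Python 2 and Python 3.
#
#     class Meta(type):
#         pass
#
#     class Base(with_metaclass(Meta, object)):
#         pass
#
#     assert type(Base) is Meta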
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
if hasattr(cls, '__qualname__'):
orig_vars['__qualname__'] = cls.__qualname__
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
def ensure_binary(s, encoding='utf-8', errors='strict'):
"""Coerce **s** to six.binary_type.
For Python 2:
- `unicode` -> encoded to `str`
- `str` -> `str`
For Python 3:
- `str` -> encoded to `bytes`
- `bytes` -> `bytes`
"""
if isinstance(s, text_type):
return s.encode(encoding, errors)
elif isinstance(s, binary_type):
return s
else:
raise TypeError("not expecting type '%s'" % type(s))
def ensure_str(s, encoding='utf-8', errors='strict'):
"""Coerce *s* to `str`.
For Python 2:
- `unicode` -> encoded to `str`
- `str` -> `str`
For Python 3:
- `str` -> `str`
- `bytes` -> decoded to `str`
"""
if not isinstance(s, (text_type, binary_type)):
raise TypeError("not expecting type '%s'" % type(s))
if PY2 and isinstance(s, text_type):
s = s.encode(encoding, errors)
elif PY3 and isinstance(s, binary_type):
s = s.decode(encoding, errors)
return s
def ensure_text(s, encoding='utf-8', errors='strict'):
"""Coerce *s* to six.text_type.
For Python 2:
- `unicode` -> `unicode`
- `str` -> `unicode`
For Python 3:
- `str` -> `str`
- `bytes` -> decoded to `str`
"""
if isinstance(s, binary_type):
return s.decode(encoding, errors)
elif isinstance(s, text_type):
return s
else:
raise TypeError("not expecting type '%s'" % type(s))
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer) | PypiClean |
/MGLEX-0.2.1.tar.gz/MGLEX-0.2.1/mglex/cli/evaluate.py |
u"""
This is the main program which takes a raw (negatively log-scaled) likelihood matrix and a soft class assignment
matrix (responsibility) and corresponding weights, e.g. sequence lengths. Each input row corresponds to one datum and
each column corresponds to a class/genome.
Method "co-clustering":
Calculates the evaluation statistic
S = log( (1/C) * \sum_{i=1}^{C} 1/(|C_i|*(|C_i|-1)) * \sum_{d_1, d_2 \in C_i, d_1 != d_2} p(d_1|C_i)*p(d_2|C_i) )
The expected (log-)probability that any two probalistically linked contigs (prior knowledge) are grouped together in a
cluster.
Method "separation":
For each class, the likelihood distribution is evaluated and a statistic
of how well the null hypothesis (positive class) distribution is separated from the alternative hypothesis
distribution (negative class/other data) is computed. The statistic can aid in comparing and selecting appropriate models which
transform raw data into observation likelihoods. It is the mean-squared error of all classes, where each class error
is the summation of multiplied error probabilities (p-values of the two distributions) when dividing the data into two
classes at a specific likelihood value. This measure can be generalized to pairs of sequences which should _not_ belong
together in a cluster (between) and for fuzzy label distributions.
Method "mse":
Mean squared error is a fast evaluation measure which is the summed squared difference per datum between the true
(responsibility) posterior and the predicted posterior distribution. Input likelihood must be normalized so that it
sums to one over all classes.
Usage:
evaluate (--help | --version)
evaluate (--responsibility <file>) (--method <method>) (--weight <file>)
[--data <file>] [--subsample <int>] [--random-seed <int>] [--beta <from(:to:step)>]...
-h, --help Show this screen
-v, --version Show version
-d <file>, --data <file> Likelihood matrix; default standard input
-r <file>, --responsibility <file> Responsibility (weight) matrix file
-w <file>, --weight <file> Weights (sequence length) file
-m <method>, --method <method> Evaluation method; one of "mse", "mse-flex",
"co-clustering", "separation"
-s <int>, --subsample <int> Subsample this number of data points for faster calculation
-z <int>, --random-seed <int> Seed for random operations
-b <from(:to:step)>, --beta <from(:to:step)> Beta correction factor(s) to evaluate; default 1.0
"""
import sys
import numpy as np
# some ugly code which makes this run as a standalone script
try: # when run inside module
from .. import *
except SystemError: # when run independenly, needs mglex package in path
try:
from mglex import *
except ImportError:
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parents[2]))
from mglex import *
__author__ = "[email protected]"
from mglex import __version__
methods = {"separation": evaluation.twoclass_separation,
"co-clustering": evaluation.expected_pairwise_clustering,
"mse": evaluation.mean_squared_error,
"mse-flex": evaluation.mean_squared_error_flex
}
def main(argv):
from docopt import docopt
argument = docopt(__doc__, argv=argv, version=__version__)
common.handle_broken_pipe()
# print(argument["--beta"])
subsample = argument["--subsample"]
try:
subsample = int(subsample)
except TypeError:
pass
if argument["--beta"]:
betalist_tmp = set()
for s in argument["--beta"]:
if ':' in s:
fr, to, step = [float(f) for f in s.split(":")]
betalist_tmp |= frozenset(np.arange(fr, to, step))
else:
betalist_tmp.add(float(s))
betalist = sorted(betalist_tmp)
else:
betalist = [1.0]
# load input
if argument["--data"]:
likelihood = common.load_probmatrix_file(argument["--data"])
else:
likelihood = common.load_probmatrix(sys.stdin)
responsibility = common.load_probmatrix_file(argument["--responsibility"])
weights = common.load_seqlens_file(argument["--weight"])
# weights = 100.0*np.asarray(weights/weights.max(), dtype=types.prob_type) # TODO: refactor
n = likelihood.shape[0]
if subsample and subsample < n: # subsample input sequences, if requested
try:
common.set_random_seed(int(argument["--random-seed"]))
except TypeError:
sys.stderr.write("No random seed given, consider setting a random seed for better reproducibility.\n")
rows_sampling = np.random.choice(n, subsample, replace=False)
likelihood = likelihood[rows_sampling]
responsibility = responsibility[rows_sampling]
weights = weights[rows_sampling]
if argument["--method"] == "separation": # no exp and no beta scaling
score = methods[argument["--method"]](likelihood, responsibility, weights)
sys.stdout.write("%.4f\n" % (score))
else:
responsibility = np.exp(responsibility)
for beta in betalist:
likelihood_tmp = common.exp_normalize(beta*likelihood)
score = methods[argument["--method"]](likelihood_tmp, responsibility, weights, logarithmic=False)
sys.stdout.write("%.2f\t%.6f\n" % (beta, score))
if __name__ == "__main__":
main(sys.argv[1:]) | PypiClean |
/Bibtex_File_Comparison_and_Update-1.4.tar.gz/Bibtex_File_Comparison_and_Update-1.4/Bibtex-File-Comparison-and-Update/controller.py | import model
import pymongo
from bibtexparser import *
from Tkinter import *
class Controller():
""" This class is used as an interface between the model and the view """
def __init__(self,v):
"""initialize view instance and compare the given files"""
self.view = v
self.compare_files()
def compare_files(self):
"""parse the two bibtex files using bibtex parser and create two bibtex databases replicate those two databases as mongo databases compare the records from the local file with records from the master file"""
conn = connect_mongo()
self.model = model.Model(conn,self.view.master_file.get(),self.view.local_file.get())
list_add_prop = []
list_if_equal = []
(list_if_equal,list_add_prop) = compare_records(self.model.mongo_db.mongo_local,self.model.mongo_db.mongo_master)
self.view.list_differences(list_if_equal,list_add_prop)
def update(self,is_update,list_change,list_add):
"""
Update the model and the local file with the user driven modifications and close the controller instance
Args:
is_update: Flag indicates if the user made any selections to update the current local file
list_change: List of properties for each record that have different values on the master file and the local file
list_add: List of properties for each record that are present on the master file but not on the local
"""
self.model.update(is_update,list_change,list_add)
self.model.update_bibtexDB(is_update)
self.file_update(is_update)
self.close()
def file_update(self,is_update):
"""
Update the current local file with changes selected by the user
Args:
is_update: Flag indicates if the user made any selections to update the current local file
"""
if(is_update):
open(self.view.local_file.get(), 'w').close()
with open(self.view.local_file.get(), 'w') as bibtex_file:
bibtex_str = bibtexparser.dumps(self.model.bibdb_local)
bibtex_file.write(bibtex_str.encode('utf8'))
def close(self):
self.model.close()
self.view.close()
def connect_mongo():
"""Connect to the pymongo client
Raises:
pymongo.errors.ConnectionFailure: If no mongo server is running
"""
try:
conn = pymongo.MongoClient()
print "Connected Successfully"
return conn
except pymongo.errors.ConnectionFailure, e:
print "Could not connect to MongoDb"
def delete_id (elem):
"""Delete the property '_id' from the dictionary
Args:
elem: Dictionary
"""
for each in elem:
if(each[0]=="_id"):
elem.remove(each)
return elem
def compare_records(coll1,coll2):
"""Compares the records in the mongo db collection coll1 with the records in collection coll2 and prepares two lists
Args:
coll1: Local File MongoDb collection
coll2: Master File MongoDb collection
Returns:
(list1,list2): list1 has records with properties having different values on the two colelctions
list2 has records with properties present in coll2 but not in coll1
"""
tk=Tk()
list_if_equal=[]
list_add_prop=[]
for element in coll1.find():
cursor = coll2.find({"ID":element["ID"]})
if(cursor.count()>0):
for doc in cursor:
master = doc
a = sorted(element.items())
b = sorted(master.items())
a= delete_id(a)
b= delete_id(b)
for idx,val in enumerate(b):
value = None
for each in a:
if (each[0]==b[idx][0]):
value = each[1]
break
if (value!=None):
#if the property exists with a different value
if(b[idx][1]!=value):
e = (element["ID"],(b[idx][0],value,idx,IntVar()),(b[idx][0],b[idx][1],idx,IntVar()))
list_if_equal.append(e)
else:
e = (element["ID"],(b[idx][0],b[idx][1],idx,IntVar()))
list_add_prop.append(e)
tk.destroy()
return (list_if_equal,list_add_prop) | PypiClean |
/Hypyxel-0.1.0.post1-py3-none-any.whl/hypyxel/hypixel.py | import requests
from hypyxel.errors import UUIDNotFoundError, ApiKeyError
from hypyxel.utils import _get_key, _get_uuid, _key_check
hypixel_base_url = "https://api.hypixel.net"
endpoints = {
"status":"/status",
"watchdog":"/watchdogstats",
"player":"/player"
}
def get_endpoints():
"""Returns a dict of fuctions and the associated endpoint"""
return endpoints
def status(username):
"""Get the status for a player\n
Important: `username` MUST be a username, not a UUID.
This is not the same as `player`
example response content if online:
.. container:: operations
.. describe:: len(x)
{"success":true,"session":{"online":true,"gameType":"SKYBLOCK","mode":"hub"}}
or if not online:
.. container:: operations
.. describe:: len(x)
{"success":true,"session":{"online":false}}
Raises: `ApiKeyError` if the api key has not been set, or `UUIDNotFoundError` if a uuid could not be found for the username.
"""
_key_check()
return requests.request("GET", f"{hypixel_base_url}{endpoints['status']}?key={_get_key()}?uuid={_get_uuid(username)}").json()
def watchdog():
"""Get watchdog stats
example response:
{"success":true,"watchdog_lastMinute":1,"staff_rollingDaily":2011,"watchdog_total":5501269,"watchdog_rollingDaily":2786,"staff_total":1823141}
Raises: `ApiKeyError` if the api key has not been set.
"""
_key_check()
return requests.request("GET", f"{hypixel_base_url}{endpoints['watchdog']}?key={_get_key()}").json()
def player(username):
"""Get information for a player\n
Important: `username` MUST be a username, not a UUID.
This is not the same as `status`
The example response is too big to put here
Raises: `ApiKeyError` if the api key has not been set, or `UUIDNotFoundError` if a uuid could not be found for the username.
"""
_key_check()
return requests.request("GET", f"{hypixel_base_url}{endpoints['player']}?key={_get_key()}?uuid={_get_uuid(username)}").json() | PypiClean |
/GalSim-2.4.11-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl/galsim/spergel.py |
import numpy as np
import math
from . import _galsim
from .gsobject import GSObject
from .gsparams import GSParams
from .utilities import lazy_property, doc_inherit
from .errors import GalSimRangeError, GalSimIncompatibleValuesError, convert_cpp_errors
class Spergel(GSObject):
r"""A class describing a Spergel profile.
The Spergel surface brightness profile is characterized by three properties: its Spergel index
``nu``, its ``flux``, and either the ``half_light_radius`` or ``scale_radius``. Given these
properties, the surface brightness profile scales as
.. math::
I(r) \sim \left(\frac{r}{r_0}\right)^\nu K_\nu\left(\frac{r}{r_0}\right)
where :math:`r_0` is the ``scale_radius`` and :math:`K_\nu` is the modified Bessel function of
the second kind.
The Spergel profile is intended as a generic galaxy profile, somewhat like a `Sersic` profile,
but with the advantage of being analytic in both real space and Fourier space. The Spergel
index :math:`\nu` plays a similar role to the Sersic index :math:`n`, in that it adjusts the
relative peakiness of the profile core and the relative prominence of the profile wings.
At :math:`\nu = 0.5`, the Spergel profile is equivalent to an `Exponential` profile (or
    alternatively an :math:`n = 1` `Sersic` profile). At :math:`\nu = -0.6` (and in the radial
range near the half-light radius), the Spergel profile is similar to a `DeVaucouleurs` profile
or :math:`n = 4` `Sersic` profile.
Note that for :math:`\nu <= 0`, the Spergel profile surface brightness diverges at the origin.
This may lead to rendering problems if the profile is not convolved by either a PSF or a pixel
and the profile center is precisely on a pixel center.
Due to its analytic Fourier transform and depending on the indices :math:`n` and :math:`\nu`,
the Spergel profile can be considerably faster to draw than the roughly equivalent `Sersic`
profile. For example, the :math:`\nu = -0.6` Spergel profile is roughly 3x faster to draw than
an :math:`n = 4` `Sersic` profile once the `Sersic` profile cache has been set up. However, if
not taking advantage of the cache, for example, if drawing `Sersic` profiles with :math:`n`
continuously varying near 4.0 and Spergel profiles with :math:`\nu` continuously varying near
-0.6, then the Spergel profiles are about 50x faster to draw. At the other end of the galaxy
profile spectrum, the :math:`\nu = 0.5` Spergel profile, :math:`n = 1` `Sersic` profile, and
the `Exponential` profile all take about the same amount of time to draw if cached, and the
Spergel profile is about 2x faster than the `Sersic` profile if uncached.
For more information, refer to
D. N. Spergel, "ANALYTICAL GALAXY PROFILES FOR PHOTOMETRIC AND LENSING ANALYSIS,"
ASTROPHYS J SUPPL S 191(1), 58-65 (2010) [doi:10.1088/0067-0049/191/1/58].
The allowed range of values for the ``nu`` parameter is -0.85 <= ``nu`` <= 4. An exception
will be thrown if you provide a value outside that range. The lower limit is set above the
theoretical lower limit of -1 due to numerical difficulties integrating the *very* peaky
``nu`` < -0.85 profiles. The upper limit is set to avoid numerical difficulties evaluating the
modified Bessel function of the second kind.
A Spergel profile can be initialized using one (and only one) of two possible size parameters:
``scale_radius`` or ``half_light_radius``. Exactly one of these two is required.
Parameters:
nu: The Spergel index, nu.
half_light_radius: The half-light radius of the profile. Typically given in arcsec.
[One of ``scale_radius`` or ``half_light_radius`` is required.]
scale_radius: The scale radius of the profile. Typically given in arcsec.
[One of ``scale_radius`` or ``half_light_radius`` is required.]
flux: The flux (in photons/cm^2/s) of the profile. [default: 1]
gsparams: An optional `GSParams` argument. [default: None]
"""
_req_params = { "nu" : float }
_opt_params = { "flux" : float}
_single_params = [ { "scale_radius" : float , "half_light_radius" : float } ]
_has_hard_edges = False
_is_axisymmetric = True
_is_analytic_x = True
_is_analytic_k = True
# Constrain range of allowed Spergel index nu. Spergel (2010) Table 1 lists values of nu
# from -0.9 to +0.85. We found that nu = -0.9 is too tricky for the GKP integrator to
# handle, however, so the lower limit is -0.85 instead. The upper limit is set by the
# cyl_bessel_k function, which runs into overflow errors for nu larger than about 4.0.
_minimum_nu = -0.85
_maximum_nu = 4.0
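    # A minimal usage sketch (parameter values are illustrative; drawing assumes the
    # standard GSObject.drawImage API together with galsim.Gaussian and galsim.Convolve):
    #
    #     gal = galsim.Spergel(nu=-0.6, half_light_radius=1.2, flux=100.)
    #     psf = galsim.Gaussian(sigma=0.5)
    #     image = galsim.Convolve([gal, psf]).drawImage(scale=0.2)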
def __init__(self, nu, half_light_radius=None, scale_radius=None,
flux=1., gsparams=None):
self._nu = float(nu)
self._flux = float(flux)
self._gsparams = GSParams.check(gsparams)
if self._nu < Spergel._minimum_nu:
raise GalSimRangeError("Requested Spergel index is too small",
self._nu, Spergel._minimum_nu, Spergel._maximum_nu)
if self._nu > Spergel._maximum_nu:
raise GalSimRangeError("Requested Spergel index is too large",
self._nu, Spergel._minimum_nu, Spergel._maximum_nu)
# Parse the radius options
if half_light_radius is not None:
if scale_radius is not None:
raise GalSimIncompatibleValuesError(
"Only one of scale_radius or half_light_radius may be specified",
half_light_radius=half_light_radius, scale_radius=scale_radius)
self._hlr = float(half_light_radius)
with convert_cpp_errors():
self._r0 = self._hlr / _galsim.SpergelCalculateHLR(self._nu)
elif scale_radius is not None:
self._r0 = float(scale_radius)
self._hlr = 0.
else:
raise GalSimIncompatibleValuesError(
"Either scale_radius or half_light_radius must be specified for Spergel",
half_light_radius=half_light_radius, scale_radius=scale_radius)
@lazy_property
def _sbp(self):
with convert_cpp_errors():
return _galsim.SBSpergel(self._nu, self._r0, self._flux, self.gsparams._gsp)
@property
def nu(self):
"""The Spergel index, nu
"""
return self._nu
@property
def scale_radius(self):
"""The scale radius
"""
return self._r0
@property
def half_light_radius(self):
"""The half-light radius
"""
if self._hlr == 0.:
with convert_cpp_errors():
self._hlr = self._r0 * _galsim.SpergelCalculateHLR(self._nu)
return self._hlr
def calculateIntegratedFlux(self, r):
"""Return the integrated flux out to a given radius, r"""
return self._sbp.calculateIntegratedFlux(float(r))
def calculateFluxRadius(self, f):
"""Return the radius within which the total flux is f"""
return self._sbp.calculateFluxRadius(float(f))
def __eq__(self, other):
return (self is other or
(isinstance(other, Spergel) and
self.nu == other.nu and
self.scale_radius == other.scale_radius and
self.flux == other.flux and
self.gsparams == other.gsparams))
def __hash__(self):
return hash(("galsim.Spergel", self.nu, self.scale_radius, self.flux, self.gsparams))
def __repr__(self):
return 'galsim.Spergel(nu=%r, scale_radius=%r, flux=%r, gsparams=%r)'%(
self.nu, self.scale_radius, self.flux, self.gsparams)
def __str__(self):
s = 'galsim.Spergel(nu=%s, half_light_radius=%s'%(self.nu, self.half_light_radius)
if self.flux != 1.0:
s += ', flux=%s'%self.flux
s += ')'
return s
def __getstate__(self):
d = self.__dict__.copy()
d.pop('_sbp',None)
return d
def __setstate__(self, d):
self.__dict__ = d
@property
def _maxk(self):
# (1+k^2)^(-1-nu) = maxk_threshold
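        # The analytic Fourier amplitude (1 + (k r0)^2)^(-1-nu) used in _kValue below
        # drops to maxk_threshold at k = sqrt(maxk_threshold**(-1/(1+nu)) - 1) / r0,
        # which is exactly the expression returned here.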
return math.sqrt(self.gsparams.maxk_threshold ** (-1./(1.+self._nu)) - 1.0) / self._r0
@property
def _stepk(self):
R = self.calculateFluxRadius(1.0 - self.gsparams.folding_threshold) * self._r0
# Go to at least 5*hlr
R = max(R, self.gsparams.stepk_minimum_hlr * self.half_light_radius)
return math.pi / R
@property
def _max_sb(self):
return self._sbp.maxSB()
def _xValue(self, pos):
return self._sbp.xValue(pos._p)
def _kValue(self, kpos):
ksq = (kpos.x**2 + kpos.y**2) * self._r0**2
return self._flux * (1.+ksq)**(-1.-self._nu)
def _drawReal(self, image, jac=None, offset=(0.,0.), flux_scaling=1.):
_jac = 0 if jac is None else jac.__array_interface__['data'][0]
dx,dy = offset
self._sbp.draw(image._image, image.scale, _jac, dx, dy, flux_scaling)
def _shoot(self, photons, rng):
self._sbp.shoot(photons._pa, rng._rng)
def _drawKImage(self, image, jac=None):
_jac = 0 if jac is None else jac.__array_interface__['data'][0]
self._sbp.drawK(image._image, image.scale, _jac)
@doc_inherit
def withFlux(self, flux):
return Spergel(nu=self.nu, scale_radius=self.scale_radius, flux=flux,
gsparams=self.gsparams) | PypiClean |
/Muntjac-1.1.2.tar.gz/Muntjac-1.1.2/muntjac/event/mouse_events.py | from muntjac.terminal.gwt.client.mouse_event_details import MouseEventDetails
from muntjac.event.component_event_listener import IComponentEventListener
from muntjac.ui.component import Event as ComponentEvent
class ClickEvent(ComponentEvent):
"""Class for holding information about a mouse click event. A
L{ClickEvent} is fired when the user clicks on a C{Component}.
    The information available for click events is terminal dependent.
Correct values for all event details cannot be guaranteed.
@author: Vaadin Ltd.
@author: Richard Lincoln
@see: L{ClickListener}
@version: 1.1.2
"""
BUTTON_LEFT = MouseEventDetails.BUTTON_LEFT
BUTTON_MIDDLE = MouseEventDetails.BUTTON_MIDDLE
BUTTON_RIGHT = MouseEventDetails.BUTTON_RIGHT
def __init__(self, source, mouseEventDetails):
super(ClickEvent, self).__init__(source)
self._details = mouseEventDetails
def getButton(self):
"""Returns an identifier describing which mouse button the user pushed.
Compare with L{BUTTON_LEFT}, L{BUTTON_MIDDLE}, L{BUTTON_RIGHT} to
find out which button it is.
@return: one of L{BUTTON_LEFT}, L{BUTTON_MIDDLE}, L{BUTTON_RIGHT}.
"""
return self._details.getButton()
def getClientX(self):
"""Returns the mouse position (x coordinate) when the click took place.
The position is relative to the browser client area.
@return: The mouse cursor x position
"""
return self._details.getClientX()
def getClientY(self):
"""Returns the mouse position (y coordinate) when the click took place.
The position is relative to the browser client area.
@return: The mouse cursor y position
"""
return self._details.getClientY()
def getRelativeX(self):
"""Returns the relative mouse position (x coordinate) when the click
took place. The position is relative to the clicked component.
@return: The mouse cursor x position relative to the clicked layout
component or -1 if no x coordinate available
"""
return self._details.getRelativeX()
def getRelativeY(self):
"""Returns the relative mouse position (y coordinate) when the click
took place. The position is relative to the clicked component.
@return: The mouse cursor y position relative to the clicked layout
component or -1 if no y coordinate available
"""
return self._details.getRelativeY()
def isDoubleClick(self):
"""Checks if the event is a double click event.
@return: true if the event is a double click event, false otherwise
"""
return self._details.isDoubleClick()
def isAltKey(self):
"""Checks if the Alt key was down when the mouse event took place.
        @return: true if Alt was down when the event occurred, false otherwise
"""
return self._details.isAltKey()
def isCtrlKey(self):
"""Checks if the Ctrl key was down when the mouse event took place.
        @return: true if Ctrl was pressed when the event occurred, false
otherwise
"""
return self._details.isCtrlKey()
def isMetaKey(self):
"""Checks if the Meta key was down when the mouse event took place.
        @return: true if Meta was pressed when the event occurred, false
otherwise
"""
return self._details.isMetaKey()
def isShiftKey(self):
"""Checks if the Shift key was down when the mouse event took place.
        @return: true if Shift was pressed when the event occurred, false
otherwise
"""
return self._details.isShiftKey()
def getButtonName(self):
"""Returns a human readable string representing which button has been
pushed. This is meant for debug purposes only and the string returned
could change. Use L{getButton} to check which button was pressed.
@return: A string representation of which button was pushed.
"""
return self._details.getButtonName()
class IClickListener(IComponentEventListener):
"""Interface for listening for a L{ClickEvent} fired by a L{Component}.
@see: L{ClickEvent}
@author: Vaadin Ltd.
@author: Richard Lincoln
@version: 1.1.2
"""
def click(self, event):
"""Called when a L{Component} has been clicked. A reference to the
component is given by L{ClickEvent.getComponent}.
@param event:
An event containing information about the click.
"""
raise NotImplementedError
clickMethod = click
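# A minimal sketch of a concrete listener (how the listener is registered with a
# component is not shown here and depends on the component's listener API):
#
#     class LoggingClickListener(IClickListener):
#         def click(self, event):
#             print('Clicked at (%d, %d) with %s' % (event.getClientX(),
#                                                    event.getClientY(),
#                                                    event.getButtonName()))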
class DoubleClickEvent(ComponentEvent):
"""Class for holding additional event information for DoubleClick events.
Fired when the user double-clicks on a C{Component}.
@see: L{ClickEvent}
@author: Vaadin Ltd.
@author: Richard Lincoln
@version: 1.1.2
"""
def __init__(self, source):
super(DoubleClickEvent, self).__init__(source)
class IDoubleClickListener(IComponentEventListener):
"""Interface for listening for a L{DoubleClickEvent} fired by a
L{Component}.
@see: L{DoubleClickEvent}
@author: Vaadin Ltd.
@author: Richard Lincoln
@version: 1.1.2
"""
def doubleClick(self, event):
"""Called when a L{Component} has been double clicked. A reference
to the component is given by L{DoubleClickEvent.getComponent}.
@param event:
An event containing information about the double click.
"""
raise NotImplementedError
doubleClickMethod = doubleClick | PypiClean |
/BiocFrame-0.3.2.tar.gz/BiocFrame-0.3.2/CHANGELOG.md | # Changelog
## Version 0.3 (development)
This release migrates the package to the more palatable Google Python style guide. A major change is casing: all `camelCase` methods, functions, and parameters are now `snake_case`.
In addition, docstrings and documentation have been updated to use Sphinx's features for linking objects to their types. Sphinx now also documents private and special dunder methods (e.g. `__getitem__`, `__copy__`, etc.). Intersphinx has been updated to link to references from dependent packages.
Configuration for flake8, ruff, and black has been added to pyproject.toml and setup.cfg to reduce friction.
In addition, pyscaffold has been updated to use "myst-parser" as the markdown compiler instead of recommonmark. As part of the pyscaffold setup, pre-commit hooks can be used to run the routine linting and formatting tasks before every commit. While this is sometimes annoying and can be skipped with `--no-verify`, it brings some consistency to the code base.
## Version 0.2
- Refactor DataFrame as BiocFrame
- Implement slicing methods and tests
## Version 0.1
- Initial creation of DataFrame class
- Tests
- Documentation
- GitHub actions
| PypiClean |
/CsuSort-1.0.15.tar.gz/CsuSort-1.0.15/Sort/deep_sort/iou_matching.py | from __future__ import absolute_import
import numpy as np
from . import linear_assignment
def iou(bbox, candidates):
    """Compute intersection over union.
Parameters
----------
bbox : ndarray
A bounding box in format `(top left x, top left y, width, height)`.
candidates : ndarray
A matrix of candidate bounding boxes (one per row) in the same format
as `bbox`.
Returns
-------
ndarray
The intersection over union in [0, 1] between the `bbox` and each
candidate. A higher score means a larger fraction of the `bbox` is
occluded by the candidate.
"""
bbox_tl, bbox_br = bbox[:2], bbox[:2] + bbox[2:]
candidates_tl = candidates[:, :2]
candidates_br = candidates[:, :2] + candidates[:, 2:]
tl = np.c_[np.maximum(bbox_tl[0], candidates_tl[:, 0])[:, np.newaxis],
np.maximum(bbox_tl[1], candidates_tl[:, 1])[:, np.newaxis]]
br = np.c_[np.minimum(bbox_br[0], candidates_br[:, 0])[:, np.newaxis],
np.minimum(bbox_br[1], candidates_br[:, 1])[:, np.newaxis]]
wh = np.maximum(0., br - tl)
area_intersection = wh.prod(axis=1)
area_bbox = bbox[2:].prod()
area_candidates = candidates[:, 2:].prod(axis=1)
return area_intersection / (area_bbox + area_candidates - area_intersection)
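# A small worked example (values are illustrative): a 10x10 track box at the origin
# compared against an identical candidate and one shifted by half its width yields
# IoU values of 1.0 and 1/3 respectively:
#
#     bbox = np.array([0., 0., 10., 10.])
#     candidates = np.array([[0., 0., 10., 10.], [5., 0., 10., 10.]])
#     iou(bbox, candidates)   # -> array([1.0, 0.3333...])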
def iou_cost(tracks, detections, track_indices=None,
detection_indices=None):
"""An intersection over union distance metric.
Parameters
----------
tracks : List[deep_sort.track.Track]
A list of tracks.
detections : List[deep_sort.detection.Detection]
A list of detections.
track_indices : Optional[List[int]]
A list of indices to tracks that should be matched. Defaults to
all `tracks`.
detection_indices : Optional[List[int]]
A list of indices to detections that should be matched. Defaults
to all `detections`.
Returns
-------
ndarray
Returns a cost matrix of shape
len(track_indices), len(detection_indices) where entry (i, j) is
`1 - iou(tracks[track_indices[i]], detections[detection_indices[j]])`.
"""
if track_indices is None:
track_indices = np.arange(len(tracks))
if detection_indices is None:
detection_indices = np.arange(len(detections))
cost_matrix = np.zeros((len(track_indices), len(detection_indices)))
for row, track_idx in enumerate(track_indices):
if tracks[track_idx].time_since_update > 1:
cost_matrix[row, :] = linear_assignment.INFTY_COST
continue
bbox = tracks[track_idx].to_tlwh()
candidates = np.asarray([detections[i].tlwh for i in detection_indices])
cost_matrix[row, :] = 1. - iou(bbox, candidates)
return cost_matrix | PypiClean |
/MultiRunnable-0.17.0a2-py3-none-any.whl/multirunnable/_singletons.py | from inspect import isclass as inspect_isclass
from typing import Dict, TypeVar, Generic, Any
from abc import ABCMeta
T = TypeVar("T")
def chk_cls(_cls):
if inspect_isclass(_cls) is False:
        raise ValueError("The target object to be decorated should be a 'class' level type object.")
def simple_singleton(_class):
chk_cls(_class)
__Instance: Generic[T] = None
def get_instance(*args, **kwargs):
nonlocal __Instance
if __Instance is None:
__Instance = _class(*args, **kwargs)
return __Instance
return get_instance
def singleton(_class):
chk_cls(_class)
class _SingletonClass(_class):
_Instance: Generic[T] = None
def __new__(_class, *args, **kwargs):
if _SingletonClass._Instance is None:
_SingletonClass._Instance = super(_SingletonClass, _class).__new__(_class)
_SingletonClass._Instance._sealed = False
return _SingletonClass._Instance
def __init__(self, *args, **kwargs):
if _SingletonClass._Instance._sealed:
return
super(_SingletonClass, self).__init__(*args, **kwargs)
_SingletonClass._Instance._sealed = True
_SingletonClass.__name__ = _class.__name__
_SingletonClass.__doc__ = _class.__doc__
_SingletonClass.__str__ = _class.__str__
_SingletonClass.__repr__ = _class.__repr__
return _SingletonClass
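# A minimal usage sketch of the decorator (the class name is illustrative). Because
# __init__ is sealed after the first construction, later constructor arguments are ignored:
#
#     @singleton
#     class Configuration:
#         def __init__(self, value=None):
#             self.value = value
#
#     assert Configuration("first") is Configuration("second")
#     assert Configuration("first").value == "first"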
class Singleton:
_Instance: Generic[T] = None
def __new__(cls, *args, **kwargs):
if cls._Instance is None:
cls._Instance = super(Singleton, cls).__new__(cls)
return cls._Instance
class NamedSingleton:
_Instances: Dict[str, Any] = {}
def __new__(cls, *args, **kwargs):
_cls_name = cls.__name__
if _cls_name not in cls._Instances.keys():
cls._Instances[_cls_name] = super(NamedSingleton, cls).__new__(cls)
return cls._Instances[_cls_name]
class ConnectionPoolSingleton:
_Instances: Dict[str, Any] = {}
def __new__(cls, *args, **kwargs):
_pool_name = kwargs.get("pool_name", "")
if _pool_name not in cls._Instances.keys():
cls._Instances[_pool_name] = super(ConnectionPoolSingleton, cls).__new__(cls)
return cls._Instances[_pool_name]
class SingletonMeta(type):
_Instance: Generic[T] = None
def __call__(cls, *args, **kwargs):
if cls._Instance is None:
__super_cls = super(SingletonMeta, cls).__call__(*args, **kwargs)
cls._Instance = __super_cls
return cls._Instance
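# The metaclass-based variant is applied by declaring the metaclass explicitly
# (the class name is illustrative):
#
#     class Registry(metaclass=SingletonMeta):
#         pass
#
#     assert Registry() is Registry()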
class NamedSingletonMeta(type):
_Instances: Dict[str, Any] = {}
def __call__(cls, *args, **kwargs):
_cls_name = cls.__name__
if _cls_name not in cls._Instances:
__super_cls = super(NamedSingletonMeta, cls).__call__(*args, **kwargs)
cls._Instances[_cls_name] = __super_cls
return cls._Instances[_cls_name]
class NamedSingletonABCMeta(ABCMeta):
_NamedInstances: Dict[str, Any] = {}
def __call__(cls, *args, **kwargs):
_cls_name = cls.__name__
if _cls_name not in cls._NamedInstances:
__super_cls = super(NamedSingletonABCMeta, cls).__call__(*args, **kwargs)
cls._NamedInstances[_cls_name] = __super_cls
return cls._NamedInstances[_cls_name] | PypiClean |
/MadEngine_server_mess-1.1.0.tar.gz/MadEngine_server_mess-1.1.0/server/server/server_DB.py | from sqlalchemy import create_engine, Table, Column, Integer, String, MetaData, ForeignKey, DateTime, Boolean
from sqlalchemy.orm import mapper, sessionmaker
import datetime
class ServerStorage:
class AllUsers:
def __init__(self, username, password_hash, ip_address, port, sender_count, recepient_count):
self.username = username
self.passwd_hash = password_hash
self.last_login = datetime.datetime.now()
self.ip_address = ip_address
self.port = port
self.id = None
self.sender_count = sender_count
self.recepient_count = recepient_count
class ClientHistory:
def __init__(self, user_id, ip_address, port, login_time):
self.id = None
self.user_id = user_id
self.ip_address = ip_address
self.port = port
self.login_time = login_time
class ClientContacts:
def __init__(self, user_id, contact_name, contact_time, message, send_count, recep_count, is_friend):
self.user_id = user_id
self.contact_name = contact_name
self.contact_time = contact_time
self.message = message
self.send_count = send_count
self.recep_count = recep_count
self.is_friend = is_friend
def __init__(self, path):
self.database_engine = create_engine(
f'sqlite:///{path}',
echo=False,
pool_recycle=7200,
connect_args={
'check_same_thread': False})
self.metadata = MetaData()
        # Create the users table
users_table = Table('Users', self.metadata,
Column('id', Integer, primary_key=True),
Column('username', String, unique=True),
Column('passwd_hash', String),
Column('ip_address', String),
Column('port', String),
Column('last_login', DateTime),
Column('sender_count', Integer),
Column('recepient_count', Integer)
)
        # Create the user login history table
login_history = Table('login_history', self.metadata,
Column('id', Integer, primary_key=True),
Column('user_id', ForeignKey('Users.id')),
Column('ip_address', String),
Column('port', String),
Column('login_time', DateTime),
)
        # Create the user contacts history table
users_contacts = Table('users_contacts', self.metadata,
Column('id', Integer, primary_key=True),
Column('user_id', ForeignKey('Users.id')),
Column('contact_name', String),
Column('message', String),
Column('contact_time', DateTime),
Column('send_count', Integer),
Column('recep_count', Integer),
Column('is_friend', Boolean)
)
        # Create the tables
self.metadata.create_all(self.database_engine)
mapper(self.AllUsers, users_table)
mapper(self.ClientHistory, login_history)
mapper(self.ClientContacts, users_contacts)
        # Create the session
session = sessionmaker(bind=self.database_engine)
self.session = session()
def user_login(self, username, ip_address, port):
res = self.session.query(self.AllUsers).filter_by(username=username)
if res.count():
user = res.first()
user.last_login = datetime.datetime.now()
print(user.ip_address)
user.ip_address = ip_address
user.port = port
else:
sender_count = 0
recepient_count = 0
print(self.get_hash(username))
user = self.AllUsers(username, self.get_hash(username), ip_address, port, sender_count, recepient_count)
self.session.add(user)
self.session.commit()
date_time = datetime.datetime.now()
history = self.ClientHistory(user.id, ip_address, port, date_time)
self.session.add(history)
self.session.commit()
def add_user(self, name, passwd_hash):
"""
        Register a user.
        Takes a name and a password hash, and creates a record in the statistics table.
"""
sender_count = 0
recepient_count = 0
user = self.AllUsers(name, passwd_hash, None, None, sender_count, recepient_count)
self.session.add(user)
self.session.commit()
date_time = datetime.datetime.now()
history = self.ClientHistory(user.id, None, None, date_time)
self.session.add(history)
self.session.commit()
    def check_user(self, name):
        """Check whether a user exists."""
if self.session.query(self.AllUsers).filter_by(username=name).count():
return True
else:
return False
    def get_hash(self, name):
        """Get the user's password hash, or None if the user does not exist."""
user = self.session.query(self.AllUsers).filter_by(username=name).first()
        return user.passwd_hash if user else None
def contact(self, username, add_contact_name, contact_time, message):
sender = self.session.query(self.AllUsers).filter_by(username=username).first()
try:
recipient = self.session.query(self.AllUsers).filter_by(username=add_contact_name).first()
sender.sender_count += 1
recipient.recepient_count += 1
res = self.session.query(self.AllUsers).filter_by(username=username)
user = res.first()
# print(user)
res = self.session.query(self.ClientContacts).filter_by(user_id=sender.id)
if res:
for contact in res:
if contact.contact_name == add_contact_name:
contact.is_friend = True
# print(contact.is_friend)
is_friend = True
contacts = self.ClientContacts(user.id, add_contact_name, contact_time, message, sender.sender_count,
recipient.recepient_count, is_friend)
self.session.add(contacts)
except AttributeError:
pass
self.session.commit()
def del_contact(self, username, del_contact_name):
try:
sender = self.session.query(self.AllUsers).filter_by(username=username).first()
recipient = self.session.query(self.AllUsers).filter_by(username=del_contact_name).first()
sender.sender_count += 1
recipient.recepient_count += 1
res = self.session.query(self.ClientContacts).filter_by(user_id=sender.id)
for contact in res:
if contact.contact_name == del_contact_name:
contact.is_friend = False
# print(contact.is_friend)
except AttributeError:
pass
self.session.commit()
def user_list(self, username=None):
query = self.session.query(
self.AllUsers.id,
self.AllUsers.username,
self.AllUsers.ip_address,
self.AllUsers.port,
self.AllUsers.last_login,
self.AllUsers.sender_count,
self.AllUsers.recepient_count
)
if username:
query = query.filter(self.AllUsers.username == username)
return query.all()
def users_gui(self):
query = self.session.query(
self.AllUsers.username,
self.AllUsers.ip_address,
self.AllUsers.port,
self.AllUsers.last_login
)
return query.all()
def stat_gui(self):
query = self.session.query(
self.AllUsers.username,
self.AllUsers.last_login,
self.AllUsers.sender_count,
self.AllUsers.recepient_count
)
return query.all()
def history(self, username=None):
query = self.session.query(
self.AllUsers.username,
self.ClientHistory.login_time,
self.ClientHistory.ip_address,
self.ClientHistory.port
).join(self.AllUsers)
if username:
query = query.filter(self.AllUsers.username == username)
return query.all()
def contacts_list(self, username):
query = self.session.query(
self.AllUsers.username,
self.ClientContacts.contact_name,
self.ClientContacts.message,
self.ClientContacts.contact_time,
self.ClientContacts.is_friend
).join(self.AllUsers)
if username:
query = query.filter(self.AllUsers.username == username)
query_is_friend = query.filter(True == self.ClientContacts.is_friend)
return query_is_friend.all()
def to_client_message(self, username):
query = self.session.query(
self.AllUsers.username,
self.ClientContacts.contact_name,
self.ClientContacts.message,
self.ClientContacts.contact_time,
self.ClientContacts.is_friend
).join(self.AllUsers)
if username:
query = query.filter(self.ClientContacts.contact_name == username)
return query.all()
if __name__ == '__main__':
    test_db = ServerStorage('server_database.db3')  # example path; the constructor requires a database file path
    # print("SQLAlchemy version:", sqlalchemy.__version__)
# test_db.user_login('client_1', '127.0.0.1', 7777)
# test_db.user_login('client_2', '127.0.0.1', 8888)
# test_db.user_login('client_3', '127.0.0.1', 7878)
# test_db.user_login('client_4', '127.0.0.1', 7888)
# test_db.user_login('client_5', '127.0.0.1', 7888)
# print('============== test AllUsers ==============')
# pprint(test_db.user_list())
# #
# test_db.contact('client_2', 'client_1', datetime.datetime.now(), 'test_2_1')
# test_db.contact('client_2', 'client_3', datetime.datetime.now(), 'test_2_3')
# test_db.contact('client_3', 'client_1', datetime.datetime.now(), 'test_3_1')
# test_db.contact('client_3', 'client_2', datetime.datetime.now(), 'test_3_2')
#
# print('============== test ClientsContacts ==============')
# test_db.contacts_list('client_2')
# test_db.contacts_list(None)
# pprint(test_db.contacts_list('client_2'))
#
# print('============== test ClientsHistory ==============')
# pprint(test_db.history())
# pprint(test_db.history('client_3')) | PypiClean |
/Electrum-CHI-3.3.8.tar.gz/Electrum-CHI-3.3.8/packages/wheel/bdist_wheel.py | import os
import shutil
import sys
import re
from email.generator import Generator
from distutils.core import Command
from distutils.sysconfig import get_python_version
from distutils import log as logger
from glob import iglob
from shutil import rmtree
from warnings import warn
import pkg_resources
from .pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag, get_platform
from .pkginfo import write_pkg_info
from .metadata import pkginfo_to_metadata
from .wheelfile import WheelFile
from . import pep425tags
from . import __version__ as wheel_version
safe_name = pkg_resources.safe_name
safe_version = pkg_resources.safe_version
PY_LIMITED_API_PATTERN = r'cp3\d'
def safer_name(name):
return safe_name(name).replace('-', '_')
def safer_version(version):
return safe_version(version).replace('-', '_')
class bdist_wheel(Command):
description = 'create a wheel distribution'
user_options = [('bdist-dir=', 'b',
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform()),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('relative', None,
"build the archive using relative paths "
"(default: false)"),
('owner=', 'u',
"Owner name used when creating a tar file"
" [default: current user]"),
('group=', 'g',
"Group name used when creating a tar file"
" [default: current group]"),
('universal', None,
"make a universal wheel"
" (default: false)"),
('python-tag=', None,
"Python implementation compatibility tag"
" (default: py%s)" % get_impl_ver()[0]),
('build-number=', None,
"Build number for this particular version. "
"As specified in PEP-0427, this must start with a digit. "
"[default: None]"),
('py-limited-api=', None,
"Python tag (cp32|cp33|cpNN) for abi3 wheel tag"
" (default: false)"),
]
boolean_options = ['keep-temp', 'skip-build', 'relative', 'universal']
def initialize_options(self):
self.bdist_dir = None
self.data_dir = None
self.plat_name = None
self.plat_tag = None
self.format = 'zip'
self.keep_temp = False
self.dist_dir = None
self.egginfo_dir = None
self.root_is_pure = None
self.skip_build = None
self.relative = False
self.owner = None
self.group = None
self.universal = False
self.python_tag = 'py' + get_impl_ver()[0]
self.build_number = None
self.py_limited_api = False
self.plat_name_supplied = False
def finalize_options(self):
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'wheel')
self.data_dir = self.wheel_dist_name + '.data'
self.plat_name_supplied = self.plat_name is not None
need_options = ('dist_dir', 'plat_name', 'skip_build')
self.set_undefined_options('bdist',
*zip(need_options, need_options))
self.root_is_pure = not (self.distribution.has_ext_modules()
or self.distribution.has_c_libraries())
if self.py_limited_api and not re.match(PY_LIMITED_API_PATTERN, self.py_limited_api):
raise ValueError("py-limited-api must match '%s'" % PY_LIMITED_API_PATTERN)
# Support legacy [wheel] section for setting universal
wheel = self.distribution.get_option_dict('wheel')
if 'universal' in wheel:
# please don't define this in your global configs
logger.warn('The [wheel] section is deprecated. Use [bdist_wheel] instead.')
val = wheel['universal'][1].strip()
if val.lower() in ('1', 'true', 'yes'):
self.universal = True
if self.build_number is not None and not self.build_number[:1].isdigit():
raise ValueError("Build tag (build-number) must start with a digit.")
@property
def wheel_dist_name(self):
"""Return distribution full name with - replaced with _"""
components = (safer_name(self.distribution.get_name()),
safer_version(self.distribution.get_version()))
if self.build_number:
components += (self.build_number,)
return '-'.join(components)
def get_tag(self):
# bdist sets self.plat_name if unset, we should only use it for purepy
# wheels if the user supplied it.
if self.plat_name_supplied:
plat_name = self.plat_name
elif self.root_is_pure:
plat_name = 'any'
else:
plat_name = self.plat_name or get_platform()
if plat_name in ('linux-x86_64', 'linux_x86_64') and sys.maxsize == 2147483647:
plat_name = 'linux_i686'
plat_name = plat_name.replace('-', '_').replace('.', '_')
if self.root_is_pure:
if self.universal:
impl = 'py2.py3'
else:
impl = self.python_tag
tag = (impl, 'none', plat_name)
else:
impl_name = get_abbr_impl()
impl_ver = get_impl_ver()
impl = impl_name + impl_ver
# We don't work on CPython 3.1, 3.0.
if self.py_limited_api and (impl_name + impl_ver).startswith('cp3'):
impl = self.py_limited_api
abi_tag = 'abi3'
else:
abi_tag = str(get_abi_tag()).lower()
tag = (impl, abi_tag, plat_name)
supported_tags = pep425tags.get_supported(
supplied_platform=plat_name if self.plat_name_supplied else None)
# XXX switch to this alternate implementation for non-pure:
if not self.py_limited_api:
assert tag == supported_tags[0], "%s != %s" % (tag, supported_tags[0])
assert tag in supported_tags, "would build wheel with unsupported tag {}".format(tag)
return tag
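    # For example (illustrative values, not computed here): a pure-Python project built
    # with the universal flag yields the tag ('py2.py3', 'none', 'any'), while a CPython
    # 3.7 extension module built on 64-bit Linux yields ('cp37', 'cp37m', 'linux_x86_64').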
def run(self):
build_scripts = self.reinitialize_command('build_scripts')
build_scripts.executable = 'python'
build_scripts.force = True
build_ext = self.reinitialize_command('build_ext')
build_ext.inplace = False
if not self.skip_build:
self.run_command('build')
install = self.reinitialize_command('install',
reinit_subcommands=True)
install.root = self.bdist_dir
install.compile = False
install.skip_build = self.skip_build
install.warn_dir = False
# A wheel without setuptools scripts is more cross-platform.
# Use the (undocumented) `no_ep` option to setuptools'
# install_scripts command to avoid creating entry point scripts.
install_scripts = self.reinitialize_command('install_scripts')
install_scripts.no_ep = True
# Use a custom scheme for the archive, because we have to decide
# at installation time which scheme to use.
for key in ('headers', 'scripts', 'data', 'purelib', 'platlib'):
setattr(install,
'install_' + key,
os.path.join(self.data_dir, key))
basedir_observed = ''
if os.name == 'nt':
# win32 barfs if any of these are ''; could be '.'?
# (distutils.command.install:change_roots bug)
basedir_observed = os.path.normpath(os.path.join(self.data_dir, '..'))
self.install_libbase = self.install_lib = basedir_observed
setattr(install,
'install_purelib' if self.root_is_pure else 'install_platlib',
basedir_observed)
logger.info("installing to %s", self.bdist_dir)
self.run_command('install')
impl_tag, abi_tag, plat_tag = self.get_tag()
archive_basename = "{}-{}-{}-{}".format(self.wheel_dist_name, impl_tag, abi_tag, plat_tag)
if not self.relative:
archive_root = self.bdist_dir
else:
archive_root = os.path.join(
self.bdist_dir,
self._ensure_relative(install.install_base))
self.set_undefined_options('install_egg_info', ('target', 'egginfo_dir'))
distinfo_dirname = '{}-{}.dist-info'.format(
safer_name(self.distribution.get_name()),
safer_version(self.distribution.get_version()))
distinfo_dir = os.path.join(self.bdist_dir, distinfo_dirname)
self.egg2dist(self.egginfo_dir, distinfo_dir)
self.write_wheelfile(distinfo_dir)
# Make the archive
if not os.path.exists(self.dist_dir):
os.makedirs(self.dist_dir)
wheel_path = os.path.join(self.dist_dir, archive_basename + '.whl')
with WheelFile(wheel_path, 'w') as wf:
wf.write_files(archive_root)
# Add to 'Distribution.dist_files' so that the "upload" command works
getattr(self.distribution, 'dist_files', []).append(
('bdist_wheel', get_python_version(), wheel_path))
if not self.keep_temp:
logger.info('removing %s', self.bdist_dir)
if not self.dry_run:
rmtree(self.bdist_dir)
def write_wheelfile(self, wheelfile_base, generator='bdist_wheel (' + wheel_version + ')'):
from email.message import Message
msg = Message()
msg['Wheel-Version'] = '1.0' # of the spec
msg['Generator'] = generator
msg['Root-Is-Purelib'] = str(self.root_is_pure).lower()
if self.build_number is not None:
msg['Build'] = self.build_number
# Doesn't work for bdist_wininst
impl_tag, abi_tag, plat_tag = self.get_tag()
for impl in impl_tag.split('.'):
for abi in abi_tag.split('.'):
for plat in plat_tag.split('.'):
msg['Tag'] = '-'.join((impl, abi, plat))
wheelfile_path = os.path.join(wheelfile_base, 'WHEEL')
logger.info('creating %s', wheelfile_path)
with open(wheelfile_path, 'w') as f:
Generator(f, maxheaderlen=0).flatten(msg)
def _ensure_relative(self, path):
# copied from dir_util, deleted
drive, path = os.path.splitdrive(path)
if path[0:1] == os.sep:
path = drive + path[1:]
return path
@property
def license_paths(self):
metadata = self.distribution.get_option_dict('metadata')
files = set()
patterns = sorted({
option for option in metadata.get('license_files', ('', ''))[1].split()
})
if 'license_file' in metadata:
warn('The "license_file" option is deprecated. Use "license_files" instead.',
DeprecationWarning)
files.add(metadata['license_file'][1])
if 'license_file' not in metadata and 'license_files' not in metadata:
patterns = ('LICEN[CS]E*', 'COPYING*', 'NOTICE*', 'AUTHORS*')
for pattern in patterns:
for path in iglob(pattern):
if path not in files and os.path.isfile(path):
logger.info('adding license file "%s" (matched pattern "%s")', path, pattern)
files.add(path)
return files
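    # The corresponding project configuration lives in setup.cfg, e.g. (illustrative):
    #
    #     [metadata]
    #     license_files = LICENSE.txt NOTICE.txt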
def egg2dist(self, egginfo_path, distinfo_path):
"""Convert an .egg-info directory into a .dist-info directory"""
def adios(p):
"""Appropriately delete directory, file or link."""
if os.path.exists(p) and not os.path.islink(p) and os.path.isdir(p):
shutil.rmtree(p)
elif os.path.exists(p):
os.unlink(p)
adios(distinfo_path)
if not os.path.exists(egginfo_path):
# There is no egg-info. This is probably because the egg-info
# file/directory is not named matching the distribution name used
# to name the archive file. Check for this case and report
# accordingly.
import glob
pat = os.path.join(os.path.dirname(egginfo_path), '*.egg-info')
possible = glob.glob(pat)
err = "Egg metadata expected at %s but not found" % (egginfo_path,)
if possible:
alt = os.path.basename(possible[0])
err += " (%s found - possible misnamed archive file?)" % (alt,)
raise ValueError(err)
if os.path.isfile(egginfo_path):
# .egg-info is a single file
pkginfo_path = egginfo_path
pkg_info = pkginfo_to_metadata(egginfo_path, egginfo_path)
os.mkdir(distinfo_path)
else:
# .egg-info is a directory
pkginfo_path = os.path.join(egginfo_path, 'PKG-INFO')
pkg_info = pkginfo_to_metadata(egginfo_path, pkginfo_path)
# ignore common egg metadata that is useless to wheel
shutil.copytree(egginfo_path, distinfo_path,
ignore=lambda x, y: {'PKG-INFO', 'requires.txt', 'SOURCES.txt',
'not-zip-safe'}
)
# delete dependency_links if it is only whitespace
dependency_links_path = os.path.join(distinfo_path, 'dependency_links.txt')
with open(dependency_links_path, 'r') as dependency_links_file:
dependency_links = dependency_links_file.read().strip()
if not dependency_links:
adios(dependency_links_path)
write_pkg_info(os.path.join(distinfo_path, 'METADATA'), pkg_info)
for license_path in self.license_paths:
filename = os.path.basename(license_path)
shutil.copy(license_path, os.path.join(distinfo_path, filename))
adios(egginfo_path) | PypiClean |
/Mathics_Django-6.0.0-py3-none-any.whl/mathics_django/web/media/js/mathjax/jax/output/HTML-CSS/fonts/STIX-Web/Symbols/Regular/Main.js | MathJax.OutputJax["HTML-CSS"].FONTDATA.FONTS.STIXMathJax_Symbols={directory:"Symbols/Regular",family:"STIXMathJax_Symbols",testString:"\u00A0\u2300\u2302\u2305\u2306\u230C\u230D\u230E\u230F\u2310\u2311\u2312\u2313\u2315\u2316",32:[0,0,250,0,0],160:[0,0,250,0,0],8960:[487,-14,606,25,581],8962:[774,0,926,55,871],8965:[577,0,620,48,572],8966:[728,0,620,48,572],8972:[166,215,463,52,412],8973:[166,215,463,52,412],8974:[876,-495,463,52,412],8975:[876,-495,463,52,412],8976:[393,-115,600,48,552],8977:[439,-65,523,75,449],8978:[331,0,762,50,712],8979:[331,0,762,50,712],8981:[582,189,847,26,796],8982:[748,246,1100,53,1047],8983:[749,245,1100,53,1047],8984:[662,156,926,55,871],8985:[393,-115,600,48,552],8986:[671,69,685,64,622],8988:[662,-281,463,51,411],8989:[662,-281,463,51,411],8990:[164,217,463,51,411],8991:[164,217,463,52,412],9001:[713,213,400,77,335],9002:[713,213,400,65,323],9004:[692,186,926,83,843],9005:[592,88,986,55,931],9006:[450,140,624,-18,574],9010:[562,56,889,80,809],9014:[751,156,926,85,841],9021:[683,179,910,84,826],9023:[703,176,683,60,623],9024:[703,176,683,60,623],9043:[751,176,794,55,739],9072:[751,176,794,55,739],9084:[584,220,871,50,820],9107:[386,-120,913,85,841],9108:[633,127,926,24,902],9135:[286,-220,315,0,315],9142:[495,-11,926,55,871],9166:[731,225,926,50,856],9186:[558,53,1144,54,1090],9187:[680,178,910,82,828],9188:[286,-220,1094,47,1047],9189:[527,20,1018,23,995],9190:[434,-72,926,55,871],9191:[606,97,798,194,733],10176:[584,0,685,50,634],10177:[811,127,1145,35,1110],10178:[662,0,693,52,641],10179:[529,27,685,60,625],10180:[529,27,685,61,626],10181:[702,198,455,55,400],10182:[702,198,455,55,400],10183:[536,29,620,31,589],10184:[533,25,966,60,906],10185:[533,25,966,60,906],10187:[662,156,902,0,863],10188:[806,213,325,20,325],10189:[662,156,902,0,863],10192:[744,242,1064,39,1025],10193:[536,29,620,31,589],10194:[536,31,620,48,572],10195:[584,0,685,50,634],10196:[584,0,685,50,634],10197:[582,80,1019,40,965],10198:[582,80,1019,54,979],10199:[582,80,1228,40,1188],10200:[718,213,866,50,816],10201:[718,213,866,50,816],10202:[662,0,1376,64,1312],10203:[662,0,1376,64,1312],10204:[403,-103,849,50,799],10205:[450,-57,1574,55,1519],10206:[450,-57,1574,55,1519],10207:[693,187,502,101,401],10208:[795,289,790,45,745],10209:[589,87,764,45,719],10210:[589,87,803,45,758],10211:[589,87,803,45,758],10212:[662,158,1182,45,1137],10213:[662,158,1182,45,1137],10214:[717,213,504,188,482],10215:[717,213,504,22,316],10218:[719,213,610,73,545],10219:[719,213,610,65,537],10220:[719,213,488,178,466],10221:[719,213,488,22,310],10624:[695,189,594,85,509],10625:[487,-14,565,46,519],10626:[566,59,503,110,393],10627:[719,213,596,108,477],10628:[719,213,596,119,488],10629:[719,213,463,70,393],10630:[719,213,463,70,393],10631:[719,214,511,115,367],10632:[719,214,511,144,396],10633:[719,213,511,100,352],10634:[719,213,511,159,411],10635:[719,213,469,188,447],10636:[719,213,469,22,281],10637:[719,213,469,188,447],10638:[719,213,469,22,281],10639:[719,213,469,188,447],10640:[719,213,469,22,281],10641:[719,213,400,73,357],10642:[719,213,400,73,357],10643:[649,143,685,34,591],10644:[649,143,685,94,651],10645:[649,143,685,86,643],10646:[649,143,685,42,599],10649:[661,155,211,50,161],10650:[662,156,511,177,334],10651:[547,72,685,42,662],10652:[584,0,685,50,634],10653:[584,0,685,50,634],10654:[547,0,685,11,675],10655:[396,0,685,24,643],10656:[517,13,685,57,654],10657:[609,-12,68
5,77,607],10658:[547,0,685,42,662],10659:[547,0,685,42,662],10660:[547,200,685,23,643],10661:[547,200,685,42,662],10662:[547,0,900,40,860],10663:[547,0,900,40,860],10664:[574,72,685,29,649],10665:[574,72,685,36,656],10666:[578,68,685,29,649],10667:[578,68,685,36,656],10668:[562,58,706,34,680],10669:[562,58,706,26,672],10670:[562,58,706,34,680],10671:[562,58,708,26,672],10672:[583,79,762,50,712],10673:[717,79,762,50,712],10674:[819,79,762,50,712],10675:[832,79,762,50,712],10676:[832,79,762,50,712],10677:[623,119,910,24,886],10678:[623,119,842,50,792],10679:[623,119,842,50,792],10680:[623,119,842,50,792],10681:[623,119,842,50,792],10682:[623,119,842,50,792],10683:[623,119,842,50,792],10684:[623,119,842,50,792],10685:[882,179,842,50,792],10686:[623,119,842,50,792],10687:[623,119,842,50,792],10688:[623,119,842,50,792],10689:[623,119,842,50,792],10690:[623,119,1091,50,1056],10691:[623,119,1091,50,1056],10692:[662,158,910,45,865],10693:[662,158,910,45,865],10694:[662,158,910,45,865],10695:[662,158,910,45,865],10696:[662,158,910,45,865],10697:[712,207,1046,64,982],10698:[1003,127,1145,35,1110],10699:[811,259,1145,35,1110],10700:[811,127,1145,35,1110],10701:[811,127,1165,15,1150],10702:[698,193,780,70,710],10703:[531,25,857,48,777],10704:[531,25,857,80,809],10705:[582,80,810,93,716],10706:[582,80,810,93,716],10707:[582,80,810,93,716],10708:[582,80,810,94,717],10709:[582,80,810,93,716],10710:[602,100,810,74,736],10711:[602,100,810,74,736],10712:[620,116,511,177,334],10713:[620,116,511,176,333],10714:[620,116,688,177,511],10715:[620,116,688,177,511],10716:[430,0,926,70,854],10717:[653,0,926,70,854],10718:[695,189,926,70,854],10719:[403,-103,1145,50,1095],10720:[662,157,910,45,865],10721:[512,8,667,24,613],10722:[414,0,790,64,726],10723:[662,156,685,47,637],10724:[842,156,685,47,637],10725:[662,156,685,48,637],10726:[584,78,798,60,738],10727:[695,189,628,48,580],10728:[811,127,1145,35,1110],10729:[811,127,1145,35,1110],10730:[744,241,762,32,730],10732:[743,241,762,50,712],10733:[743,241,762,50,712],10734:[747,243,762,97,665],10735:[747,243,762,97,665],10736:[747,243,762,32,730],10737:[747,243,762,32,730],10738:[747,243,762,65,697],10739:[747,243,762,65,697],10740:[521,13,926,55,871],10742:[765,80,520,94,426],10743:[662,80,520,94,426],10746:[532,25,685,64,621],10747:[532,25,685,64,621],10748:[713,213,459,77,394],10749:[713,213,459,65,382],10750:[540,36,762,93,669],10751:[316,-190,762,93,669],57498:[719,213,708,18,690],57499:[719,213,708,18,690],57535:[836,236,636,50,586],57536:[836,236,636,50,586],57537:[836,236,636,50,586],57538:[836,236,636,50,586],57539:[386,-120,750,50,700],57540:[478,-28,750,50,700],57541:[478,-28,750,50,700],57542:[286,-220,750,50,700],57543:[402,-120,750,50,700],57544:[386,-120,1000,50,950],57545:[478,-28,1000,50,950],57546:[544,38,1000,50,950],57547:[386,-120,750,50,700],57548:[478,-28,750,50,700],57549:[544,38,750,50,700],57550:[836,236,636,50,586],57551:[836,236,636,50,586],57552:[836,236,636,50,586],57553:[836,236,636,50,586],57554:[692,186,926,83,843],57555:[633,127,926,24,902],57556:[633,127,926,24,902],57557:[286,-220,1000,50,950],57558:[386,-120,750,50,700]};MathJax.Callback.Queue(["initFont",MathJax.OutputJax["HTML-CSS"],"STIXMathJax_Symbols"],["loadComplete",MathJax.Ajax,MathJax.OutputJax["HTML-CSS"].fontDir+"/Symbols/Regular/Main.js"]); | PypiClean |
/FreePyBX-1.0-RC1.tar.gz/FreePyBX-1.0-RC1/freepybx/public/js/dojox/lang/docs.js | define(["dijit","dojo","dojox"],function(_1,_2,_3){
_2.provide("dojox.lang.docs");
(function(){
function _4(_5){
};
var _6={};
var _7=[];
var _8=_3.lang.docs._loadedDocs={};
var _9=function(_a,_b){
_6[_b]=_a;
};
var _c=function(_d){
var _e=_d.type||"";
var _f,_10=false,_11=false,_12;
_e=_e.replace(/\?/,function(){
_10=true;
return "";
});
_e=_e.replace(/\[\]/,function(){
_11=true;
return "";
});
if(_e.match(/HTML/)){
_e="string";
}else{
if(_e=="String"||_e=="Number"||_e=="Boolean"||_e=="Object"||_e=="Array"||_e=="Integer"||_e=="Function"){
_e=_e.toLowerCase();
}else{
if(_e=="bool"){
_e="boolean";
}else{
if(_e){
_f=_2.getObject(_e)||{};
_12=true;
}else{
_f={};
}
}
}
}
_f=_f||{type:_e};
if(_11){
_f={items:_f,type:"array"};
_12=false;
}
if(!_12){
if(_10){
_f.optional=true;
}
if(/const/.test(_d.tags)){
_f.readonly=true;
}
}
return _f;
};
var _13=function(_14,_15){
var _16=_8[_15];
if(_16){
_14.description=_16.description;
_14.properties={};
_14.methods={};
if(_16.properties){
var _17=_16.properties;
for(var i=0,l=_17.length;i<l;i++){
if(_17[i].scope=="prototype"){
var _18=_14.properties[_17[i].name]=_c(_17[i]);
_18.description=_17[i].summary;
}
}
}
if(_16.methods){
var _19=_16.methods;
for(i=0,l=_19.length;i<l;i++){
_15=_19[i].name;
if(_15&&_19[i].scope=="prototype"){
var _1a=_14.methods[_15]={};
_1a.description=_19[i].summary;
var _1b=_19[i].parameters;
if(_1b){
_1a.parameters=[];
for(var j=0,k=_1b.length;j<k;j++){
var _1c=_1b[j];
var _1d=_1a.parameters[j]=_c(_1c);
_1d.name=_1c.name;
_1d.optional="optional"==_1c.usage;
}
}
var ret=_19[i]["return-types"];
if(ret&&ret[0]){
var _1e=_c(ret[0]);
if(_1e.type){
_1a.returns=_1e;
}
}
}
}
}
var _1f=_16.superclass;
if(_1f){
_14["extends"]=_2.getObject(_1f);
}
}
};
var _20=function(_21){
_7.push(_21);
};
var _22=_2.declare;
_2.declare=function(_23){
var _24=_22.apply(this,arguments);
_9(_24,_23);
return _24;
};
_2.mixin(_2.declare,_22);
var _25;
var _26=_2.require;
_2.require=function(_27){
_20(_27);
var _28=_26.apply(this,arguments);
return _28;
};
_3.lang.docs.init=function(_29){
function _2a(){
_2.require=_26;
_7=null;
try{
_2.xhrGet({sync:!_29,url:_2.baseUrl+"../util/docscripts/api.json",handleAs:"text"}).addCallbacks(function(obj){
_8=(new Function("return "+obj))();
obj=null;
_9=_13;
for(var i in _6){
_9(_6[i],i);
}
_6=null;
},_4);
}
catch(e){
_4(e);
}
};
if(_25){
return null;
}
_25=true;
var _2b=function(_2c,_2d){
return _2.xhrGet({sync:_2d||!_29,url:_2.baseUrl+"../util/docscripts/api/"+_2c+".json",handleAs:"text"}).addCallback(function(obj){
obj=(new Function("return "+obj))();
for(var _2e in obj){
if(!_8[_2e]){
_8[_2e]=obj[_2e];
}
}
});
};
try{
var _2f=_7.shift();
_2b(_2f,true).addCallbacks(function(){
_20=function(_30){
if(!_8[_30]){
try{
_2b(_30);
}
catch(e){
_8[_30]={};
}
}
};
_2.forEach(_7,function(mod){
_20(mod);
});
_7=null;
_9=_13;
for(i in _6){
_9(_6[i],i);
}
_6=null;
},_2a);
}
catch(e){
_2a();
}
return null;
};
})();
}); | PypiClean |
/Nuitka_fixed-1.1.2-cp310-cp310-win_amd64.whl/nuitka/build/inline_copy/yaml/yaml/parser.py |
__all__ = ['Parser', 'ParserError']
from .error import MarkedYAMLError
from .tokens import *
from .events import *
from .scanner import *
class ParserError(MarkedYAMLError):
pass
class Parser:
    # Since writing a recursive-descent parser is a straightforward task, we
# do not give many comments here.
DEFAULT_TAGS = {
'!': '!',
'!!': 'tag:yaml.org,2002:',
}
def __init__(self):
self.current_event = None
self.yaml_version = None
self.tag_handles = {}
self.states = []
self.marks = []
self.state = self.parse_stream_start
def dispose(self):
# Reset the state attributes (to clear self-references)
self.states = []
self.state = None
def check_event(self, *choices):
# Check the type of the next event.
if self.current_event is None:
if self.state:
self.current_event = self.state()
if self.current_event is not None:
if not choices:
return True
for choice in choices:
if isinstance(self.current_event, choice):
return True
return False
def peek_event(self):
# Get the next event.
if self.current_event is None:
if self.state:
self.current_event = self.state()
return self.current_event
def get_event(self):
# Get the next event and proceed further.
if self.current_event is None:
if self.state:
self.current_event = self.state()
value = self.current_event
self.current_event = None
return value
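    # A minimal usage sketch: the parser is normally driven indirectly through
    # yaml.parse(), which composes Reader, Scanner and Parser into a loader and
    # yields these events in order:
    #
    #     import yaml
    #     [type(e).__name__ for e in yaml.parse("a: 1")]
    #     # ['StreamStartEvent', 'DocumentStartEvent', 'MappingStartEvent',
    #     #  'ScalarEvent', 'ScalarEvent', 'MappingEndEvent', 'DocumentEndEvent',
    #     #  'StreamEndEvent']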
# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
# implicit_document ::= block_node DOCUMENT-END*
# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
def parse_stream_start(self):
# Parse the stream start.
token = self.get_token()
event = StreamStartEvent(token.start_mark, token.end_mark,
encoding=token.encoding)
# Prepare the next state.
self.state = self.parse_implicit_document_start
return event
def parse_implicit_document_start(self):
# Parse an implicit document.
if not self.check_token(DirectiveToken, DocumentStartToken,
StreamEndToken):
self.tag_handles = self.DEFAULT_TAGS
token = self.peek_token()
start_mark = end_mark = token.start_mark
event = DocumentStartEvent(start_mark, end_mark,
explicit=False)
# Prepare the next state.
self.states.append(self.parse_document_end)
self.state = self.parse_block_node
return event
else:
return self.parse_document_start()
def parse_document_start(self):
# Parse any extra document end indicators.
while self.check_token(DocumentEndToken):
self.get_token()
# Parse an explicit document.
if not self.check_token(StreamEndToken):
token = self.peek_token()
start_mark = token.start_mark
version, tags = self.process_directives()
if not self.check_token(DocumentStartToken):
raise ParserError(None, None,
"expected '<document start>', but found %r"
% self.peek_token().id,
self.peek_token().start_mark)
token = self.get_token()
end_mark = token.end_mark
event = DocumentStartEvent(start_mark, end_mark,
explicit=True, version=version, tags=tags)
self.states.append(self.parse_document_end)
self.state = self.parse_document_content
else:
# Parse the end of the stream.
token = self.get_token()
event = StreamEndEvent(token.start_mark, token.end_mark)
assert not self.states
assert not self.marks
self.state = None
return event
def parse_document_end(self):
# Parse the document end.
token = self.peek_token()
start_mark = end_mark = token.start_mark
explicit = False
if self.check_token(DocumentEndToken):
token = self.get_token()
end_mark = token.end_mark
explicit = True
event = DocumentEndEvent(start_mark, end_mark,
explicit=explicit)
# Prepare the next state.
self.state = self.parse_document_start
return event
def parse_document_content(self):
if self.check_token(DirectiveToken,
DocumentStartToken, DocumentEndToken, StreamEndToken):
event = self.process_empty_scalar(self.peek_token().start_mark)
self.state = self.states.pop()
return event
else:
return self.parse_block_node()
def process_directives(self):
self.yaml_version = None
self.tag_handles = {}
while self.check_token(DirectiveToken):
token = self.get_token()
if token.name == 'YAML':
if self.yaml_version is not None:
raise ParserError(None, None,
"found duplicate YAML directive", token.start_mark)
major, minor = token.value
if major != 1:
raise ParserError(None, None,
"found incompatible YAML document (version 1.* is required)",
token.start_mark)
self.yaml_version = token.value
elif token.name == 'TAG':
handle, prefix = token.value
if handle in self.tag_handles:
raise ParserError(None, None,
"duplicate tag handle %r" % handle,
token.start_mark)
self.tag_handles[handle] = prefix
if self.tag_handles:
value = self.yaml_version, self.tag_handles.copy()
else:
value = self.yaml_version, None
for key in self.DEFAULT_TAGS:
if key not in self.tag_handles:
self.tag_handles[key] = self.DEFAULT_TAGS[key]
return value
# block_node_or_indentless_sequence ::= ALIAS
# | properties (block_content | indentless_block_sequence)?
# | block_content
# | indentless_block_sequence
# block_node ::= ALIAS
# | properties block_content?
# | block_content
# flow_node ::= ALIAS
# | properties flow_content?
# | flow_content
# properties ::= TAG ANCHOR? | ANCHOR TAG?
# block_content ::= block_collection | flow_collection | SCALAR
# flow_content ::= flow_collection | SCALAR
# block_collection ::= block_sequence | block_mapping
# flow_collection ::= flow_sequence | flow_mapping
def parse_block_node(self):
return self.parse_node(block=True)
def parse_flow_node(self):
return self.parse_node()
def parse_block_node_or_indentless_sequence(self):
return self.parse_node(block=True, indentless_sequence=True)
def parse_node(self, block=False, indentless_sequence=False):
if self.check_token(AliasToken):
token = self.get_token()
event = AliasEvent(token.value, token.start_mark, token.end_mark)
self.state = self.states.pop()
else:
anchor = None
tag = None
start_mark = end_mark = tag_mark = None
if self.check_token(AnchorToken):
token = self.get_token()
start_mark = token.start_mark
end_mark = token.end_mark
anchor = token.value
if self.check_token(TagToken):
token = self.get_token()
tag_mark = token.start_mark
end_mark = token.end_mark
tag = token.value
elif self.check_token(TagToken):
token = self.get_token()
start_mark = tag_mark = token.start_mark
end_mark = token.end_mark
tag = token.value
if self.check_token(AnchorToken):
token = self.get_token()
end_mark = token.end_mark
anchor = token.value
if tag is not None:
handle, suffix = tag
if handle is not None:
if handle not in self.tag_handles:
raise ParserError("while parsing a node", start_mark,
"found undefined tag handle %r" % handle,
tag_mark)
tag = self.tag_handles[handle]+suffix
else:
tag = suffix
#if tag == '!':
# raise ParserError("while parsing a node", start_mark,
# "found non-specific tag '!'", tag_mark,
# "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
if start_mark is None:
start_mark = end_mark = self.peek_token().start_mark
event = None
implicit = (tag is None or tag == '!')
if indentless_sequence and self.check_token(BlockEntryToken):
end_mark = self.peek_token().end_mark
event = SequenceStartEvent(anchor, tag, implicit,
start_mark, end_mark)
self.state = self.parse_indentless_sequence_entry
else:
if self.check_token(ScalarToken):
token = self.get_token()
end_mark = token.end_mark
if (token.plain and tag is None) or tag == '!':
implicit = (True, False)
elif tag is None:
implicit = (False, True)
else:
implicit = (False, False)
event = ScalarEvent(anchor, tag, implicit, token.value,
start_mark, end_mark, style=token.style)
self.state = self.states.pop()
elif self.check_token(FlowSequenceStartToken):
end_mark = self.peek_token().end_mark
event = SequenceStartEvent(anchor, tag, implicit,
start_mark, end_mark, flow_style=True)
self.state = self.parse_flow_sequence_first_entry
elif self.check_token(FlowMappingStartToken):
end_mark = self.peek_token().end_mark
event = MappingStartEvent(anchor, tag, implicit,
start_mark, end_mark, flow_style=True)
self.state = self.parse_flow_mapping_first_key
elif block and self.check_token(BlockSequenceStartToken):
end_mark = self.peek_token().start_mark
event = SequenceStartEvent(anchor, tag, implicit,
start_mark, end_mark, flow_style=False)
self.state = self.parse_block_sequence_first_entry
elif block and self.check_token(BlockMappingStartToken):
end_mark = self.peek_token().start_mark
event = MappingStartEvent(anchor, tag, implicit,
start_mark, end_mark, flow_style=False)
self.state = self.parse_block_mapping_first_key
elif anchor is not None or tag is not None:
# Empty scalars are allowed even if a tag or an anchor is
# specified.
event = ScalarEvent(anchor, tag, (implicit, False), '',
start_mark, end_mark)
self.state = self.states.pop()
else:
if block:
node = 'block'
else:
node = 'flow'
token = self.peek_token()
raise ParserError("while parsing a %s node" % node, start_mark,
"expected the node content, but found %r" % token.id,
token.start_mark)
return event
# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
def parse_block_sequence_first_entry(self):
token = self.get_token()
self.marks.append(token.start_mark)
return self.parse_block_sequence_entry()
def parse_block_sequence_entry(self):
if self.check_token(BlockEntryToken):
token = self.get_token()
if not self.check_token(BlockEntryToken, BlockEndToken):
self.states.append(self.parse_block_sequence_entry)
return self.parse_block_node()
else:
self.state = self.parse_block_sequence_entry
return self.process_empty_scalar(token.end_mark)
if not self.check_token(BlockEndToken):
token = self.peek_token()
raise ParserError("while parsing a block collection", self.marks[-1],
"expected <block end>, but found %r" % token.id, token.start_mark)
token = self.get_token()
event = SequenceEndEvent(token.start_mark, token.end_mark)
self.state = self.states.pop()
self.marks.pop()
return event
# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
def parse_indentless_sequence_entry(self):
if self.check_token(BlockEntryToken):
token = self.get_token()
if not self.check_token(BlockEntryToken,
KeyToken, ValueToken, BlockEndToken):
self.states.append(self.parse_indentless_sequence_entry)
return self.parse_block_node()
else:
self.state = self.parse_indentless_sequence_entry
return self.process_empty_scalar(token.end_mark)
token = self.peek_token()
event = SequenceEndEvent(token.start_mark, token.start_mark)
self.state = self.states.pop()
return event
# block_mapping ::= BLOCK-MAPPING_START
# ((KEY block_node_or_indentless_sequence?)?
# (VALUE block_node_or_indentless_sequence?)?)*
# BLOCK-END
def parse_block_mapping_first_key(self):
token = self.get_token()
self.marks.append(token.start_mark)
return self.parse_block_mapping_key()
def parse_block_mapping_key(self):
if self.check_token(KeyToken):
token = self.get_token()
if not self.check_token(KeyToken, ValueToken, BlockEndToken):
self.states.append(self.parse_block_mapping_value)
return self.parse_block_node_or_indentless_sequence()
else:
self.state = self.parse_block_mapping_value
return self.process_empty_scalar(token.end_mark)
if not self.check_token(BlockEndToken):
token = self.peek_token()
raise ParserError("while parsing a block mapping", self.marks[-1],
"expected <block end>, but found %r" % token.id, token.start_mark)
token = self.get_token()
event = MappingEndEvent(token.start_mark, token.end_mark)
self.state = self.states.pop()
self.marks.pop()
return event
def parse_block_mapping_value(self):
if self.check_token(ValueToken):
token = self.get_token()
if not self.check_token(KeyToken, ValueToken, BlockEndToken):
self.states.append(self.parse_block_mapping_key)
return self.parse_block_node_or_indentless_sequence()
else:
self.state = self.parse_block_mapping_key
return self.process_empty_scalar(token.end_mark)
else:
self.state = self.parse_block_mapping_key
token = self.peek_token()
return self.process_empty_scalar(token.start_mark)
# flow_sequence ::= FLOW-SEQUENCE-START
# (flow_sequence_entry FLOW-ENTRY)*
# flow_sequence_entry?
# FLOW-SEQUENCE-END
# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
#
    # Note that while the production rules for flow_sequence_entry and
    # flow_mapping_entry are identical, their interpretations differ:
    # inside a flow sequence, the part `KEY flow_node? (VALUE flow_node?)?`
    # generates an inline mapping (set syntax).
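    # For example, the flow sequence "[ a: 1, b ]" is parsed as a two-entry
    # sequence whose first entry is the single-pair mapping {a: 1} and whose
    # second entry is the plain scalar b.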
def parse_flow_sequence_first_entry(self):
token = self.get_token()
self.marks.append(token.start_mark)
return self.parse_flow_sequence_entry(first=True)
def parse_flow_sequence_entry(self, first=False):
if not self.check_token(FlowSequenceEndToken):
if not first:
if self.check_token(FlowEntryToken):
self.get_token()
else:
token = self.peek_token()
raise ParserError("while parsing a flow sequence", self.marks[-1],
"expected ',' or ']', but got %r" % token.id, token.start_mark)
if self.check_token(KeyToken):
token = self.peek_token()
event = MappingStartEvent(None, None, True,
token.start_mark, token.end_mark,
flow_style=True)
self.state = self.parse_flow_sequence_entry_mapping_key
return event
elif not self.check_token(FlowSequenceEndToken):
self.states.append(self.parse_flow_sequence_entry)
return self.parse_flow_node()
token = self.get_token()
event = SequenceEndEvent(token.start_mark, token.end_mark)
self.state = self.states.pop()
self.marks.pop()
return event
def parse_flow_sequence_entry_mapping_key(self):
token = self.get_token()
if not self.check_token(ValueToken,
FlowEntryToken, FlowSequenceEndToken):
self.states.append(self.parse_flow_sequence_entry_mapping_value)
return self.parse_flow_node()
else:
self.state = self.parse_flow_sequence_entry_mapping_value
return self.process_empty_scalar(token.end_mark)
def parse_flow_sequence_entry_mapping_value(self):
if self.check_token(ValueToken):
token = self.get_token()
if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
self.states.append(self.parse_flow_sequence_entry_mapping_end)
return self.parse_flow_node()
else:
self.state = self.parse_flow_sequence_entry_mapping_end
return self.process_empty_scalar(token.end_mark)
else:
self.state = self.parse_flow_sequence_entry_mapping_end
token = self.peek_token()
return self.process_empty_scalar(token.start_mark)
def parse_flow_sequence_entry_mapping_end(self):
self.state = self.parse_flow_sequence_entry
token = self.peek_token()
return MappingEndEvent(token.start_mark, token.start_mark)
# flow_mapping ::= FLOW-MAPPING-START
# (flow_mapping_entry FLOW-ENTRY)*
# flow_mapping_entry?
# FLOW-MAPPING-END
# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
def parse_flow_mapping_first_key(self):
token = self.get_token()
self.marks.append(token.start_mark)
return self.parse_flow_mapping_key(first=True)
def parse_flow_mapping_key(self, first=False):
if not self.check_token(FlowMappingEndToken):
if not first:
if self.check_token(FlowEntryToken):
self.get_token()
else:
token = self.peek_token()
raise ParserError("while parsing a flow mapping", self.marks[-1],
"expected ',' or '}', but got %r" % token.id, token.start_mark)
if self.check_token(KeyToken):
token = self.get_token()
if not self.check_token(ValueToken,
FlowEntryToken, FlowMappingEndToken):
self.states.append(self.parse_flow_mapping_value)
return self.parse_flow_node()
else:
self.state = self.parse_flow_mapping_value
return self.process_empty_scalar(token.end_mark)
elif not self.check_token(FlowMappingEndToken):
self.states.append(self.parse_flow_mapping_empty_value)
return self.parse_flow_node()
token = self.get_token()
event = MappingEndEvent(token.start_mark, token.end_mark)
self.state = self.states.pop()
self.marks.pop()
return event
def parse_flow_mapping_value(self):
if self.check_token(ValueToken):
token = self.get_token()
if not self.check_token(FlowEntryToken, FlowMappingEndToken):
self.states.append(self.parse_flow_mapping_key)
return self.parse_flow_node()
else:
self.state = self.parse_flow_mapping_key
return self.process_empty_scalar(token.end_mark)
else:
self.state = self.parse_flow_mapping_key
token = self.peek_token()
return self.process_empty_scalar(token.start_mark)
def parse_flow_mapping_empty_value(self):
self.state = self.parse_flow_mapping_key
return self.process_empty_scalar(self.peek_token().start_mark)
def process_empty_scalar(self, mark):
        return ScalarEvent(None, None, (True, False), '', mark, mark)
# /KADaP01-2.0.0-py3-none-any.whl/yolov5/detect.py
from models.common import DetectMultiBackend
from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams
from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
increment_path, non_max_suppression, print_args, scale_coords, strip_optimizer, xyxy2xywh)
from utils.plots import Annotator, colors, save_one_box
from utils.torch_utils import select_device, smart_inference_mode
from pathlib import Path
import os
import platform
import sys
import torch
class Detect:
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0] # YOLOv5 root directory
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
    def __init__(self):
        pass
@smart_inference_mode()
def run(
weights=ROOT / 'yolov5s.pt', # model.pt path(s)
source=ROOT / 'test/images', # file/dir/URL/glob, 0 for webcam
data=ROOT / 'data/coco128.yaml', # dataset.yaml path
imgsz=(640, 640), # inference size (height, width)
conf_thres=0.5, # confidence threshold
iou_thres=0.45, # NMS IOU threshold
max_det=1000, # maximum detections per image
device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
view_img=False, # show results
save_txt=False, # save results to *.txt
save_conf=False, # save confidences in --save-txt labels
save_crop=False, # save cropped prediction boxes
nosave=False, # do not save images/videos
classes=None, # filter by class: --class 0, or --class 0 2 3
agnostic_nms=False, # class-agnostic NMS
augment=False, # augmented inference
visualize=False, # visualize features
update=False, # update all models
project=ROOT / 'runs/', # save results to project/name
name='exp', # save results to project/name
exist_ok=False, # existing project/name ok, do not increment
line_thickness=3, # bounding box thickness (pixels)
hide_labels=False, # hide labels
hide_conf=False, # hide confidences
half=False, # use FP16 half-precision inference
dnn=False, # use OpenCV DNN for ONNX inference
vid_stride=1, # video frame-rate stride
):
source = str(source)
save_img = not nosave and not source.endswith('.txt') # save inference images
is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS)
is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
if is_url and is_file:
source = check_file(source) # download
# Directories
save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Load model
device = select_device(device)
model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
stride, names, pt = model.stride, model.names, model.pt
imgsz = check_img_size(imgsz, s=stride) # check image size
# Dataloader
dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
bs = 1 # batch_size
# vid_path, vid_writer = [None] * bs, [None] * bs
# Run inference
model.warmup(imgsz=(1 if pt else bs, 3, *imgsz)) # warmup
seen, windows, dt = 0, [], (Profile(), Profile(), Profile())
for path, im, im0s, vid_cap, s in dataset:
with dt[0]:
im = torch.from_numpy(im).to(device)
im = im.half() if model.fp16 else im.float() # uint8 to fp16/32
im /= 255 # 0 - 255 to 0.0 - 1.0
if len(im.shape) == 3:
im = im[None] # expand for batch dim
# Inference
with dt[1]:
visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False
pred = model(im, augment=augment, visualize=visualize)
# NMS
with dt[2]:
pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
# Second-stage classifier (optional)
# pred = utils.general.apply_classifier(pred, classifier_model, im, im0s)
# Process predictions
for i, det in enumerate(pred): # per image
seen += 1
p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0)
p = Path(p) # to Path
save_path = str(save_dir / p.name) # im.jpg
txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt
s += '%gx%g ' % im.shape[2:] # print string
gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh
imc = im0.copy() if save_crop else im0 # for save_crop
annotator = Annotator(im0, line_width=line_thickness, example=str(names))
if len(det):
# Rescale boxes from img_size to im0 size
det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round()
# Print results
for c in det[:, 5].unique():
n = (det[:, 5] == c).sum() # detections per class
s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
# Write results
for *xyxy, conf, cls in reversed(det):
if save_txt: # Write to file
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
with open(f'{txt_path}.txt', 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
if save_img or save_crop or view_img: # Add bbox to image
c = int(cls) # integer class
label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}')
annotator.box_label(xyxy, label, color=colors(c, True))
if save_crop:
save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True)
# Stream results
im0 = annotator.result()
if view_img:
if platform.system() == 'Linux' and p not in windows:
windows.append(p)
cv2.namedWindow(str(p), cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO) # allow window resize (Linux)
cv2.resizeWindow(str(p), im0.shape[1], im0.shape[0])
cv2.imshow(str(p), im0)
cv2.waitKey(1) # 1 millisecond
# Save results (image with detections)
if save_img:
cv2.imwrite(save_path, im0)
# if dataset.mode == 'image':
# cv2.imwrite(save_path, im0)
# else: # 'video' or 'stream'
# if vid_path[i] != save_path: # new video
# vid_path[i] = save_path
# if isinstance(vid_writer[i], cv2.VideoWriter):
# vid_writer[i].release() # release previous video writer
# if vid_cap: # video
# fps = vid_cap.get(cv2.CAP_PROP_FPS)
# w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
# h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
# else: # stream
# fps, w, h = 30, im0.shape[1], im0.shape[0]
# save_path = str(Path(save_path).with_suffix('.mp4')) # force *.mp4 suffix on results videos
# vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
# vid_writer[i].write(im0)
# Print time (inference-only)
LOGGER.info(f"{s}{'' if len(det) else '(no detections), '}{dt[1].dt * 1E3:.1f}ms")
# Print results
t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image
LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t)
if save_txt or save_img:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}")
if update:
strip_optimizer(weights[0]) # update model (to fix SourceChangeWarning)
# def parse_opt():
# parser = argparse.ArgumentParser()
# parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path(s)')
# parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob, 0 for webcam')
# parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='(optional) dataset.yaml path')
# parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
# parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold')
# parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold')
# parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
# parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
# parser.add_argument('--view-img', action='store_true', help='show results')
# parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
# parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
# parser.add_argument('--save-crop', action='store_true', help='save cropped prediction boxes')
# parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
# parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
# parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
# parser.add_argument('--augment', action='store_true', help='augmented inference')
# parser.add_argument('--visualize', action='store_true', help='visualize features')
# parser.add_argument('--update', action='store_true', help='update all models')
# parser.add_argument('--project', default=ROOT / 'runs/detect', help='save results to project/name')
# parser.add_argument('--name', default='exp', help='save results to project/name')
# parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
# parser.add_argument('--line-thickness', default=3, type=int, help='bounding box thickness (pixels)')
# parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
# parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
# parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
# parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
# parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride')
# opt = parser.parse_args()
# opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
# print_args(vars(opt))
# return opt
# def main(opt):
# check_requirements(exclude=('tensorboard', 'thop'))
# run(**vars(opt))
# if __name__ == "__main__":
# opt = parse_opt()
# main(opt)
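# Minimal usage sketch (not part of the original module): since 'run' is defined
# without a 'self' parameter, call it through the class rather than on an instance.
# The weights file and image folder below are assumptions for illustration only.
# if __name__ == "__main__":
#     Detect.run(weights='yolov5s.pt', source='test/images', conf_thres=0.5)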
# /DSM_tools-1.1.0-py3-none-any.whl/dsmtools/utils/misc.py
from queue import LifoQueue
from typing import Generator, Callable, Any
def safe_recursion_bootstrap(func: Callable[[Any], Generator]) -> Callable[[Any], Any]:
"""Convert a recursion function to a stack-size-limitless one, without changing the recursion limit of your system.
It requires that:
1. your recursion function has been refactored to a **generator**, i.e. `yield` instead of `return` everywhere,
even for `return None`, which is often omitted at the end of the function. Also, `yield` for any call of itself
within the recursion.
2. The decorator adds a new keyword argument 'stack' to the recursion function, so your recursion function should
not contain a 'stack' argument itself.
3. Everytime you start the recursion, you must provide a new stack by keyword 'stack', as something
like `func(.., stack=LifoQueue())`, and within the function, the signature must be `func(..., stack=stack)` to
pass this stack along. Don't do anything else with the stack. The decorator will handle everything.
This way, you can call the recursion and retrieve the result same as the normal one.
### Explanations
To be short, it turns the searching process into a generator lets you initialize a stack to store the generators.
Long as there is memory for the stack, there will be no overflow. The speed is comparable to the bare recursion.
The reason for passing a new stack each time you call is for the compatibility for parallel computation. A decorator
with a stack in there is fine only when the decoration takes place locally.
The recursion runs like decorator -> func -> decorator -> return, so the recursion stack never exceeds 2.
"""
def wrapped_func(*args, stack: LifoQueue, **kwargs):
        if not stack.empty():  # called from inside the recursion: return the new generator directly
return func(*args, stack=stack, **kwargs)
recur = func(*args, stack=stack, **kwargs) # initialized generator
while True:
if isinstance(recur, Generator):
stack.put(recur)
recur = next(recur)
            else:  # a plain result value: this branch of the recursion is finished
stack.get()
if stack.empty():
break
recur = stack.queue[-1].send(recur) # send the result back to its parent
return recur
    return wrapped_func
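# Minimal usage sketch (not part of the original module): a hypothetical
# generator-style recursion decorated with safe_recursion_bootstrap. The
# nested-dict tree and the '_tree_depth' helper are illustrative assumptions.
@safe_recursion_bootstrap
def _tree_depth(node, stack):
    """Yield the depth of a tree given as nested dicts: {'children': [subtree, ...]}."""
    best = 0
    for child in node.get('children', []):
        # every self-call is yielded and passes the same stack along
        best = max(best, (yield _tree_depth(child, stack=stack)))
    yield best + 1  # 'yield' replaces 'return', including for the final result
if __name__ == '__main__':
    _tree = {'children': [{'children': []},
                          {'children': [{'children': []}]}]}
    # each top-level call supplies a brand-new LifoQueue
    print(_tree_depth(_tree, stack=LifoQueue()))  # expected output: 3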
# /GB2260-v2-0.2.1.tar.gz/GB2260-v2-0.2.1/gb2260_v2/data/curated/revision_201609.py
from __future__ import unicode_literals
name = '201609'
division_schema = {
'110000': '北京市',
'110101': '东城区',
'110102': '西城区',
'110105': '朝阳区',
'110106': '丰台区',
'110107': '石景山区',
'110108': '海淀区',
'110109': '门头沟区',
'110111': '房山区',
'110112': '通州区',
'110113': '顺义区',
'110114': '昌平区',
'110115': '大兴区',
'110116': '怀柔区',
'110117': '平谷区',
'110118': '密云区',
'110119': '延庆区',
'120000': '天津市',
'120101': '和平区',
'120102': '河东区',
'120103': '河西区',
'120104': '南开区',
'120105': '河北区',
'120106': '红桥区',
'120110': '东丽区',
'120111': '西青区',
'120112': '津南区',
'120113': '北辰区',
'120114': '武清区',
'120115': '宝坻区',
'120116': '滨海新区',
'120117': '宁河区',
'120118': '静海区',
'120225': '蓟县',
'130000': '河北省',
'130100': '石家庄市',
'130102': '长安区',
'130104': '桥西区',
'130105': '新华区',
'130107': '井陉矿区',
'130108': '裕华区',
'130109': '藁城区',
'130110': '鹿泉区',
'130111': '栾城区',
'130121': '井陉县',
'130123': '正定县',
'130125': '行唐县',
'130126': '灵寿县',
'130127': '高邑县',
'130128': '深泽县',
'130129': '赞皇县',
'130130': '无极县',
'130131': '平山县',
'130132': '元氏县',
'130133': '赵县',
'130181': '辛集市',
'130183': '晋州市',
'130184': '新乐市',
'130200': '唐山市',
'130202': '路南区',
'130203': '路北区',
'130204': '古冶区',
'130205': '开平区',
'130207': '丰南区',
'130208': '丰润区',
'130209': '曹妃甸区',
'130223': '滦县',
'130224': '滦南县',
'130225': '乐亭县',
'130227': '迁西县',
'130229': '玉田县',
'130281': '遵化市',
'130283': '迁安市',
'130300': '秦皇岛市',
'130302': '海港区',
'130303': '山海关区',
'130304': '北戴河区',
'130306': '抚宁区',
'130321': '青龙满族自治县',
'130322': '昌黎县',
'130324': '卢龙县',
'130400': '邯郸市',
'130402': '邯山区',
'130403': '丛台区',
'130404': '复兴区',
'130406': '峰峰矿区',
'130421': '邯郸县',
'130423': '临漳县',
'130424': '成安县',
'130425': '大名县',
'130426': '涉县',
'130427': '磁县',
'130428': '肥乡县',
'130429': '永年县',
'130430': '邱县',
'130431': '鸡泽县',
'130432': '广平县',
'130433': '馆陶县',
'130434': '魏县',
'130435': '曲周县',
'130481': '武安市',
'130500': '邢台市',
'130502': '桥东区',
'130503': '桥西区',
'130521': '邢台县',
'130522': '临城县',
'130523': '内丘县',
'130524': '柏乡县',
'130525': '隆尧县',
'130526': '任县',
'130527': '南和县',
'130528': '宁晋县',
'130529': '巨鹿县',
'130530': '新河县',
'130531': '广宗县',
'130532': '平乡县',
'130533': '威县',
'130534': '清河县',
'130535': '临西县',
'130581': '南宫市',
'130582': '沙河市',
'130600': '保定市',
'130602': '竞秀区',
'130604': '南市区',
'130606': '莲池区',
'130607': '满城区',
'130608': '清苑区',
'130609': '徐水区',
'130623': '涞水县',
'130624': '阜平县',
'130626': '定兴县',
'130627': '唐县',
'130628': '高阳县',
'130629': '容城县',
'130630': '涞源县',
'130631': '望都县',
'130632': '安新县',
'130633': '易县',
'130634': '曲阳县',
'130635': '蠡县',
'130636': '顺平县',
'130637': '博野县',
'130638': '雄县',
'130681': '涿州市',
'130682': '定州市',
'130683': '安国市',
'130684': '高碑店市',
'130700': '张家口市',
'130702': '桥东区',
'130703': '桥西区',
'130705': '宣化区',
'130706': '下花园区',
'130708': '万全区',
'130709': '崇礼区',
'130721': '宣化县',
'130722': '张北县',
'130723': '康保县',
'130724': '沽源县',
'130725': '尚义县',
'130726': '蔚县',
'130727': '阳原县',
'130728': '怀安县',
'130730': '怀来县',
'130731': '涿鹿县',
'130732': '赤城县',
'130800': '承德市',
'130802': '双桥区',
'130803': '双滦区',
'130804': '鹰手营子矿区',
'130821': '承德县',
'130822': '兴隆县',
'130823': '平泉县',
'130824': '滦平县',
'130825': '隆化县',
'130826': '丰宁满族自治县',
'130827': '宽城满族自治县',
'130828': '围场满族蒙古族自治县',
'130900': '沧州市',
'130902': '新华区',
'130903': '运河区',
'130921': '沧县',
'130922': '青县',
'130923': '东光县',
'130924': '海兴县',
'130925': '盐山县',
'130926': '肃宁县',
'130927': '南皮县',
'130928': '吴桥县',
'130929': '献县',
'130930': '孟村回族自治县',
'130981': '泊头市',
'130982': '任丘市',
'130983': '黄骅市',
'130984': '河间市',
'131000': '廊坊市',
'131002': '安次区',
'131003': '广阳区',
'131022': '固安县',
'131023': '永清县',
'131024': '香河县',
'131025': '大城县',
'131026': '文安县',
'131028': '大厂回族自治县',
'131081': '霸州市',
'131082': '三河市',
'131100': '衡水市',
'131102': '桃城区',
'131103': '冀州区',
'131121': '枣强县',
'131122': '武邑县',
'131123': '武强县',
'131124': '饶阳县',
'131125': '安平县',
'131126': '故城县',
'131127': '景县',
'131128': '阜城县',
'131182': '深州市',
'140000': '山西省',
'140100': '太原市',
'140105': '小店区',
'140106': '迎泽区',
'140107': '杏花岭区',
'140108': '尖草坪区',
'140109': '万柏林区',
'140110': '晋源区',
'140121': '清徐县',
'140122': '阳曲县',
'140123': '娄烦县',
'140181': '古交市',
'140200': '大同市',
'140202': '城区',
'140203': '矿区',
'140211': '南郊区',
'140212': '新荣区',
'140221': '阳高县',
'140222': '天镇县',
'140223': '广灵县',
'140224': '灵丘县',
'140225': '浑源县',
'140226': '左云县',
'140227': '大同县',
'140300': '阳泉市',
'140302': '城区',
'140303': '矿区',
'140311': '郊区',
'140321': '平定县',
'140322': '盂县',
'140400': '长治市',
'140402': '城区',
'140411': '郊区',
'140421': '长治县',
'140423': '襄垣县',
'140424': '屯留县',
'140425': '平顺县',
'140426': '黎城县',
'140427': '壶关县',
'140428': '长子县',
'140429': '武乡县',
'140430': '沁县',
'140431': '沁源县',
'140481': '潞城市',
'140500': '晋城市',
'140502': '城区',
'140521': '沁水县',
'140522': '阳城县',
'140524': '陵川县',
'140525': '泽州县',
'140581': '高平市',
'140600': '朔州市',
'140602': '朔城区',
'140603': '平鲁区',
'140621': '山阴县',
'140622': '应县',
'140623': '右玉县',
'140624': '怀仁县',
'140700': '晋中市',
'140702': '榆次区',
'140721': '榆社县',
'140722': '左权县',
'140723': '和顺县',
'140724': '昔阳县',
'140725': '寿阳县',
'140726': '太谷县',
'140727': '祁县',
'140728': '平遥县',
'140729': '灵石县',
'140781': '介休市',
'140800': '运城市',
'140802': '盐湖区',
'140821': '临猗县',
'140822': '万荣县',
'140823': '闻喜县',
'140824': '稷山县',
'140825': '新绛县',
'140826': '绛县',
'140827': '垣曲县',
'140828': '夏县',
'140829': '平陆县',
'140830': '芮城县',
'140881': '永济市',
'140882': '河津市',
'140900': '忻州市',
'140902': '忻府区',
'140921': '定襄县',
'140922': '五台县',
'140923': '代县',
'140924': '繁峙县',
'140925': '宁武县',
'140926': '静乐县',
'140927': '神池县',
'140928': '五寨县',
'140929': '岢岚县',
'140930': '河曲县',
'140931': '保德县',
'140932': '偏关县',
'140981': '原平市',
'141000': '临汾市',
'141002': '尧都区',
'141021': '曲沃县',
'141022': '翼城县',
'141023': '襄汾县',
'141024': '洪洞县',
'141025': '古县',
'141026': '安泽县',
'141027': '浮山县',
'141028': '吉县',
'141029': '乡宁县',
'141030': '大宁县',
'141031': '隰县',
'141032': '永和县',
'141033': '蒲县',
'141034': '汾西县',
'141081': '侯马市',
'141082': '霍州市',
'141100': '吕梁市',
'141102': '离石区',
'141121': '文水县',
'141122': '交城县',
'141123': '兴县',
'141124': '临县',
'141125': '柳林县',
'141126': '石楼县',
'141127': '岚县',
'141128': '方山县',
'141129': '中阳县',
'141130': '交口县',
'141181': '孝义市',
'141182': '汾阳市',
'150000': '内蒙古自治区',
'150100': '呼和浩特市',
'150102': '新城区',
'150103': '回民区',
'150104': '玉泉区',
'150105': '赛罕区',
'150121': '土默特左旗',
'150122': '托克托县',
'150123': '和林格尔县',
'150124': '清水河县',
'150125': '武川县',
'150200': '包头市',
'150202': '东河区',
'150203': '昆都仑区',
'150204': '青山区',
'150205': '石拐区',
'150206': '白云鄂博矿区',
'150207': '九原区',
'150221': '土默特右旗',
'150222': '固阳县',
'150223': '达尔罕茂明安联合旗',
'150300': '乌海市',
'150302': '海勃湾区',
'150303': '海南区',
'150304': '乌达区',
'150400': '赤峰市',
'150402': '红山区',
'150403': '元宝山区',
'150404': '松山区',
'150421': '阿鲁科尔沁旗',
'150422': '巴林左旗',
'150423': '巴林右旗',
'150424': '林西县',
'150425': '克什克腾旗',
'150426': '翁牛特旗',
'150428': '喀喇沁旗',
'150429': '宁城县',
'150430': '敖汉旗',
'150500': '通辽市',
'150502': '科尔沁区',
'150521': '科尔沁左翼中旗',
'150522': '科尔沁左翼后旗',
'150523': '开鲁县',
'150524': '库伦旗',
'150525': '奈曼旗',
'150526': '扎鲁特旗',
'150581': '霍林郭勒市',
'150600': '鄂尔多斯市',
'150602': '东胜区',
'150621': '达拉特旗',
'150622': '准格尔旗',
'150623': '鄂托克前旗',
'150624': '鄂托克旗',
'150625': '杭锦旗',
'150626': '乌审旗',
'150627': '伊金霍洛旗',
'150700': '呼伦贝尔市',
'150702': '海拉尔区',
'150703': '扎赉诺尔区',
'150721': '阿荣旗',
'150722': '莫力达瓦达斡尔族自治旗',
'150723': '鄂伦春自治旗',
'150724': '鄂温克族自治旗',
'150725': '陈巴尔虎旗',
'150726': '新巴尔虎左旗',
'150727': '新巴尔虎右旗',
'150781': '满洲里市',
'150782': '牙克石市',
'150783': '扎兰屯市',
'150784': '额尔古纳市',
'150785': '根河市',
'150800': '巴彦淖尔市',
'150802': '临河区',
'150821': '五原县',
'150822': '磴口县',
'150823': '乌拉特前旗',
'150824': '乌拉特中旗',
'150825': '乌拉特后旗',
'150826': '杭锦后旗',
'150900': '乌兰察布市',
'150902': '集宁区',
'150921': '卓资县',
'150922': '化德县',
'150923': '商都县',
'150924': '兴和县',
'150925': '凉城县',
'150926': '察哈尔右翼前旗',
'150927': '察哈尔右翼中旗',
'150928': '察哈尔右翼后旗',
'150929': '四子王旗',
'150981': '丰镇市',
'152200': '兴安盟',
'152201': '乌兰浩特市',
'152202': '阿尔山市',
'152221': '科尔沁右翼前旗',
'152222': '科尔沁右翼中旗',
'152223': '扎赉特旗',
'152224': '突泉县',
'152500': '锡林郭勒盟',
'152501': '二连浩特市',
'152502': '锡林浩特市',
'152522': '阿巴嘎旗',
'152523': '苏尼特左旗',
'152524': '苏尼特右旗',
'152525': '东乌珠穆沁旗',
'152526': '西乌珠穆沁旗',
'152527': '太仆寺旗',
'152528': '镶黄旗',
'152529': '正镶白旗',
'152530': '正蓝旗',
'152531': '多伦县',
'152900': '阿拉善盟',
'152921': '阿拉善左旗',
'152922': '阿拉善右旗',
'152923': '额济纳旗',
'210000': '辽宁省',
'210100': '沈阳市',
'210102': '和平区',
'210103': '沈河区',
'210104': '大东区',
'210105': '皇姑区',
'210106': '铁西区',
'210111': '苏家屯区',
'210112': '东陵区',
'210113': '沈北新区',
'210114': '于洪区',
'210115': '辽中区',
'210123': '康平县',
'210124': '法库县',
'210181': '新民市',
'210200': '大连市',
'210202': '中山区',
'210203': '西岗区',
'210204': '沙河口区',
'210211': '甘井子区',
'210212': '旅顺口区',
'210213': '金州区',
'210224': '长海县',
'210281': '瓦房店市',
'210214': '普兰店区',
'210283': '庄河市',
'210300': '鞍山市',
'210302': '铁东区',
'210303': '铁西区',
'210304': '立山区',
'210311': '千山区',
'210321': '台安县',
'210323': '岫岩满族自治县',
'210381': '海城市',
'210400': '抚顺市',
'210402': '新抚区',
'210403': '东洲区',
'210404': '望花区',
'210411': '顺城区',
'210421': '抚顺县',
'210422': '新宾满族自治县',
'210423': '清原满族自治县',
'210500': '本溪市',
'210502': '平山区',
'210503': '溪湖区',
'210504': '明山区',
'210505': '南芬区',
'210521': '本溪满族自治县',
'210522': '桓仁满族自治县',
'210600': '丹东市',
'210602': '元宝区',
'210603': '振兴区',
'210604': '振安区',
'210624': '宽甸满族自治县',
'210681': '东港市',
'210682': '凤城市',
'210700': '锦州市',
'210702': '古塔区',
'210703': '凌河区',
'210711': '太和区',
'210726': '黑山县',
'210727': '义县',
'210781': '凌海市',
'210782': '北镇市',
'210800': '营口市',
'210802': '站前区',
'210803': '西市区',
'210804': '鲅鱼圈区',
'210811': '老边区',
'210881': '盖州市',
'210882': '大石桥市',
'210900': '阜新市',
'210902': '海州区',
'210903': '新邱区',
'210904': '太平区',
'210905': '清河门区',
'210911': '细河区',
'210921': '阜新蒙古族自治县',
'210922': '彰武县',
'211000': '辽阳市',
'211002': '白塔区',
'211003': '文圣区',
'211004': '宏伟区',
'211005': '弓长岭区',
'211011': '太子河区',
'211021': '辽阳县',
'211081': '灯塔市',
'211100': '盘锦市',
'211102': '双台子区',
'211103': '兴隆台区',
'211104': '大洼区',
'211122': '盘山县',
'211200': '铁岭市',
'211202': '银州区',
'211204': '清河区',
'211221': '铁岭县',
'211223': '西丰县',
'211224': '昌图县',
'211281': '调兵山市',
'211282': '开原市',
'211300': '朝阳市',
'211302': '双塔区',
'211303': '龙城区',
'211321': '朝阳县',
'211322': '建平县',
'211324': '喀喇沁左翼蒙古族自治县',
'211381': '北票市',
'211382': '凌源市',
'211400': '葫芦岛市',
'211402': '连山区',
'211403': '龙港区',
'211404': '南票区',
'211421': '绥中县',
'211422': '建昌县',
'211481': '兴城市',
'220000': '吉林省',
'220100': '长春市',
'220102': '南关区',
'220103': '宽城区',
'220104': '朝阳区',
'220105': '二道区',
'220106': '绿园区',
'220112': '双阳区',
'220113': '九台区',
'220122': '农安县',
'220182': '榆树市',
'220183': '德惠市',
'220200': '吉林市',
'220202': '昌邑区',
'220203': '龙潭区',
'220204': '船营区',
'220211': '丰满区',
'220221': '永吉县',
'220281': '蛟河市',
'220282': '桦甸市',
'220283': '舒兰市',
'220284': '磐石市',
'220300': '四平市',
'220302': '铁西区',
'220303': '铁东区',
'220322': '梨树县',
'220323': '伊通满族自治县',
'220381': '公主岭市',
'220382': '双辽市',
'220400': '辽源市',
'220402': '龙山区',
'220403': '西安区',
'220421': '东丰县',
'220422': '东辽县',
'220500': '通化市',
'220502': '东昌区',
'220503': '二道江区',
'220521': '通化县',
'220523': '辉南县',
'220524': '柳河县',
'220581': '梅河口市',
'220582': '集安市',
'220600': '白山市',
'220602': '浑江区',
'220605': '江源区',
'220621': '抚松县',
'220622': '靖宇县',
'220623': '长白朝鲜族自治县',
'220681': '临江市',
'220700': '松原市',
'220702': '宁江区',
'220721': '前郭尔罗斯蒙古族自治县',
'220722': '长岭县',
'220723': '乾安县',
'220781': '扶余市',
'220800': '白城市',
'220802': '洮北区',
'220821': '镇赉县',
'220822': '通榆县',
'220881': '洮南市',
'220882': '大安市',
'222400': '延边朝鲜族自治州',
'222401': '延吉市',
'222402': '图们市',
'222403': '敦化市',
'222404': '珲春市',
'222405': '龙井市',
'222406': '和龙市',
'222424': '汪清县',
'222426': '安图县',
'230000': '黑龙江省',
'230100': '哈尔滨市',
'230102': '道里区',
'230103': '南岗区',
'230104': '道外区',
'230108': '平房区',
'230109': '松北区',
'230110': '香坊区',
'230111': '呼兰区',
'230112': '阿城区',
'230113': '双城区',
'230123': '依兰县',
'230124': '方正县',
'230125': '宾县',
'230126': '巴彦县',
'230127': '木兰县',
'230128': '通河县',
'230129': '延寿县',
'230183': '尚志市',
'230184': '五常市',
'230200': '齐齐哈尔市',
'230202': '龙沙区',
'230203': '建华区',
'230204': '铁锋区',
'230205': '昂昂溪区',
'230206': '富拉尔基区',
'230207': '碾子山区',
'230208': '梅里斯达斡尔族区',
'230221': '龙江县',
'230223': '依安县',
'230224': '泰来县',
'230225': '甘南县',
'230227': '富裕县',
'230229': '克山县',
'230230': '克东县',
'230231': '拜泉县',
'230281': '讷河市',
'230300': '鸡西市',
'230302': '鸡冠区',
'230303': '恒山区',
'230304': '滴道区',
'230305': '梨树区',
'230306': '城子河区',
'230307': '麻山区',
'230321': '鸡东县',
'230381': '虎林市',
'230382': '密山市',
'230400': '鹤岗市',
'230402': '向阳区',
'230403': '工农区',
'230404': '南山区',
'230405': '兴安区',
'230406': '东山区',
'230407': '兴山区',
'230421': '萝北县',
'230422': '绥滨县',
'230500': '双鸭山市',
'230502': '尖山区',
'230503': '岭东区',
'230505': '四方台区',
'230506': '宝山区',
'230521': '集贤县',
'230522': '友谊县',
'230523': '宝清县',
'230524': '饶河县',
'230600': '大庆市',
'230602': '萨尔图区',
'230603': '龙凤区',
'230604': '让胡路区',
'230605': '红岗区',
'230606': '大同区',
'230621': '肇州县',
'230622': '肇源县',
'230623': '林甸县',
'230624': '杜尔伯特蒙古族自治县',
'230700': '伊春市',
'230702': '伊春区',
'230703': '南岔区',
'230704': '友好区',
'230705': '西林区',
'230706': '翠峦区',
'230707': '新青区',
'230708': '美溪区',
'230709': '金山屯区',
'230710': '五营区',
'230711': '乌马河区',
'230712': '汤旺河区',
'230713': '带岭区',
'230714': '乌伊岭区',
'230715': '红星区',
'230716': '上甘岭区',
'230722': '嘉荫县',
'230781': '铁力市',
'230800': '佳木斯市',
'230803': '向阳区',
'230804': '前进区',
'230805': '东风区',
'230811': '郊区',
'230822': '桦南县',
'230826': '桦川县',
'230828': '汤原县',
'230881': '同江市',
'230882': '富锦市',
'230883': '抚远市',
'230900': '七台河市',
'230902': '新兴区',
'230903': '桃山区',
'230904': '茄子河区',
'230921': '勃利县',
'231000': '牡丹江市',
'231002': '东安区',
'231003': '阳明区',
'231004': '爱民区',
'231005': '西安区',
'231025': '林口县',
'231081': '绥芬河市',
'231083': '海林市',
'231084': '宁安市',
'231085': '穆棱市',
'231086': '东宁市',
'231100': '黑河市',
'231102': '爱辉区',
'231121': '嫩江县',
'231123': '逊克县',
'231124': '孙吴县',
'231181': '北安市',
'231182': '五大连池市',
'231200': '绥化市',
'231202': '北林区',
'231221': '望奎县',
'231222': '兰西县',
'231223': '青冈县',
'231224': '庆安县',
'231225': '明水县',
'231226': '绥棱县',
'231281': '安达市',
'231282': '肇东市',
'231283': '海伦市',
'232700': '大兴安岭地区',
'232721': '呼玛县',
'232722': '塔河县',
'232723': '漠河县',
'310000': '上海市',
'310101': '黄浦区',
'310104': '徐汇区',
'310105': '长宁区',
'310106': '静安区',
'310107': '普陀区',
'310109': '虹口区',
'310110': '杨浦区',
'310112': '闵行区',
'310113': '宝山区',
'310114': '嘉定区',
'310115': '浦东新区',
'310116': '金山区',
'310117': '松江区',
'310118': '青浦区',
'310120': '奉贤区',
'310151': '崇明区',
'320000': '江苏省',
'320100': '南京市',
'320102': '玄武区',
'320104': '秦淮区',
'320105': '建邺区',
'320106': '鼓楼区',
'320111': '浦口区',
'320113': '栖霞区',
'320114': '雨花台区',
'320115': '江宁区',
'320116': '六合区',
'320117': '溧水区',
'320118': '高淳区',
'320200': '无锡市',
'320205': '锡山区',
'320206': '惠山区',
'320211': '滨湖区',
'320213': '梁溪区',
'320214': '新吴区',
'320281': '江阴市',
'320282': '宜兴市',
'320300': '徐州市',
'320302': '鼓楼区',
'320303': '云龙区',
'320305': '贾汪区',
'320311': '泉山区',
'320312': '铜山区',
'320321': '丰县',
'320322': '沛县',
'320324': '睢宁县',
'320381': '新沂市',
'320382': '邳州市',
'320400': '常州市',
'320402': '天宁区',
'320404': '钟楼区',
'320411': '新北区',
'320412': '武进区',
'320413': '金坛区',
'320481': '溧阳市',
'320500': '苏州市',
'320505': '虎丘区',
'320506': '吴中区',
'320507': '相城区',
'320508': '姑苏区',
'320509': '吴江区',
'320581': '常熟市',
'320582': '张家港市',
'320583': '昆山市',
'320585': '太仓市',
'320600': '南通市',
'320602': '崇川区',
'320611': '港闸区',
'320612': '通州区',
'320621': '海安县',
'320623': '如东县',
'320681': '启东市',
'320682': '如皋市',
'320684': '海门市',
'320700': '连云港市',
'320703': '连云区',
'320706': '海州区',
'320707': '赣榆区',
'320722': '东海县',
'320723': '灌云县',
'320724': '灌南县',
'320800': '淮安市',
'320802': '清河区',
'320803': '淮安区',
'320804': '淮阴区',
'320811': '清浦区',
'320813': '洪泽区',
'320826': '涟水县',
'320830': '盱眙县',
'320831': '金湖县',
'320900': '盐城市',
'320902': '亭湖区',
'320903': '盐都区',
'320904': '大丰区',
'320921': '响水县',
'320922': '滨海县',
'320923': '阜宁县',
'320924': '射阳县',
'320925': '建湖县',
'320981': '东台市',
'321000': '扬州市',
'321002': '广陵区',
'321003': '邗江区',
'321012': '江都区',
'321023': '宝应县',
'321081': '仪征市',
'321084': '高邮市',
'321100': '镇江市',
'321102': '京口区',
'321111': '润州区',
'321112': '丹徒区',
'321181': '丹阳市',
'321182': '扬中市',
'321183': '句容市',
'321200': '泰州市',
'321202': '海陵区',
'321203': '高港区',
'321204': '姜堰区',
'321281': '兴化市',
'321282': '靖江市',
'321283': '泰兴市',
'321300': '宿迁市',
'321302': '宿城区',
'321311': '宿豫区',
'321322': '沭阳县',
'321323': '泗阳县',
'321324': '泗洪县',
'330000': '浙江省',
'330100': '杭州市',
'330102': '上城区',
'330103': '下城区',
'330104': '江干区',
'330105': '拱墅区',
'330106': '西湖区',
'330108': '滨江区',
'330109': '萧山区',
'330110': '余杭区',
'330111': '富阳区',
'330122': '桐庐县',
'330127': '淳安县',
'330182': '建德市',
'330185': '临安市',
'330200': '宁波市',
'330203': '海曙区',
'330204': '江东区',
'330205': '江北区',
'330206': '北仑区',
'330211': '镇海区',
'330212': '鄞州区',
'330225': '象山县',
'330226': '宁海县',
'330281': '余姚市',
'330282': '慈溪市',
'330283': '奉化市',
'330300': '温州市',
'330302': '鹿城区',
'330303': '龙湾区',
'330304': '瓯海区',
'330305': '洞头区',
'330324': '永嘉县',
'330326': '平阳县',
'330327': '苍南县',
'330328': '文成县',
'330329': '泰顺县',
'330381': '瑞安市',
'330382': '乐清市',
'330400': '嘉兴市',
'330402': '南湖区',
'330411': '秀洲区',
'330421': '嘉善县',
'330424': '海盐县',
'330481': '海宁市',
'330482': '平湖市',
'330483': '桐乡市',
'330500': '湖州市',
'330502': '吴兴区',
'330503': '南浔区',
'330521': '德清县',
'330522': '长兴县',
'330523': '安吉县',
'330600': '绍兴市',
'330602': '越城区',
'330603': '柯桥区',
'330604': '上虞区',
'330624': '新昌县',
'330681': '诸暨市',
'330683': '嵊州市',
'330700': '金华市',
'330702': '婺城区',
'330703': '金东区',
'330723': '武义县',
'330726': '浦江县',
'330727': '磐安县',
'330781': '兰溪市',
'330782': '义乌市',
'330783': '东阳市',
'330784': '永康市',
'330800': '衢州市',
'330802': '柯城区',
'330803': '衢江区',
'330822': '常山县',
'330824': '开化县',
'330825': '龙游县',
'330881': '江山市',
'330900': '舟山市',
'330902': '定海区',
'330903': '普陀区',
'330921': '岱山县',
'330922': '嵊泗县',
'331000': '台州市',
'331002': '椒江区',
'331003': '黄岩区',
'331004': '路桥区',
'331021': '玉环县',
'331022': '三门县',
'331023': '天台县',
'331024': '仙居县',
'331081': '温岭市',
'331082': '临海市',
'331100': '丽水市',
'331102': '莲都区',
'331121': '青田县',
'331122': '缙云县',
'331123': '遂昌县',
'331124': '松阳县',
'331125': '云和县',
'331126': '庆元县',
'331127': '景宁畲族自治县',
'331181': '龙泉市',
'340000': '安徽省',
'340100': '合肥市',
'340102': '瑶海区',
'340103': '庐阳区',
'340104': '蜀山区',
'340111': '包河区',
'340121': '长丰县',
'340122': '肥东县',
'340123': '肥西县',
'340124': '庐江县',
'340181': '巢湖市',
'340200': '芜湖市',
'340202': '镜湖区',
'340203': '弋江区',
'340207': '鸠江区',
'340208': '三山区',
'340221': '芜湖县',
'340222': '繁昌县',
'340223': '南陵县',
'340225': '无为县',
'340300': '蚌埠市',
'340302': '龙子湖区',
'340303': '蚌山区',
'340304': '禹会区',
'340311': '淮上区',
'340321': '怀远县',
'340322': '五河县',
'340323': '固镇县',
'340400': '淮南市',
'340402': '大通区',
'340403': '田家庵区',
'340404': '谢家集区',
'340405': '八公山区',
'340406': '潘集区',
'340421': '凤台县',
'340422': '寿县',
'340500': '马鞍山市',
'340503': '花山区',
'340504': '雨山区',
'340506': '博望区',
'340521': '当涂县',
'340522': '含山县',
'340523': '和县',
'340600': '淮北市',
'340602': '杜集区',
'340603': '相山区',
'340604': '烈山区',
'340621': '濉溪县',
'340700': '铜陵市',
'340705': '铜官区',
'340706': '义安区',
'340711': '郊区',
'340722': '枞阳县',
'340800': '安庆市',
'340802': '迎江区',
'340803': '大观区',
'340811': '宜秀区',
'340822': '怀宁县',
'340824': '潜山县',
'340825': '太湖县',
'340826': '宿松县',
'340827': '望江县',
'340828': '岳西县',
'340881': '桐城市',
'341000': '黄山市',
'341002': '屯溪区',
'341003': '黄山区',
'341004': '徽州区',
'341021': '歙县',
'341022': '休宁县',
'341023': '黟县',
'341024': '祁门县',
'341100': '滁州市',
'341102': '琅琊区',
'341103': '南谯区',
'341122': '来安县',
'341124': '全椒县',
'341125': '定远县',
'341126': '凤阳县',
'341181': '天长市',
'341182': '明光市',
'341200': '阜阳市',
'341202': '颍州区',
'341203': '颍东区',
'341204': '颍泉区',
'341221': '临泉县',
'341222': '太和县',
'341225': '阜南县',
'341226': '颍上县',
'341282': '界首市',
'341300': '宿州市',
'341302': '埇桥区',
'341321': '砀山县',
'341322': '萧县',
'341323': '灵璧县',
'341324': '泗县',
'341500': '六安市',
'341502': '金安区',
'341503': '裕安区',
'341504': '叶集区',
'341522': '霍邱县',
'341523': '舒城县',
'341524': '金寨县',
'341525': '霍山县',
'341600': '亳州市',
'341602': '谯城区',
'341621': '涡阳县',
'341622': '蒙城县',
'341623': '利辛县',
'341700': '池州市',
'341702': '贵池区',
'341721': '东至县',
'341722': '石台县',
'341723': '青阳县',
'341800': '宣城市',
'341802': '宣州区',
'341821': '郎溪县',
'341822': '广德县',
'341823': '泾县',
'341824': '绩溪县',
'341825': '旌德县',
'341881': '宁国市',
'350000': '福建省',
'350100': '福州市',
'350102': '鼓楼区',
'350103': '台江区',
'350104': '仓山区',
'350105': '马尾区',
'350111': '晋安区',
'350121': '闽侯县',
'350122': '连江县',
'350123': '罗源县',
'350124': '闽清县',
'350125': '永泰县',
'350128': '平潭县',
'350181': '福清市',
'350182': '长乐市',
'350200': '厦门市',
'350203': '思明区',
'350205': '海沧区',
'350206': '湖里区',
'350211': '集美区',
'350212': '同安区',
'350213': '翔安区',
'350300': '莆田市',
'350302': '城厢区',
'350303': '涵江区',
'350304': '荔城区',
'350305': '秀屿区',
'350322': '仙游县',
'350400': '三明市',
'350402': '梅列区',
'350403': '三元区',
'350421': '明溪县',
'350423': '清流县',
'350424': '宁化县',
'350425': '大田县',
'350426': '尤溪县',
'350427': '沙县',
'350428': '将乐县',
'350429': '泰宁县',
'350430': '建宁县',
'350481': '永安市',
'350500': '泉州市',
'350502': '鲤城区',
'350503': '丰泽区',
'350504': '洛江区',
'350505': '泉港区',
'350521': '惠安县',
'350524': '安溪县',
'350525': '永春县',
'350526': '德化县',
'350527': '金门县',
'350581': '石狮市',
'350582': '晋江市',
'350583': '南安市',
'350600': '漳州市',
'350602': '芗城区',
'350603': '龙文区',
'350622': '云霄县',
'350623': '漳浦县',
'350624': '诏安县',
'350625': '长泰县',
'350626': '东山县',
'350627': '南靖县',
'350628': '平和县',
'350629': '华安县',
'350681': '龙海市',
'350700': '南平市',
'350702': '延平区',
'350703': '建阳区',
'350721': '顺昌县',
'350722': '浦城县',
'350723': '光泽县',
'350724': '松溪县',
'350725': '政和县',
'350781': '邵武市',
'350782': '武夷山市',
'350783': '建瓯市',
'350800': '龙岩市',
'350802': '新罗区',
'350803': '永定区',
'350821': '长汀县',
'350823': '上杭县',
'350824': '武平县',
'350825': '连城县',
'350881': '漳平市',
'350900': '宁德市',
'350902': '蕉城区',
'350921': '霞浦县',
'350922': '古田县',
'350923': '屏南县',
'350924': '寿宁县',
'350925': '周宁县',
'350926': '柘荣县',
'350981': '福安市',
'350982': '福鼎市',
'360000': '江西省',
'360100': '南昌市',
'360102': '东湖区',
'360103': '西湖区',
'360104': '青云谱区',
'360105': '湾里区',
'360111': '青山湖区',
'360112': '新建区',
'360121': '南昌县',
'360123': '安义县',
'360124': '进贤县',
'360200': '景德镇市',
'360202': '昌江区',
'360203': '珠山区',
'360222': '浮梁县',
'360281': '乐平市',
'360300': '萍乡市',
'360302': '安源区',
'360313': '湘东区',
'360321': '莲花县',
'360322': '上栗县',
'360323': '芦溪县',
'360400': '九江市',
'360402': '庐山区',
'360403': '浔阳区',
'360421': '九江县',
'360423': '武宁县',
'360424': '修水县',
'360425': '永修县',
'360426': '德安县',
'360427': '星子县',
'360428': '都昌县',
'360429': '湖口县',
'360430': '彭泽县',
'360481': '瑞昌市',
'360482': '共青城市',
'360500': '新余市',
'360502': '渝水区',
'360521': '分宜县',
'360600': '鹰潭市',
'360602': '月湖区',
'360622': '余江县',
'360681': '贵溪市',
'360700': '赣州市',
'360702': '章贡区',
'360703': '南康区',
'360721': '赣县',
'360722': '信丰县',
'360723': '大余县',
'360724': '上犹县',
'360725': '崇义县',
'360726': '安远县',
'360727': '龙南县',
'360728': '定南县',
'360729': '全南县',
'360730': '宁都县',
'360731': '于都县',
'360732': '兴国县',
'360733': '会昌县',
'360734': '寻乌县',
'360735': '石城县',
'360781': '瑞金市',
'360800': '吉安市',
'360802': '吉州区',
'360803': '青原区',
'360821': '吉安县',
'360822': '吉水县',
'360823': '峡江县',
'360824': '新干县',
'360825': '永丰县',
'360826': '泰和县',
'360827': '遂川县',
'360828': '万安县',
'360829': '安福县',
'360830': '永新县',
'360881': '井冈山市',
'360900': '宜春市',
'360902': '袁州区',
'360921': '奉新县',
'360922': '万载县',
'360923': '上高县',
'360924': '宜丰县',
'360925': '靖安县',
'360926': '铜鼓县',
'360981': '丰城市',
'360982': '樟树市',
'360983': '高安市',
'361000': '抚州市',
'361002': '临川区',
'361021': '南城县',
'361022': '黎川县',
'361023': '南丰县',
'361024': '崇仁县',
'361025': '乐安县',
'361026': '宜黄县',
'361027': '金溪县',
'361028': '资溪县',
'361029': '东乡县',
'361030': '广昌县',
'361100': '上饶市',
'361102': '信州区',
'361121': '上饶县',
'361103': '广丰区',
'361123': '玉山县',
'361124': '铅山县',
'361125': '横峰县',
'361126': '弋阳县',
'361127': '余干县',
'361128': '鄱阳县',
'361129': '万年县',
'361130': '婺源县',
'361181': '德兴市',
'370000': '山东省',
'370100': '济南市',
'370102': '历下区',
'370103': '市中区',
'370104': '槐荫区',
'370105': '天桥区',
'370112': '历城区',
'370113': '长清区',
'370124': '平阴县',
'370125': '济阳县',
'370126': '商河县',
'370181': '章丘市',
'370200': '青岛市',
'370202': '市南区',
'370203': '市北区',
'370211': '黄岛区',
'370212': '崂山区',
'370213': '李沧区',
'370214': '城阳区',
'370281': '胶州市',
'370282': '即墨市',
'370283': '平度市',
'370285': '莱西市',
'370300': '淄博市',
'370302': '淄川区',
'370303': '张店区',
'370304': '博山区',
'370305': '临淄区',
'370306': '周村区',
'370321': '桓台县',
'370322': '高青县',
'370323': '沂源县',
'370400': '枣庄市',
'370402': '市中区',
'370403': '薛城区',
'370404': '峄城区',
'370405': '台儿庄区',
'370406': '山亭区',
'370481': '滕州市',
'370500': '东营市',
'370502': '东营区',
'370503': '河口区',
'370505': '垦利区',
'370522': '利津县',
'370523': '广饶县',
'370600': '烟台市',
'370602': '芝罘区',
'370611': '福山区',
'370612': '牟平区',
'370613': '莱山区',
'370634': '长岛县',
'370681': '龙口市',
'370682': '莱阳市',
'370683': '莱州市',
'370684': '蓬莱市',
'370685': '招远市',
'370686': '栖霞市',
'370687': '海阳市',
'370700': '潍坊市',
'370702': '潍城区',
'370703': '寒亭区',
'370704': '坊子区',
'370705': '奎文区',
'370724': '临朐县',
'370725': '昌乐县',
'370781': '青州市',
'370782': '诸城市',
'370783': '寿光市',
'370784': '安丘市',
'370785': '高密市',
'370786': '昌邑市',
'370800': '济宁市',
'370811': '任城区',
'370812': '兖州区',
'370826': '微山县',
'370827': '鱼台县',
'370828': '金乡县',
'370829': '嘉祥县',
'370830': '汶上县',
'370831': '泗水县',
'370832': '梁山县',
'370881': '曲阜市',
'370883': '邹城市',
'370900': '泰安市',
'370902': '泰山区',
'370911': '岱岳区',
'370921': '宁阳县',
'370923': '东平县',
'370982': '新泰市',
'370983': '肥城市',
'371000': '威海市',
'371002': '环翠区',
'371003': '文登区',
'371082': '荣成市',
'371083': '乳山市',
'371100': '日照市',
'371102': '东港区',
'371103': '岚山区',
'371121': '五莲县',
'371122': '莒县',
'371200': '莱芜市',
'371202': '莱城区',
'371203': '钢城区',
'371300': '临沂市',
'371302': '兰山区',
'371311': '罗庄区',
'371312': '河东区',
'371321': '沂南县',
'371322': '郯城县',
'371323': '沂水县',
'371324': '兰陵县',
'371325': '费县',
'371326': '平邑县',
'371327': '莒南县',
'371328': '蒙阴县',
'371329': '临沭县',
'371400': '德州市',
'371402': '德城区',
'371403': '陵城区',
'371422': '宁津县',
'371423': '庆云县',
'371424': '临邑县',
'371425': '齐河县',
'371426': '平原县',
'371427': '夏津县',
'371428': '武城县',
'371481': '乐陵市',
'371482': '禹城市',
'371500': '聊城市',
'371502': '东昌府区',
'371521': '阳谷县',
'371522': '莘县',
'371523': '茌平县',
'371524': '东阿县',
'371525': '冠县',
'371526': '高唐县',
'371581': '临清市',
'371600': '滨州市',
'371602': '滨城区',
'371603': '沾化区',
'371621': '惠民县',
'371622': '阳信县',
'371623': '无棣县',
'371625': '博兴县',
'371626': '邹平县',
'371700': '菏泽市',
'371702': '牡丹区',
'371703': '定陶区',
'371721': '曹县',
'371722': '单县',
'371723': '成武县',
'371724': '巨野县',
'371725': '郓城县',
'371726': '鄄城县',
'371728': '东明县',
'410000': '河南省',
'410100': '郑州市',
'410102': '中原区',
'410103': '二七区',
'410104': '管城回族区',
'410105': '金水区',
'410106': '上街区',
'410108': '惠济区',
'410122': '中牟县',
'410181': '巩义市',
'410182': '荥阳市',
'410183': '新密市',
'410184': '新郑市',
'410185': '登封市',
'410200': '开封市',
'410202': '龙亭区',
'410203': '顺河回族区',
'410204': '鼓楼区',
'410205': '禹王台区',
'410212': '祥符区',
'410221': '杞县',
'410222': '通许县',
'410223': '尉氏县',
'410225': '兰考县',
'410300': '洛阳市',
'410302': '老城区',
'410303': '西工区',
'410304': '瀍河回族区',
'410305': '涧西区',
'410306': '吉利区',
'410311': '洛龙区',
'410322': '孟津县',
'410323': '新安县',
'410324': '栾川县',
'410325': '嵩县',
'410326': '汝阳县',
'410327': '宜阳县',
'410328': '洛宁县',
'410329': '伊川县',
'410381': '偃师市',
'410400': '平顶山市',
'410402': '新华区',
'410403': '卫东区',
'410404': '石龙区',
'410411': '湛河区',
'410421': '宝丰县',
'410422': '叶县',
'410423': '鲁山县',
'410425': '郏县',
'410481': '舞钢市',
'410482': '汝州市',
'410500': '安阳市',
'410502': '文峰区',
'410503': '北关区',
'410505': '殷都区',
'410506': '龙安区',
'410522': '安阳县',
'410523': '汤阴县',
'410526': '滑县',
'410527': '内黄县',
'410581': '林州市',
'410600': '鹤壁市',
'410602': '鹤山区',
'410603': '山城区',
'410611': '淇滨区',
'410621': '浚县',
'410622': '淇县',
'410700': '新乡市',
'410702': '红旗区',
'410703': '卫滨区',
'410704': '凤泉区',
'410711': '牧野区',
'410721': '新乡县',
'410724': '获嘉县',
'410725': '原阳县',
'410726': '延津县',
'410727': '封丘县',
'410728': '长垣县',
'410781': '卫辉市',
'410782': '辉县市',
'410800': '焦作市',
'410802': '解放区',
'410803': '中站区',
'410804': '马村区',
'410811': '山阳区',
'410821': '修武县',
'410822': '博爱县',
'410823': '武陟县',
'410825': '温县',
'410882': '沁阳市',
'410883': '孟州市',
'410900': '濮阳市',
'410902': '华龙区',
'410922': '清丰县',
'410923': '南乐县',
'410926': '范县',
'410927': '台前县',
'410928': '濮阳县',
'411000': '许昌市',
'411002': '魏都区',
'411023': '许昌县',
'411024': '鄢陵县',
'411025': '襄城县',
'411081': '禹州市',
'411082': '长葛市',
'411100': '漯河市',
'411102': '源汇区',
'411103': '郾城区',
'411104': '召陵区',
'411121': '舞阳县',
'411122': '临颍县',
'411200': '三门峡市',
'411202': '湖滨区',
'411203': '陕州区',
'411221': '渑池县',
'411224': '卢氏县',
'411281': '义马市',
'411282': '灵宝市',
'411300': '南阳市',
'411302': '宛城区',
'411303': '卧龙区',
'411321': '南召县',
'411322': '方城县',
'411323': '西峡县',
'411324': '镇平县',
'411325': '内乡县',
'411326': '淅川县',
'411327': '社旗县',
'411328': '唐河县',
'411329': '新野县',
'411330': '桐柏县',
'411381': '邓州市',
'411400': '商丘市',
'411402': '梁园区',
'411403': '睢阳区',
'411421': '民权县',
'411422': '睢县',
'411423': '宁陵县',
'411424': '柘城县',
'411425': '虞城县',
'411426': '夏邑县',
'411481': '永城市',
'411500': '信阳市',
'411502': '浉河区',
'411503': '平桥区',
'411521': '罗山县',
'411522': '光山县',
'411523': '新县',
'411524': '商城县',
'411525': '固始县',
'411526': '潢川县',
'411527': '淮滨县',
'411528': '息县',
'411600': '周口市',
'411602': '川汇区',
'411621': '扶沟县',
'411622': '西华县',
'411623': '商水县',
'411624': '沈丘县',
'411625': '郸城县',
'411626': '淮阳县',
'411627': '太康县',
'411628': '鹿邑县',
'411681': '项城市',
'411700': '驻马店市',
'411702': '驿城区',
'411721': '西平县',
'411722': '上蔡县',
'411723': '平舆县',
'411724': '正阳县',
'411725': '确山县',
'411726': '泌阳县',
'411727': '汝南县',
'411728': '遂平县',
'411729': '新蔡县',
'419001': '济源市',
'420000': '湖北省',
'420100': '武汉市',
'420102': '江岸区',
'420103': '江汉区',
'420104': '硚口区',
'420105': '汉阳区',
'420106': '武昌区',
'420107': '青山区',
'420111': '洪山区',
'420112': '东西湖区',
'420113': '汉南区',
'420114': '蔡甸区',
'420115': '江夏区',
'420116': '黄陂区',
'420117': '新洲区',
'420200': '黄石市',
'420202': '黄石港区',
'420203': '西塞山区',
'420204': '下陆区',
'420205': '铁山区',
'420222': '阳新县',
'420281': '大冶市',
'420300': '十堰市',
'420302': '茅箭区',
'420303': '张湾区',
'420304': '郧阳区',
'420322': '郧西县',
'420323': '竹山县',
'420324': '竹溪县',
'420325': '房县',
'420381': '丹江口市',
'420500': '宜昌市',
'420502': '西陵区',
'420503': '伍家岗区',
'420504': '点军区',
'420505': '猇亭区',
'420506': '夷陵区',
'420525': '远安县',
'420526': '兴山县',
'420527': '秭归县',
'420528': '长阳土家族自治县',
'420529': '五峰土家族自治县',
'420581': '宜都市',
'420582': '当阳市',
'420583': '枝江市',
'420600': '襄阳市',
'420602': '襄城区',
'420606': '樊城区',
'420607': '襄州区',
'420624': '南漳县',
'420625': '谷城县',
'420626': '保康县',
'420682': '老河口市',
'420683': '枣阳市',
'420684': '宜城市',
'420700': '鄂州市',
'420702': '梁子湖区',
'420703': '华容区',
'420704': '鄂城区',
'420800': '荆门市',
'420802': '东宝区',
'420804': '掇刀区',
'420821': '京山县',
'420822': '沙洋县',
'420881': '钟祥市',
'420900': '孝感市',
'420902': '孝南区',
'420921': '孝昌县',
'420922': '大悟县',
'420923': '云梦县',
'420981': '应城市',
'420982': '安陆市',
'420984': '汉川市',
'421000': '荆州市',
'421002': '沙市区',
'421003': '荆州区',
'421022': '公安县',
'421023': '监利县',
'421024': '江陵县',
'421081': '石首市',
'421083': '洪湖市',
'421087': '松滋市',
'421100': '黄冈市',
'421102': '黄州区',
'421121': '团风县',
'421122': '红安县',
'421123': '罗田县',
'421124': '英山县',
'421125': '浠水县',
'421126': '蕲春县',
'421127': '黄梅县',
'421181': '麻城市',
'421182': '武穴市',
'421200': '咸宁市',
'421202': '咸安区',
'421221': '嘉鱼县',
'421222': '通城县',
'421223': '崇阳县',
'421224': '通山县',
'421281': '赤壁市',
'421300': '随州市',
'421303': '曾都区',
'421321': '随县',
'421381': '广水市',
'422800': '恩施土家族苗族自治州',
'422801': '恩施市',
'422802': '利川市',
'422822': '建始县',
'422823': '巴东县',
'422825': '宣恩县',
'422826': '咸丰县',
'422827': '来凤县',
'422828': '鹤峰县',
'429004': '仙桃市',
'429005': '潜江市',
'429006': '天门市',
'429021': '神农架林区',
'430000': '湖南省',
'430100': '长沙市',
'430102': '芙蓉区',
'430103': '天心区',
'430104': '岳麓区',
'430105': '开福区',
'430111': '雨花区',
'430112': '望城区',
'430121': '长沙县',
'430124': '宁乡县',
'430181': '浏阳市',
'430200': '株洲市',
'430202': '荷塘区',
'430203': '芦淞区',
'430204': '石峰区',
'430211': '天元区',
'430221': '株洲县',
'430223': '攸县',
'430224': '茶陵县',
'430225': '炎陵县',
'430281': '醴陵市',
'430300': '湘潭市',
'430302': '雨湖区',
'430304': '岳塘区',
'430321': '湘潭县',
'430381': '湘乡市',
'430382': '韶山市',
'430400': '衡阳市',
'430405': '珠晖区',
'430406': '雁峰区',
'430407': '石鼓区',
'430408': '蒸湘区',
'430412': '南岳区',
'430421': '衡阳县',
'430422': '衡南县',
'430423': '衡山县',
'430424': '衡东县',
'430426': '祁东县',
'430481': '耒阳市',
'430482': '常宁市',
'430500': '邵阳市',
'430502': '双清区',
'430503': '大祥区',
'430511': '北塔区',
'430521': '邵东县',
'430522': '新邵县',
'430523': '邵阳县',
'430524': '隆回县',
'430525': '洞口县',
'430527': '绥宁县',
'430528': '新宁县',
'430529': '城步苗族自治县',
'430581': '武冈市',
'430600': '岳阳市',
'430602': '岳阳楼区',
'430603': '云溪区',
'430611': '君山区',
'430621': '岳阳县',
'430623': '华容县',
'430624': '湘阴县',
'430626': '平江县',
'430681': '汨罗市',
'430682': '临湘市',
'430700': '常德市',
'430702': '武陵区',
'430703': '鼎城区',
'430721': '安乡县',
'430722': '汉寿县',
'430723': '澧县',
'430724': '临澧县',
'430725': '桃源县',
'430726': '石门县',
'430781': '津市市',
'430800': '张家界市',
'430802': '永定区',
'430811': '武陵源区',
'430821': '慈利县',
'430822': '桑植县',
'430900': '益阳市',
'430902': '资阳区',
'430903': '赫山区',
'430921': '南县',
'430922': '桃江县',
'430923': '安化县',
'430981': '沅江市',
'431000': '郴州市',
'431002': '北湖区',
'431003': '苏仙区',
'431021': '桂阳县',
'431022': '宜章县',
'431023': '永兴县',
'431024': '嘉禾县',
'431025': '临武县',
'431026': '汝城县',
'431027': '桂东县',
'431028': '安仁县',
'431081': '资兴市',
'431100': '永州市',
'431102': '零陵区',
'431103': '冷水滩区',
'431121': '祁阳县',
'431122': '东安县',
'431123': '双牌县',
'431124': '道县',
'431125': '江永县',
'431126': '宁远县',
'431127': '蓝山县',
'431128': '新田县',
'431129': '江华瑶族自治县',
'431200': '怀化市',
'431202': '鹤城区',
'431221': '中方县',
'431222': '沅陵县',
'431223': '辰溪县',
'431224': '溆浦县',
'431225': '会同县',
'431226': '麻阳苗族自治县',
'431227': '新晃侗族自治县',
'431228': '芷江侗族自治县',
'431229': '靖州苗族侗族自治县',
'431230': '通道侗族自治县',
'431281': '洪江市',
'431300': '娄底市',
'431302': '娄星区',
'431321': '双峰县',
'431322': '新化县',
'431381': '冷水江市',
'431382': '涟源市',
'433100': '湘西土家族苗族自治州',
'433101': '吉首市',
'433122': '泸溪县',
'433123': '凤凰县',
'433124': '花垣县',
'433125': '保靖县',
'433126': '古丈县',
'433127': '永顺县',
'433130': '龙山县',
'440000': '广东省',
'440100': '广州市',
'440103': '荔湾区',
'440104': '越秀区',
'440105': '海珠区',
'440106': '天河区',
'440111': '白云区',
'440112': '黄埔区',
'440113': '番禺区',
'440114': '花都区',
'440115': '南沙区',
'440117': '从化区',
'440118': '增城区',
'440200': '韶关市',
'440203': '武江区',
'440204': '浈江区',
'440205': '曲江区',
'440222': '始兴县',
'440224': '仁化县',
'440229': '翁源县',
'440232': '乳源瑶族自治县',
'440233': '新丰县',
'440281': '乐昌市',
'440282': '南雄市',
'440300': '深圳市',
'440303': '罗湖区',
'440304': '福田区',
'440305': '南山区',
'440306': '宝安区',
'440307': '龙岗区',
'440308': '盐田区',
'440400': '珠海市',
'440402': '香洲区',
'440403': '斗门区',
'440404': '金湾区',
'440500': '汕头市',
'440507': '龙湖区',
'440511': '金平区',
'440512': '濠江区',
'440513': '潮阳区',
'440514': '潮南区',
'440515': '澄海区',
'440523': '南澳县',
'440600': '佛山市',
'440604': '禅城区',
'440605': '南海区',
'440606': '顺德区',
'440607': '三水区',
'440608': '高明区',
'440700': '江门市',
'440703': '蓬江区',
'440704': '江海区',
'440705': '新会区',
'440781': '台山市',
'440783': '开平市',
'440784': '鹤山市',
'440785': '恩平市',
'440800': '湛江市',
'440802': '赤坎区',
'440803': '霞山区',
'440804': '坡头区',
'440811': '麻章区',
'440823': '遂溪县',
'440825': '徐闻县',
'440881': '廉江市',
'440882': '雷州市',
'440883': '吴川市',
'440900': '茂名市',
'440902': '茂南区',
'440904': '电白区',
'440981': '高州市',
'440982': '化州市',
'440983': '信宜市',
'441200': '肇庆市',
'441202': '端州区',
'441203': '鼎湖区',
'441204': '高要区',
'441223': '广宁县',
'441224': '怀集县',
'441225': '封开县',
'441226': '德庆县',
'441284': '四会市',
'441300': '惠州市',
'441302': '惠城区',
'441303': '惠阳区',
'441322': '博罗县',
'441323': '惠东县',
'441324': '龙门县',
'441400': '梅州市',
'441402': '梅江区',
'441403': '梅县区',
'441422': '大埔县',
'441423': '丰顺县',
'441424': '五华县',
'441426': '平远县',
'441427': '蕉岭县',
'441481': '兴宁市',
'441500': '汕尾市',
'441502': '城区',
'441521': '海丰县',
'441523': '陆河县',
'441581': '陆丰市',
'441600': '河源市',
'441602': '源城区',
'441621': '紫金县',
'441622': '龙川县',
'441623': '连平县',
'441624': '和平县',
'441625': '东源县',
'441700': '阳江市',
'441702': '江城区',
'441704': '阳东区',
'441721': '阳西县',
'441781': '阳春市',
'441800': '清远市',
'441802': '清城区',
'441803': '清新区',
'441821': '佛冈县',
'441823': '阳山县',
'441825': '连山壮族瑶族自治县',
'441826': '连南瑶族自治县',
'441881': '英德市',
'441882': '连州市',
'441900': '东莞市',
'442000': '中山市',
'445100': '潮州市',
'445102': '湘桥区',
'445103': '潮安区',
'445122': '饶平县',
'445200': '揭阳市',
'445202': '榕城区',
'445203': '揭东区',
'445222': '揭西县',
'445224': '惠来县',
'445281': '普宁市',
'445300': '云浮市',
'445302': '云城区',
'445303': '云安区',
'445321': '新兴县',
'445322': '郁南县',
'445381': '罗定市',
'450000': '广西壮族自治区',
'450100': '南宁市',
'450102': '兴宁区',
'450103': '青秀区',
'450105': '江南区',
'450107': '西乡塘区',
'450108': '良庆区',
'450109': '邕宁区',
'450110': '武鸣区',
'450123': '隆安县',
'450124': '马山县',
'450125': '上林县',
'450126': '宾阳县',
'450127': '横县',
'450200': '柳州市',
'450202': '城中区',
'450203': '鱼峰区',
'450204': '柳南区',
'450205': '柳北区',
'450221': '柳江县',
'450222': '柳城县',
'450223': '鹿寨县',
'450224': '融安县',
'450225': '融水苗族自治县',
'450226': '三江侗族自治县',
'450300': '桂林市',
'450302': '秀峰区',
'450303': '叠彩区',
'450304': '象山区',
'450305': '七星区',
'450311': '雁山区',
'450312': '临桂区',
'450321': '阳朔县',
'450323': '灵川县',
'450324': '全州县',
'450325': '兴安县',
'450326': '永福县',
'450327': '灌阳县',
'450328': '龙胜各族自治县',
'450329': '资源县',
'450330': '平乐县',
'450331': '荔浦县',
'450332': '恭城瑶族自治县',
'450400': '梧州市',
'450403': '万秀区',
'450405': '长洲区',
'450406': '龙圩区',
'450421': '苍梧县',
'450422': '藤县',
'450423': '蒙山县',
'450481': '岑溪市',
'450500': '北海市',
'450502': '海城区',
'450503': '银海区',
'450512': '铁山港区',
'450521': '合浦县',
'450600': '防城港市',
'450602': '港口区',
'450603': '防城区',
'450621': '上思县',
'450681': '东兴市',
'450700': '钦州市',
'450702': '钦南区',
'450703': '钦北区',
'450721': '灵山县',
'450722': '浦北县',
'450800': '贵港市',
'450802': '港北区',
'450803': '港南区',
'450804': '覃塘区',
'450821': '平南县',
'450881': '桂平市',
'450900': '玉林市',
'450902': '玉州区',
'450903': '福绵区',
'450921': '容县',
'450922': '陆川县',
'450923': '博白县',
'450924': '兴业县',
'450981': '北流市',
'451000': '百色市',
'451002': '右江区',
'451021': '田阳县',
'451022': '田东县',
'451023': '平果县',
'451024': '德保县',
'451026': '那坡县',
'451027': '凌云县',
'451028': '乐业县',
'451029': '田林县',
'451030': '西林县',
'451031': '隆林各族自治县',
'451081': '靖西市',
'451100': '贺州市',
'451102': '八步区',
'451103': '平桂区',
'451121': '昭平县',
'451122': '钟山县',
'451123': '富川瑶族自治县',
'451200': '河池市',
'451202': '金城江区',
'451221': '南丹县',
'451222': '天峨县',
'451223': '凤山县',
'451224': '东兰县',
'451225': '罗城仫佬族自治县',
'451226': '环江毛南族自治县',
'451227': '巴马瑶族自治县',
'451228': '都安瑶族自治县',
'451229': '大化瑶族自治县',
'451281': '宜州市',
'451300': '来宾市',
'451302': '兴宾区',
'451321': '忻城县',
'451322': '象州县',
'451323': '武宣县',
'451324': '金秀瑶族自治县',
'451381': '合山市',
'451400': '崇左市',
'451402': '江州区',
'451421': '扶绥县',
'451422': '宁明县',
'451423': '龙州县',
'451424': '大新县',
'451425': '天等县',
'451481': '凭祥市',
'460000': '海南省',
'460100': '海口市',
'460105': '秀英区',
'460106': '龙华区',
'460107': '琼山区',
'460108': '美兰区',
'460200': '三亚市',
'460202': '海棠区',
'460203': '吉阳区',
'460204': '天涯区',
'460205': '崖州区',
'460300': '三沙市',
'460400': '儋州市',
'469001': '五指山市',
'469002': '琼海市',
'469005': '文昌市',
'469006': '万宁市',
'469007': '东方市',
'469021': '定安县',
'469022': '屯昌县',
'469023': '澄迈县',
'469024': '临高县',
'469025': '白沙黎族自治县',
'469026': '昌江黎族自治县',
'469027': '乐东黎族自治县',
'469028': '陵水黎族自治县',
'469029': '保亭黎族苗族自治县',
'469030': '琼中黎族苗族自治县',
'500000': '重庆市',
'500101': '万州区',
'500102': '涪陵区',
'500103': '渝中区',
'500104': '大渡口区',
'500105': '江北区',
'500106': '沙坪坝区',
'500107': '九龙坡区',
'500108': '南岸区',
'500109': '北碚区',
'500110': '綦江区',
'500111': '大足区',
'500112': '渝北区',
'500113': '巴南区',
'500114': '黔江区',
'500115': '长寿区',
'500116': '江津区',
'500117': '合川区',
'500118': '永川区',
'500119': '南川区',
'500120': '璧山区',
'500151': '铜梁区',
'500152': '潼南区',
'500153': '荣昌区',
'500154': '开州区',
'500228': '梁平县',
'500229': '城口县',
'500230': '丰都县',
'500231': '垫江县',
'500232': '武隆县',
'500233': '忠县',
'500235': '云阳县',
'500236': '奉节县',
'500237': '巫山县',
'500238': '巫溪县',
'500240': '石柱土家族自治县',
'500241': '秀山土家族苗族自治县',
'500242': '酉阳土家族苗族自治县',
'500243': '彭水苗族土家族自治县',
'510000': '四川省',
'510100': '成都市',
'510104': '锦江区',
'510105': '青羊区',
'510106': '金牛区',
'510107': '武侯区',
'510108': '成华区',
'510112': '龙泉驿区',
'510113': '青白江区',
'510114': '新都区',
'510115': '温江区',
'510116': '双流区',
'510121': '金堂县',
'510124': '郫县',
'510129': '大邑县',
'510131': '蒲江县',
'510132': '新津县',
'510181': '都江堰市',
'510182': '彭州市',
'510183': '邛崃市',
'510184': '崇州市',
'510185': '简阳市',
'510300': '自贡市',
'510302': '自流井区',
'510303': '贡井区',
'510304': '大安区',
'510311': '沿滩区',
'510321': '荣县',
'510322': '富顺县',
'510400': '攀枝花市',
'510402': '东区',
'510403': '西区',
'510411': '仁和区',
'510421': '米易县',
'510422': '盐边县',
'510500': '泸州市',
'510502': '江阳区',
'510503': '纳溪区',
'510504': '龙马潭区',
'510521': '泸县',
'510522': '合江县',
'510524': '叙永县',
'510525': '古蔺县',
'510600': '德阳市',
'510603': '旌阳区',
'510623': '中江县',
'510626': '罗江县',
'510681': '广汉市',
'510682': '什邡市',
'510683': '绵竹市',
'510700': '绵阳市',
'510703': '涪城区',
'510704': '游仙区',
'510705': '安州区',
'510722': '三台县',
'510723': '盐亭县',
'510725': '梓潼县',
'510726': '北川羌族自治县',
'510727': '平武县',
'510781': '江油市',
'510800': '广元市',
'510802': '利州区',
'510811': '昭化区',
'510812': '朝天区',
'510821': '旺苍县',
'510822': '青川县',
'510823': '剑阁县',
'510824': '苍溪县',
'510900': '遂宁市',
'510903': '船山区',
'510904': '安居区',
'510921': '蓬溪县',
'510922': '射洪县',
'510923': '大英县',
'511000': '内江市',
'511002': '市中区',
'511011': '东兴区',
'511024': '威远县',
'511025': '资中县',
'511028': '隆昌县',
'511100': '乐山市',
'511102': '市中区',
'511111': '沙湾区',
'511112': '五通桥区',
'511113': '金口河区',
'511123': '犍为县',
'511124': '井研县',
'511126': '夹江县',
'511129': '沐川县',
'511132': '峨边彝族自治县',
'511133': '马边彝族自治县',
'511181': '峨眉山市',
'511300': '南充市',
'511302': '顺庆区',
'511303': '高坪区',
'511304': '嘉陵区',
'511321': '南部县',
'511322': '营山县',
'511323': '蓬安县',
'511324': '仪陇县',
'511325': '西充县',
'511381': '阆中市',
'511400': '眉山市',
'511402': '东坡区',
'511403': '彭山区',
'511421': '仁寿县',
'511423': '洪雅县',
'511424': '丹棱县',
'511425': '青神县',
'511500': '宜宾市',
'511502': '翠屏区',
'511503': '南溪区',
'511521': '宜宾县',
'511523': '江安县',
'511524': '长宁县',
'511525': '高县',
'511526': '珙县',
'511527': '筠连县',
'511528': '兴文县',
'511529': '屏山县',
'511600': '广安市',
'511602': '广安区',
'511603': '前锋区',
'511621': '岳池县',
'511622': '武胜县',
'511623': '邻水县',
'511681': '华蓥市',
'511700': '达州市',
'511702': '通川区',
'511703': '达川区',
'511722': '宣汉县',
'511723': '开江县',
'511724': '大竹县',
'511725': '渠县',
'511781': '万源市',
'511800': '雅安市',
'511802': '雨城区',
'511803': '名山区',
'511822': '荥经县',
'511823': '汉源县',
'511824': '石棉县',
'511825': '天全县',
'511826': '芦山县',
'511827': '宝兴县',
'511900': '巴中市',
'511902': '巴州区',
'511903': '恩阳区',
'511921': '通江县',
'511922': '南江县',
'511923': '平昌县',
'512000': '资阳市',
'512002': '雁江区',
'512021': '安岳县',
'512022': '乐至县',
'513200': '阿坝藏族羌族自治州',
'513201': '马尔康区',
'513221': '汶川县',
'513222': '理县',
'513223': '茂县',
'513224': '松潘县',
'513225': '九寨沟县',
'513226': '金川县',
'513227': '小金县',
'513228': '黑水县',
'513230': '壤塘县',
'513231': '阿坝县',
'513232': '若尔盖县',
'513233': '红原县',
'513300': '甘孜藏族自治州',
'513301': '康定市',
'513322': '泸定县',
'513323': '丹巴县',
'513324': '九龙县',
'513325': '雅江县',
'513326': '道孚县',
'513327': '炉霍县',
'513328': '甘孜县',
'513329': '新龙县',
'513330': '德格县',
'513331': '白玉县',
'513332': '石渠县',
'513333': '色达县',
'513334': '理塘县',
'513335': '巴塘县',
'513336': '乡城县',
'513337': '稻城县',
'513338': '得荣县',
'513400': '凉山彝族自治州',
'513401': '西昌市',
'513422': '木里藏族自治县',
'513423': '盐源县',
'513424': '德昌县',
'513425': '会理县',
'513426': '会东县',
'513427': '宁南县',
'513428': '普格县',
'513429': '布拖县',
'513430': '金阳县',
'513431': '昭觉县',
'513432': '喜德县',
'513433': '冕宁县',
'513434': '越西县',
'513435': '甘洛县',
'513436': '美姑县',
'513437': '雷波县',
'520000': '贵州省',
'520100': '贵阳市',
'520102': '南明区',
'520103': '云岩区',
'520111': '花溪区',
'520112': '乌当区',
'520113': '白云区',
'520115': '观山湖区',
'520121': '开阳县',
'520122': '息烽县',
'520123': '修文县',
'520181': '清镇市',
'520200': '六盘水市',
'520201': '钟山区',
'520203': '六枝特区',
'520221': '水城县',
'520222': '盘县',
'520300': '遵义市',
'520302': '红花岗区',
'520303': '汇川区',
'520304': '播州区',
'520322': '桐梓县',
'520323': '绥阳县',
'520324': '正安县',
'520325': '道真仡佬族苗族自治县',
'520326': '务川仡佬族苗族自治县',
'520327': '凤冈县',
'520328': '湄潭县',
'520329': '余庆县',
'520330': '习水县',
'520381': '赤水市',
'520382': '仁怀市',
'520400': '安顺市',
'520402': '西秀区',
'520403': '平坝区',
'520422': '普定县',
'520423': '镇宁布依族苗族自治县',
'520424': '关岭布依族苗族自治县',
'520425': '紫云苗族布依族自治县',
'520500': '毕节市',
'520502': '七星关区',
'520521': '大方县',
'520522': '黔西县',
'520523': '金沙县',
'520524': '织金县',
'520525': '纳雍县',
'520526': '威宁彝族回族苗族自治县',
'520527': '赫章县',
'520600': '铜仁市',
'520602': '碧江区',
'520603': '万山区',
'520621': '江口县',
'520622': '玉屏侗族自治县',
'520623': '石阡县',
'520624': '思南县',
'520625': '印江土家族苗族自治县',
'520626': '德江县',
'520627': '沿河土家族自治县',
'520628': '松桃苗族自治县',
'522300': '黔西南布依族苗族自治州',
'522301': '兴义市',
'522322': '兴仁县',
'522323': '普安县',
'522324': '晴隆县',
'522325': '贞丰县',
'522326': '望谟县',
'522327': '册亨县',
'522328': '安龙县',
'522600': '黔东南苗族侗族自治州',
'522601': '凯里市',
'522622': '黄平县',
'522623': '施秉县',
'522624': '三穗县',
'522625': '镇远县',
'522626': '岑巩县',
'522627': '天柱县',
'522628': '锦屏县',
'522629': '剑河县',
'522630': '台江县',
'522631': '黎平县',
'522632': '榕江县',
'522633': '从江县',
'522634': '雷山县',
'522635': '麻江县',
'522636': '丹寨县',
'522700': '黔南布依族苗族自治州',
'522701': '都匀市',
'522702': '福泉市',
'522722': '荔波县',
'522723': '贵定县',
'522725': '瓮安县',
'522726': '独山县',
'522727': '平塘县',
'522728': '罗甸县',
'522729': '长顺县',
'522730': '龙里县',
'522731': '惠水县',
'522732': '三都水族自治县',
'530000': '云南省',
'530100': '昆明市',
'530102': '五华区',
'530103': '盘龙区',
'530111': '官渡区',
'530112': '西山区',
'530113': '东川区',
'530114': '呈贡区',
'530122': '晋宁县',
'530124': '富民县',
'530125': '宜良县',
'530126': '石林彝族自治县',
'530127': '嵩明县',
'530128': '禄劝彝族苗族自治县',
'530129': '寻甸回族彝族自治县',
'530181': '安宁市',
'530300': '曲靖市',
'530302': '麒麟区',
'530303': '沾益区',
'530321': '马龙县',
'530322': '陆良县',
'530323': '师宗县',
'530324': '罗平县',
'530325': '富源县',
'530326': '会泽县',
'530381': '宣威市',
'530400': '玉溪市',
'530402': '红塔区',
'530403': '江川区',
'530422': '澄江县',
'530423': '通海县',
'530424': '华宁县',
'530425': '易门县',
'530426': '峨山彝族自治县',
'530427': '新平彝族傣族自治县',
'530428': '元江哈尼族彝族傣族自治县',
'530500': '保山市',
'530502': '隆阳区',
'530521': '施甸县',
'530581': '腾冲市',
'530523': '龙陵县',
'530524': '昌宁县',
'530600': '昭通市',
'530602': '昭阳区',
'530621': '鲁甸县',
'530622': '巧家县',
'530623': '盐津县',
'530624': '大关县',
'530625': '永善县',
'530626': '绥江县',
'530627': '镇雄县',
'530628': '彝良县',
'530629': '威信县',
'530630': '水富县',
'530700': '丽江市',
'530702': '古城区',
'530721': '玉龙纳西族自治县',
'530722': '永胜县',
'530723': '华坪县',
'530724': '宁蒗彝族自治县',
'530800': '普洱市',
'530802': '思茅区',
'530821': '宁洱哈尼族彝族自治县',
'530822': '墨江哈尼族自治县',
'530823': '景东彝族自治县',
'530824': '景谷傣族彝族自治县',
'530825': '镇沅彝族哈尼族拉祜族自治县',
'530826': '江城哈尼族彝族自治县',
'530827': '孟连傣族拉祜族佤族自治县',
'530828': '澜沧拉祜族自治县',
'530829': '西盟佤族自治县',
'530900': '临沧市',
'530902': '临翔区',
'530921': '凤庆县',
'530922': '云县',
'530923': '永德县',
'530924': '镇康县',
'530925': '双江拉祜族佤族布朗族傣族自治县',
'530926': '耿马傣族佤族自治县',
'530927': '沧源佤族自治县',
'532300': '楚雄彝族自治州',
'532301': '楚雄市',
'532322': '双柏县',
'532323': '牟定县',
'532324': '南华县',
'532325': '姚安县',
'532326': '大姚县',
'532327': '永仁县',
'532328': '元谋县',
'532329': '武定县',
'532331': '禄丰县',
'532500': '红河哈尼族彝族自治州',
'532501': '个旧市',
'532502': '开远市',
'532503': '蒙自市',
'532504': '弥勒市',
'532523': '屏边苗族自治县',
'532524': '建水县',
'532525': '石屏县',
'532527': '泸西县',
'532528': '元阳县',
'532529': '红河县',
'532530': '金平苗族瑶族傣族自治县',
'532531': '绿春县',
'532532': '河口瑶族自治县',
'532600': '文山壮族苗族自治州',
'532601': '文山市',
'532622': '砚山县',
'532623': '西畴县',
'532624': '麻栗坡县',
'532625': '马关县',
'532626': '丘北县',
'532627': '广南县',
'532628': '富宁县',
'532800': '西双版纳傣族自治州',
'532801': '景洪市',
'532822': '勐海县',
'532823': '勐腊县',
'532900': '大理白族自治州',
'532901': '大理市',
'532922': '漾濞彝族自治县',
'532923': '祥云县',
'532924': '宾川县',
'532925': '弥渡县',
'532926': '南涧彝族自治县',
'532927': '巍山彝族回族自治县',
'532928': '永平县',
'532929': '云龙县',
'532930': '洱源县',
'532931': '剑川县',
'532932': '鹤庆县',
'533100': '德宏傣族景颇族自治州',
'533102': '瑞丽市',
'533103': '芒市',
'533122': '梁河县',
'533123': '盈江县',
'533124': '陇川县',
'533300': '怒江傈僳族自治州',
'533301': '泸水市',
'533323': '福贡县',
'533324': '贡山独龙族怒族自治县',
'533325': '兰坪白族普米族自治县',
'533400': '迪庆藏族自治州',
'533401': '香格里拉市',
'533422': '德钦县',
'533423': '维西傈僳族自治县',
'540000': '西藏自治区',
'540100': '拉萨市',
'540102': '城关区',
'540103': '堆龙德庆区',
'540121': '林周县',
'540122': '当雄县',
'540123': '尼木县',
'540124': '曲水县',
'540126': '达孜县',
'540127': '墨竹工卡县',
'540200': '日喀则市',
'540202': '桑珠孜区',
'540221': '南木林县',
'540222': '江孜县',
'540223': '定日县',
'540224': '萨迦县',
'540225': '拉孜县',
'540226': '昂仁县',
'540227': '谢通门县',
'540228': '白朗县',
'540229': '仁布县',
'540230': '康马县',
'540231': '定结县',
'540232': '仲巴县',
'540233': '亚东县',
'540234': '吉隆县',
'540235': '聂拉木县',
'540236': '萨嘎县',
'540237': '岗巴县',
'540300': '昌都市',
'540302': '卡若区',
'540321': '江达县',
'540322': '贡觉县',
'540323': '类乌齐县',
'540324': '丁青县',
'540325': '察雅县',
'540326': '八宿县',
'540327': '左贡县',
'540328': '芒康县',
'540329': '洛隆县',
'540330': '边坝县',
'540400': '林芝市',
'540402': '巴宜区',
'540421': '工布江达县',
'540422': '米林县',
'540423': '墨脱县',
'540424': '波密县',
'540425': '察隅县',
'540426': '朗县',
'540500': '山南市',
'540502': '乃东区',
'540521': '扎囊县',
'540522': '贡嘎县',
'540523': '桑日县',
'540524': '琼结县',
'540525': '曲松县',
'540526': '措美县',
'540527': '洛扎县',
'540528': '加查县',
'540529': '隆子县',
'540530': '错那县',
'540531': '浪卡子县',
'542400': '那曲地区',
'542421': '那曲县',
'542422': '嘉黎县',
'542423': '比如县',
'542424': '聂荣县',
'542425': '安多县',
'542426': '申扎县',
'542427': '索县',
'542428': '班戈县',
'542429': '巴青县',
'542430': '尼玛县',
'542431': '双湖县',
'542500': '阿里地区',
'542521': '普兰县',
'542522': '札达县',
'542523': '噶尔县',
'542524': '日土县',
'542525': '革吉县',
'542526': '改则县',
'542527': '措勤县',
'610000': '陕西省',
'610100': '西安市',
'610102': '新城区',
'610103': '碑林区',
'610104': '莲湖区',
'610111': '灞桥区',
'610112': '未央区',
'610113': '雁塔区',
'610114': '阎良区',
'610115': '临潼区',
'610116': '长安区',
'610117': '高陵区',
'610122': '蓝田县',
'610124': '周至县',
'610125': '户县',
'610200': '铜川市',
'610202': '王益区',
'610203': '印台区',
'610204': '耀州区',
'610222': '宜君县',
'610300': '宝鸡市',
'610302': '渭滨区',
'610303': '金台区',
'610304': '陈仓区',
'610322': '凤翔县',
'610323': '岐山县',
'610324': '扶风县',
'610326': '眉县',
'610327': '陇县',
'610328': '千阳县',
'610329': '麟游县',
'610330': '凤县',
'610331': '太白县',
'610400': '咸阳市',
'610402': '秦都区',
'610403': '杨陵区',
'610404': '渭城区',
'610422': '三原县',
'610423': '泾阳县',
'610424': '乾县',
'610425': '礼泉县',
'610426': '永寿县',
'610427': '彬县',
'610428': '长武县',
'610429': '旬邑县',
'610430': '淳化县',
'610431': '武功县',
'610481': '兴平市',
'610500': '渭南市',
'610502': '临渭区',
'610503': '华州区',
'610522': '潼关县',
'610523': '大荔县',
'610524': '合阳县',
'610525': '澄城县',
'610526': '蒲城县',
'610527': '白水县',
'610528': '富平县',
'610581': '韩城市',
'610582': '华阴市',
'610600': '延安市',
'610602': '宝塔区',
'610621': '延长县',
'610622': '延川县',
'610623': '子长县',
'610624': '安塞县',
'610625': '志丹县',
'610626': '吴起县',
'610627': '甘泉县',
'610628': '富县',
'610629': '洛川县',
'610630': '宜川县',
'610631': '黄龙县',
'610632': '黄陵县',
'610700': '汉中市',
'610702': '汉台区',
'610721': '南郑县',
'610722': '城固县',
'610723': '洋县',
'610724': '西乡县',
'610725': '勉县',
'610726': '宁强县',
'610727': '略阳县',
'610728': '镇巴县',
'610729': '留坝县',
'610730': '佛坪县',
'610800': '榆林市',
'610802': '榆阳区',
'610803': '横山区',
'610821': '神木县',
'610822': '府谷县',
'610824': '靖边县',
'610825': '定边县',
'610826': '绥德县',
'610827': '米脂县',
'610828': '佳县',
'610829': '吴堡县',
'610830': '清涧县',
'610831': '子洲县',
'610900': '安康市',
'610902': '汉滨区',
'610921': '汉阴县',
'610922': '石泉县',
'610923': '宁陕县',
'610924': '紫阳县',
'610925': '岚皋县',
'610926': '平利县',
'610927': '镇坪县',
'610928': '旬阳县',
'610929': '白河县',
'611000': '商洛市',
'611002': '商州区',
'611021': '洛南县',
'611022': '丹凤县',
'611023': '商南县',
'611024': '山阳县',
'611025': '镇安县',
'611026': '柞水县',
'620000': '甘肃省',
'620100': '兰州市',
'620102': '城关区',
'620103': '七里河区',
'620104': '西固区',
'620105': '安宁区',
'620111': '红古区',
'620121': '永登县',
'620122': '皋兰县',
'620123': '榆中县',
'620200': '嘉峪关市',
'620300': '金昌市',
'620302': '金川区',
'620321': '永昌县',
'620400': '白银市',
'620402': '白银区',
'620403': '平川区',
'620421': '靖远县',
'620422': '会宁县',
'620423': '景泰县',
'620500': '天水市',
'620502': '秦州区',
'620503': '麦积区',
'620521': '清水县',
'620522': '秦安县',
'620523': '甘谷县',
'620524': '武山县',
'620525': '张家川回族自治县',
'620600': '武威市',
'620602': '凉州区',
'620621': '民勤县',
'620622': '古浪县',
'620623': '天祝藏族自治县',
'620700': '张掖市',
'620702': '甘州区',
'620721': '肃南裕固族自治县',
'620722': '民乐县',
'620723': '临泽县',
'620724': '高台县',
'620725': '山丹县',
'620800': '平凉市',
'620802': '崆峒区',
'620821': '泾川县',
'620822': '灵台县',
'620823': '崇信县',
'620824': '华亭县',
'620825': '庄浪县',
'620826': '静宁县',
'620900': '酒泉市',
'620902': '肃州区',
'620921': '金塔县',
'620922': '瓜州县',
'620923': '肃北蒙古族自治县',
'620924': '阿克塞哈萨克族自治县',
'620981': '玉门市',
'620982': '敦煌市',
'621000': '庆阳市',
'621002': '西峰区',
'621021': '庆城县',
'621022': '环县',
'621023': '华池县',
'621024': '合水县',
'621025': '正宁县',
'621026': '宁县',
'621027': '镇原县',
'621100': '定西市',
'621102': '安定区',
'621121': '通渭县',
'621122': '陇西县',
'621123': '渭源县',
'621124': '临洮县',
'621125': '漳县',
'621126': '岷县',
'621200': '陇南市',
'621202': '武都区',
'621221': '成县',
'621222': '文县',
'621223': '宕昌县',
'621224': '康县',
'621225': '西和县',
'621226': '礼县',
'621227': '徽县',
'621228': '两当县',
'622900': '临夏回族自治州',
'622901': '临夏市',
'622921': '临夏县',
'622922': '康乐县',
'622923': '永靖县',
'622924': '广河县',
'622925': '和政县',
'622926': '东乡族自治县',
'622927': '积石山保安族东乡族撒拉族自治县',
'623000': '甘南藏族自治州',
'623001': '合作市',
'623021': '临潭县',
'623022': '卓尼县',
'623023': '舟曲县',
'623024': '迭部县',
'623025': '玛曲县',
'623026': '碌曲县',
'623027': '夏河县',
'630000': '青海省',
'630100': '西宁市',
'630102': '城东区',
'630103': '城中区',
'630104': '城西区',
'630105': '城北区',
'630121': '大通回族土族自治县',
'630122': '湟中县',
'630123': '湟源县',
'630200': '海东市',
'630202': '乐都区',
'630203': '平安区',
'630222': '民和回族土族自治县',
'630223': '互助土族自治县',
'630224': '化隆回族自治县',
'630225': '循化撒拉族自治县',
'632200': '海北藏族自治州',
'632221': '门源回族自治县',
'632222': '祁连县',
'632223': '海晏县',
'632224': '刚察县',
'632300': '黄南藏族自治州',
'632321': '同仁县',
'632322': '尖扎县',
'632323': '泽库县',
'632324': '河南蒙古族自治县',
'632500': '海南藏族自治州',
'632521': '共和县',
'632522': '同德县',
'632523': '贵德县',
'632524': '兴海县',
'632525': '贵南县',
'632600': '果洛藏族自治州',
'632621': '玛沁县',
'632622': '班玛县',
'632623': '甘德县',
'632624': '达日县',
'632625': '久治县',
'632626': '玛多县',
'632700': '玉树藏族自治州',
'632701': '玉树市',
'632722': '杂多县',
'632723': '称多县',
'632724': '治多县',
'632725': '囊谦县',
'632726': '曲麻莱县',
'632800': '海西蒙古族藏族自治州',
'632801': '格尔木市',
'632802': '德令哈市',
'632821': '乌兰县',
'632822': '都兰县',
'632823': '天峻县',
'640000': '宁夏回族自治区',
'640100': '银川市',
'640104': '兴庆区',
'640105': '西夏区',
'640106': '金凤区',
'640121': '永宁县',
'640122': '贺兰县',
'640181': '灵武市',
'640200': '石嘴山市',
'640202': '大武口区',
'640205': '惠农区',
'640221': '平罗县',
'640300': '吴忠市',
'640302': '利通区',
'640303': '红寺堡区',
'640323': '盐池县',
'640324': '同心县',
'640381': '青铜峡市',
'640400': '固原市',
'640402': '原州区',
'640422': '西吉县',
'640423': '隆德县',
'640424': '泾源县',
'640425': '彭阳县',
'640500': '中卫市',
'640502': '沙坡头区',
'640521': '中宁县',
'640522': '海原县',
'650000': '新疆维吾尔自治区',
'650100': '乌鲁木齐市',
'650102': '天山区',
'650103': '沙依巴克区',
'650104': '新市区',
'650105': '水磨沟区',
'650106': '头屯河区',
'650107': '达坂城区',
'650109': '米东区',
'650121': '乌鲁木齐县',
'650200': '克拉玛依市',
'650202': '独山子区',
'650203': '克拉玛依区',
'650204': '白碱滩区',
'650205': '乌尔禾区',
'650400': '吐鲁番市',
'650402': '高昌区',
'650421': '鄯善县',
'650422': '托克逊县',
'650500': '哈密市',
'650502': '伊州区',
'650521': '巴里坤哈萨克自治县',
'650522': '伊吾县',
'652300': '昌吉回族自治州',
'652301': '昌吉市',
'652302': '阜康市',
'652323': '呼图壁县',
'652324': '玛纳斯县',
'652325': '奇台县',
'652327': '吉木萨尔县',
'652328': '木垒哈萨克自治县',
'652700': '博尔塔拉蒙古自治州',
'652701': '博乐市',
'652702': '阿拉山口市',
'652722': '精河县',
'652723': '温泉县',
'652800': '巴音郭楞蒙古自治州',
'652801': '库尔勒市',
'652822': '轮台县',
'652823': '尉犁县',
'652824': '若羌县',
'652825': '且末县',
'652826': '焉耆回族自治县',
'652827': '和静县',
'652828': '和硕县',
'652829': '博湖县',
'652900': '阿克苏地区',
'652901': '阿克苏市',
'652922': '温宿县',
'652923': '库车县',
'652924': '沙雅县',
'652925': '新和县',
'652926': '拜城县',
'652927': '乌什县',
'652928': '阿瓦提县',
'652929': '柯坪县',
'653000': '克孜勒苏柯尔克孜自治州',
'653001': '阿图什市',
'653022': '阿克陶县',
'653023': '阿合奇县',
'653024': '乌恰县',
'653100': '喀什地区',
'653101': '喀什市',
'653121': '疏附县',
'653122': '疏勒县',
'653123': '英吉沙县',
'653124': '泽普县',
'653125': '莎车县',
'653126': '叶城县',
'653127': '麦盖提县',
'653128': '岳普湖县',
'653129': '伽师县',
'653130': '巴楚县',
'653131': '塔什库尔干塔吉克自治县',
'653200': '和田地区',
'653201': '和田市',
'653221': '和田县',
'653222': '墨玉县',
'653223': '皮山县',
'653224': '洛浦县',
'653225': '策勒县',
'653226': '于田县',
'653227': '民丰县',
'654000': '伊犁哈萨克自治州',
'654002': '伊宁市',
'654003': '奎屯市',
'654004': '霍尔果斯市',
'654021': '伊宁县',
'654022': '察布查尔锡伯自治县',
'654023': '霍城县',
'654024': '巩留县',
'654025': '新源县',
'654026': '昭苏县',
'654027': '特克斯县',
'654028': '尼勒克县',
'654200': '塔城地区',
'654201': '塔城市',
'654202': '乌苏市',
'654221': '额敏县',
'654223': '沙湾县',
'654224': '托里县',
'654225': '裕民县',
'654226': '和布克赛尔蒙古自治县',
'654300': '阿勒泰地区',
'654301': '阿勒泰市',
'654321': '布尔津县',
'654322': '富蕴县',
'654323': '福海县',
'654324': '哈巴河县',
'654325': '青河县',
'654326': '吉木乃县',
'659001': '石河子市',
'659002': '阿拉尔市',
'659003': '图木舒克市',
'659004': '五家渠市',
'659005': '北屯市',
'659006': '铁门关市',
'659007': '双河市',
'659008': '可克达拉市',
'710000': '台湾省',
'810000': '香港特别行政区',
'820000': '澳门特别行政区',
} | PypiClean |
/MuPhyN-0.1.1.post4-py3-none-any.whl/muphyn/packages/interface/properties_pages/properties_widget.py |
from typing import List, Any
from PyQt6.QtWidgets import QWidget, QVBoxLayout, QLayout
from muphyn.packages.interface.graphical_actions.actions_holder import ActionsHolder
from muphyn.packages.interface.properties_pages.abstract_properties_editor import AbstractPropertiesEditor
from muphyn.packages.interface.properties_pages.properties_page_builder import get_properties_page
#-----------------------------------
# Class
#-----------------------------------
class PropertiesWidget (QWidget) :
"""Est le widget qui affiche les propriétés des éléments sélectionnés."""
# -------------
# Constructors
# -------------
def __init__(self, parent : QWidget = None) :
QWidget.__init__(self, parent)
# Init VBox Layout
vBoxLayout = QVBoxLayout()
vBoxLayout.setSizeConstraint(QLayout.SizeConstraint.SetMinAndMaxSize)
self.setLayout(vBoxLayout)
self._properties_editors : List[AbstractPropertiesEditor] = []
self._actions_holder = None
self._current_model = None
# -------------
# Properties
# -------------
@property
def actions_holder (self) -> ActionsHolder :
"""Permet de récuperer l'actions holder à utiliser pour les widgets de propriétés."""
return self._actions_holder
@actions_holder.setter
def actions_holder (self, actions_holder_ : ActionsHolder) -> None :
"""Permet de modifier l'actions holder à utiliser pour les widgets de propriétés."""
self._actions_holder = actions_holder_
for property_editor in self._properties_editors :
property_editor.actions_holder = self._actions_holder
@property
def current_model (self) -> Any :
"""Permet de récuperer le modèle actuellement en cours d'édition."""
return self._current_model
@current_model.setter
def current_model (self, current_model_ : Any) -> None :
"""Permet de modifier le modèle actuellement en cours d'édition."""
for editor in self._properties_editors :
# Remove all items from layout
if self.layout() is not None:
self.layout().removeWidget(editor)
# Disable update from property editor
editor.setUpdatesEnabled(False)
editor.actions_holder = None
editor.unload()
editor.deleteLater()
# Clear list of property editors
self._properties_editors.clear()
self._current_model = current_model_
# Build List of Property Editors
for rowIndex, editor in enumerate(get_properties_page(self._current_model)):
editor.actions_holder = self.actions_holder
self._properties_editors.append(editor)
self.layout().addWidget(editor) | PypiClean |
/Aruna-Python-API-1.1.0rc6.tar.gz/Aruna-Python-API-1.1.0rc6/aruna/api/internal/v1/notification_pb2_grpc.py | """Client and server classes corresponding to protobuf-defined services."""
import grpc
from aruna.api.internal.v1 import notification_pb2 as aruna_dot_api_dot_internal_dot_v1_dot_notification__pb2
class InternalEventEmitterServiceStub(object):
"""Service hosted by the notification service application
the API server emits events to the notification service
Server --> Notification System
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.EmitEvent = channel.unary_unary(
'/aruna.api.internal.v1.InternalEventEmitterService/EmitEvent',
request_serializer=aruna_dot_api_dot_internal_dot_v1_dot_notification__pb2.EmitEventRequest.SerializeToString,
response_deserializer=aruna_dot_api_dot_internal_dot_v1_dot_notification__pb2.EmitEventResponse.FromString,
)
class InternalEventEmitterServiceServicer(object):
"""Service hosted by the notification service application
the API server emits events to the notification service
Server --> Notification System
"""
def EmitEvent(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_InternalEventEmitterServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'EmitEvent': grpc.unary_unary_rpc_method_handler(
servicer.EmitEvent,
request_deserializer=aruna_dot_api_dot_internal_dot_v1_dot_notification__pb2.EmitEventRequest.FromString,
response_serializer=aruna_dot_api_dot_internal_dot_v1_dot_notification__pb2.EmitEventResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'aruna.api.internal.v1.InternalEventEmitterService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class InternalEventEmitterService(object):
"""Service hosted by the notification service application
the API server emits events to the notification service
Server --> Notification System
"""
@staticmethod
def EmitEvent(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/aruna.api.internal.v1.InternalEventEmitterService/EmitEvent',
aruna_dot_api_dot_internal_dot_v1_dot_notification__pb2.EmitEventRequest.SerializeToString,
aruna_dot_api_dot_internal_dot_v1_dot_notification__pb2.EmitEventResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
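# Illustrative client sketch (not part of the generated module; the target
# address below is a placeholder):
#
#   import grpc
#   from aruna.api.internal.v1 import notification_pb2, notification_pb2_grpc
#
#   channel = grpc.insecure_channel("localhost:50051")
#   stub = notification_pb2_grpc.InternalEventEmitterServiceStub(channel)
#   response = stub.EmitEvent(notification_pb2.EmitEventRequest())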
class InternalEventServiceStub(object):
"""Service that allows the notification service to issue requests
to the server application
Notification System --> Server
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.CreateStreamGroup = channel.unary_unary(
'/aruna.api.internal.v1.InternalEventService/CreateStreamGroup',
request_serializer=aruna_dot_api_dot_internal_dot_v1_dot_notification__pb2.CreateStreamGroupRequest.SerializeToString,
response_deserializer=aruna_dot_api_dot_internal_dot_v1_dot_notification__pb2.CreateStreamGroupResponse.FromString,
)
self.GetStreamGroup = channel.unary_unary(
'/aruna.api.internal.v1.InternalEventService/GetStreamGroup',
request_serializer=aruna_dot_api_dot_internal_dot_v1_dot_notification__pb2.GetStreamGroupRequest.SerializeToString,
response_deserializer=aruna_dot_api_dot_internal_dot_v1_dot_notification__pb2.GetStreamGroupResponse.FromString,
)
self.DeleteStreamGroup = channel.unary_unary(
'/aruna.api.internal.v1.InternalEventService/DeleteStreamGroup',
request_serializer=aruna_dot_api_dot_internal_dot_v1_dot_notification__pb2.DeleteStreamGroupRequest.SerializeToString,
response_deserializer=aruna_dot_api_dot_internal_dot_v1_dot_notification__pb2.DeleteStreamGroupResponse.FromString,
)
self.GetSharedRevision = channel.unary_unary(
'/aruna.api.internal.v1.InternalEventService/GetSharedRevision',
request_serializer=aruna_dot_api_dot_internal_dot_v1_dot_notification__pb2.GetSharedRevisionRequest.SerializeToString,
response_deserializer=aruna_dot_api_dot_internal_dot_v1_dot_notification__pb2.GetSharedRevisionResponse.FromString,
)
class InternalEventServiceServicer(object):
"""Service that allows the notification service to issue requests
to the server application
Notification System --> Server
"""
def CreateStreamGroup(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetStreamGroup(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteStreamGroup(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetSharedRevision(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_InternalEventServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'CreateStreamGroup': grpc.unary_unary_rpc_method_handler(
servicer.CreateStreamGroup,
request_deserializer=aruna_dot_api_dot_internal_dot_v1_dot_notification__pb2.CreateStreamGroupRequest.FromString,
response_serializer=aruna_dot_api_dot_internal_dot_v1_dot_notification__pb2.CreateStreamGroupResponse.SerializeToString,
),
'GetStreamGroup': grpc.unary_unary_rpc_method_handler(
servicer.GetStreamGroup,
request_deserializer=aruna_dot_api_dot_internal_dot_v1_dot_notification__pb2.GetStreamGroupRequest.FromString,
response_serializer=aruna_dot_api_dot_internal_dot_v1_dot_notification__pb2.GetStreamGroupResponse.SerializeToString,
),
'DeleteStreamGroup': grpc.unary_unary_rpc_method_handler(
servicer.DeleteStreamGroup,
request_deserializer=aruna_dot_api_dot_internal_dot_v1_dot_notification__pb2.DeleteStreamGroupRequest.FromString,
response_serializer=aruna_dot_api_dot_internal_dot_v1_dot_notification__pb2.DeleteStreamGroupResponse.SerializeToString,
),
'GetSharedRevision': grpc.unary_unary_rpc_method_handler(
servicer.GetSharedRevision,
request_deserializer=aruna_dot_api_dot_internal_dot_v1_dot_notification__pb2.GetSharedRevisionRequest.FromString,
response_serializer=aruna_dot_api_dot_internal_dot_v1_dot_notification__pb2.GetSharedRevisionResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'aruna.api.internal.v1.InternalEventService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class InternalEventService(object):
"""Service that allows the notification service to issue requests
to the server application
Notification System --> Server
"""
@staticmethod
def CreateStreamGroup(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/aruna.api.internal.v1.InternalEventService/CreateStreamGroup',
aruna_dot_api_dot_internal_dot_v1_dot_notification__pb2.CreateStreamGroupRequest.SerializeToString,
aruna_dot_api_dot_internal_dot_v1_dot_notification__pb2.CreateStreamGroupResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetStreamGroup(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/aruna.api.internal.v1.InternalEventService/GetStreamGroup',
aruna_dot_api_dot_internal_dot_v1_dot_notification__pb2.GetStreamGroupRequest.SerializeToString,
aruna_dot_api_dot_internal_dot_v1_dot_notification__pb2.GetStreamGroupResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteStreamGroup(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/aruna.api.internal.v1.InternalEventService/DeleteStreamGroup',
aruna_dot_api_dot_internal_dot_v1_dot_notification__pb2.DeleteStreamGroupRequest.SerializeToString,
aruna_dot_api_dot_internal_dot_v1_dot_notification__pb2.DeleteStreamGroupResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetSharedRevision(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/aruna.api.internal.v1.InternalEventService/GetSharedRevision',
aruna_dot_api_dot_internal_dot_v1_dot_notification__pb2.GetSharedRevisionRequest.SerializeToString,
aruna_dot_api_dot_internal_dot_v1_dot_notification__pb2.GetSharedRevisionResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata) | PypiClean |
/MetaGram-2.0.2.tar.gz/MetaGram-2.0.2/pyrogram/types/inline_mode/inline_query_result_document.py |
from typing import Optional, List
import pyrogram
from pyrogram import raw, types, utils, enums
from .inline_query_result import InlineQueryResult
class InlineQueryResultDocument(InlineQueryResult):
"""Link to a file.
By default, this file will be sent by the user with an optional caption.
Alternatively, you can use *input_message_content* to send a message with the specified content instead of the file.
Parameters:
document_url (``str``):
A valid URL for the file.
title (``str``):
Title for the result.
mime_type (``str``, *optional*):
Mime type of the content of the file, either “application/pdf” or “application/zip”.
Defaults to "application/zip".
id (``str``, *optional*):
Unique identifier for this result, 1-64 bytes.
Defaults to a randomly generated UUID4.
caption (``str``, *optional*):
Caption of the video to be sent, 0-1024 characters.
parse_mode (:obj:`~pyrogram.enums.ParseMode`, *optional*):
By default, texts are parsed using both Markdown and HTML styles.
You can combine both syntaxes together.
caption_entities (List of :obj:`~pyrogram.types.MessageEntity`):
List of special entities that appear in the caption, which can be specified instead of *parse_mode*.
description (``str``, *optional*):
Short description of the result.
reply_markup (:obj:`~pyrogram.types.InlineKeyboardMarkup`, *optional*):
Inline keyboard attached to the message.
input_message_content (:obj:`~pyrogram.types.InputMessageContent`):
Content of the message to be sent instead of the file.
thumb_url (``str``, *optional*):
Url of the thumbnail for the result.
thumb_width (``int``, *optional*):
Thumbnail width.
thumb_height (``int``, *optional*):
Thumbnail height.
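    Example:
        An illustrative usage sketch (the session name, URL and handler below
        are placeholders, not part of the original class):
            from pyrogram import Client
            from pyrogram.types import InlineQueryResultDocument
            app = Client("my_bot")
            @app.on_inline_query()
            async def answer(client, inline_query):
                await inline_query.answer([
                    InlineQueryResultDocument(
                        document_url="https://example.com/report.pdf",
                        title="Monthly report",
                        mime_type="application/pdf",
                        caption="Latest report"
                    )
                ])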
"""
def __init__(
self,
document_url: str,
title: str,
mime_type: str = "application/zip",
id: str = None,
caption: str = "",
parse_mode: Optional["enums.ParseMode"] = None,
caption_entities: List["types.MessageEntity"] = None,
description: str = "",
reply_markup: "types.InlineKeyboardMarkup" = None,
input_message_content: "types.InputMessageContent" = None,
thumb_url: str = None,
thumb_width: int = 0,
thumb_height: int = 0
):
super().__init__("file", id, input_message_content, reply_markup)
self.document_url = document_url
self.title = title
self.mime_type = mime_type
self.caption = caption
self.parse_mode = parse_mode
self.caption_entities = caption_entities
self.description = description
self.thumb_url = thumb_url
self.thumb_width = thumb_width
self.thumb_height = thumb_height
async def write(self, client: "pyrogram.Client"):
document = raw.types.InputWebDocument(
url=self.document_url,
size=0,
mime_type=self.mime_type,
attributes=[]
)
thumb = raw.types.InputWebDocument(
url=self.thumb_url,
size=0,
mime_type="image/jpeg",
attributes=[
raw.types.DocumentAttributeImageSize(
w=self.thumb_width,
h=self.thumb_height
)
]
) if self.thumb_url else None
message, entities = (await utils.parse_text_entities(
client, self.caption, self.parse_mode, self.caption_entities
)).values()
return raw.types.InputBotInlineResult(
id=self.id,
type=self.type,
title=self.title,
description=self.description,
thumb=thumb,
content=document,
send_message=(
await self.input_message_content.write(client, self.reply_markup)
if self.input_message_content
else raw.types.InputBotInlineMessageMediaAuto(
reply_markup=await self.reply_markup.write(client) if self.reply_markup else None,
message=message,
entities=entities
)
)
) | PypiClean |
/NNGT-2.7.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl/nngt/geometry/triangulate.py | from OpenGL.GLU import *
from OpenGL.GL import *
from shapely.geometry import Polygon, MultiPolygon
import numpy as np
def triangulate(polygon):
"""
Returns a triangulation of `polygon`.
Parameters
----------
polygon : a :class:`Shape` object or a :class:`~shapely.geometry.Polygon`
or a :class:`~shapely.geometry.MultiPolygon` which will be decomposed
into triangles.
Returns
-------
triangles : generator containing triplets of triangle vertices.
"""
vertices = []
# opengl callbacks
def _edgeFlagCallback(param1, param2): pass
def _beginCallback(param=None):
vertices = []
def _vertexCallback(vertex, otherData=None):
vertices.append(vertex[:2])
def _combineCallback(vertex, neighbors, neighborWeights, out=None):
out = vertex
return out
def _endCallback(data=None): pass
# init tesselation
tess = gluNewTess()
gluTessProperty(tess, GLU_TESS_WINDING_RULE, GLU_TESS_WINDING_ODD)
# force triangulation of polygons (i.e. GL_TRIANGLES) rather than
# returning triangle fans or strips
gluTessCallback(tess, GLU_TESS_EDGE_FLAG_DATA, _edgeFlagCallback)
gluTessCallback(tess, GLU_TESS_BEGIN, _beginCallback)
gluTessCallback(tess, GLU_TESS_VERTEX, _vertexCallback)
gluTessCallback(tess, GLU_TESS_COMBINE, _combineCallback)
gluTessCallback(tess, GLU_TESS_END, _endCallback)
gluTessBeginPolygon(tess, 0)
# first handle the main polygon(s)
if isinstance(polygon, Polygon):
_tesselate(tess, polygon)
elif isinstance(polygon, MultiPolygon):
for p in polygon.geoms:
_tesselate(tess, p)
# finish polygon and remove tesselator
gluTessEndPolygon(tess)
gluDeleteTess(tess)
return ((vertices[i], vertices[i+1], vertices[i+2])
for i in range(0, len(vertices), 3))
def rnd_pts_in_tr(triangles, num_points):
'''
Generate random points in a set of triangles.
Parameters
----------
triangles : list of :class:`shapely.geometry.Polygon` triangles
num_points : number of points to generate.
Returns
-------
points : np.array of shape (`num_points`, 2)
'''
# normalized areas
areas = [t.area for t in triangles]
areas = np.array(areas) / np.sum(areas)
# vertices and triangle ids
verts = np.array([t.exterior.coords for t in triangles])
idx = np.arange(0, len(triangles), dtype=int)
# choose triangle based on its area
chosen_idx = np.random.choice(idx, size=num_points, p=areas)
chosen = verts[chosen_idx]
# generate random points inside these triangles
r1, r2 = np.random.rand(2, num_points)
As = chosen[:, 0, :]
Bs = chosen[:, 1, :]
Cs = chosen[:, 2, :]
points = (As.T*(1 - np.sqrt(r1))).T \
+ (Bs.T*(np.sqrt(r1)*(1 - r2))).T \
+ (Cs.T*(np.sqrt(r1)*r2)).T
return points
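# Illustrative usage sketch (not part of the original module): the square and
# hole coordinates and the point count below are arbitrary placeholders, and
# triangulate() needs a working OpenGL/GLU installation.  Wrapping the demo in
# a function keeps it from running at import time.
def _example_triangulate_and_sample(num_points=1000):
    """Triangulate a square with a square hole and sample points inside it."""
    outer = [(0, 0), (10, 0), (10, 10), (0, 10)]
    hole = [(4, 4), (6, 4), (6, 6), (4, 6)]
    shape = Polygon(outer, [hole])
    # triangulate() yields vertex triplets; wrap them as shapely Polygons so
    # rnd_pts_in_tr can use their areas and exterior coordinates.
    triangles = [Polygon(tri) for tri in triangulate(shape)]
    return rnd_pts_in_tr(triangles, num_points)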
# polygon tesselation
def _tesselate(tess, polygon):
gluTessBeginContour(tess)
for point in polygon.exterior.coords:
point3d = (point[0], point[1], 0)
gluTessVertex(tess, point3d, point3d)
gluTessEndContour(tess)
# then handle each of the holes, if applicable
for hole in polygon.interiors:
gluTessBeginContour(tess)
for point in hole.coords:
point3d = (point[0], point[1], 0)
gluTessVertex(tess, point3d, point3d)
gluTessEndContour(tess) | PypiClean |
/Flask_AdminLTE3-1.0.9-py3-none-any.whl/flask_adminlte3/static/plugins/moment/locale/pl.js |
;(function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined'
&& typeof require === 'function' ? factory(require('../moment')) :
typeof define === 'function' && define.amd ? define(['../moment'], factory) :
factory(global.moment)
}(this, (function (moment) { 'use strict';
//! moment.js locale configuration
var monthsNominative = 'styczeń_luty_marzec_kwiecień_maj_czerwiec_lipiec_sierpień_wrzesień_październik_listopad_grudzień'.split(
'_'
),
monthsSubjective = 'stycznia_lutego_marca_kwietnia_maja_czerwca_lipca_sierpnia_września_października_listopada_grudnia'.split(
'_'
),
monthsParse = [
/^sty/i,
/^lut/i,
/^mar/i,
/^kwi/i,
/^maj/i,
/^cze/i,
/^lip/i,
/^sie/i,
/^wrz/i,
/^paź/i,
/^lis/i,
/^gru/i,
];
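    // Polish uses a separate "few" plural form for counts whose last digit is
    // 2-4, except those ending in 12-14: plural(3) and plural(22) return true,
    // while plural(13) and plural(25) return false.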
function plural(n) {
return n % 10 < 5 && n % 10 > 1 && ~~(n / 10) % 10 !== 1;
}
function translate(number, withoutSuffix, key) {
var result = number + ' ';
switch (key) {
case 'ss':
return result + (plural(number) ? 'sekundy' : 'sekund');
case 'm':
return withoutSuffix ? 'minuta' : 'minutę';
case 'mm':
return result + (plural(number) ? 'minuty' : 'minut');
case 'h':
return withoutSuffix ? 'godzina' : 'godzinę';
case 'hh':
return result + (plural(number) ? 'godziny' : 'godzin');
case 'ww':
return result + (plural(number) ? 'tygodnie' : 'tygodni');
case 'MM':
return result + (plural(number) ? 'miesiące' : 'miesięcy');
case 'yy':
return result + (plural(number) ? 'lata' : 'lat');
}
}
var pl = moment.defineLocale('pl', {
months: function (momentToFormat, format) {
if (!momentToFormat) {
return monthsNominative;
} else if (/D MMMM/.test(format)) {
return monthsSubjective[momentToFormat.month()];
} else {
return monthsNominative[momentToFormat.month()];
}
},
monthsShort: 'sty_lut_mar_kwi_maj_cze_lip_sie_wrz_paź_lis_gru'.split('_'),
monthsParse: monthsParse,
longMonthsParse: monthsParse,
shortMonthsParse: monthsParse,
weekdays: 'niedziela_poniedziałek_wtorek_środa_czwartek_piątek_sobota'.split(
'_'
),
weekdaysShort: 'ndz_pon_wt_śr_czw_pt_sob'.split('_'),
weekdaysMin: 'Nd_Pn_Wt_Śr_Cz_Pt_So'.split('_'),
longDateFormat: {
LT: 'HH:mm',
LTS: 'HH:mm:ss',
L: 'DD.MM.YYYY',
LL: 'D MMMM YYYY',
LLL: 'D MMMM YYYY HH:mm',
LLLL: 'dddd, D MMMM YYYY HH:mm',
},
calendar: {
sameDay: '[Dziś o] LT',
nextDay: '[Jutro o] LT',
nextWeek: function () {
switch (this.day()) {
case 0:
return '[W niedzielę o] LT';
case 2:
return '[We wtorek o] LT';
case 3:
return '[W środę o] LT';
case 6:
return '[W sobotę o] LT';
default:
return '[W] dddd [o] LT';
}
},
lastDay: '[Wczoraj o] LT',
lastWeek: function () {
switch (this.day()) {
case 0:
return '[W zeszłą niedzielę o] LT';
case 3:
return '[W zeszłą środę o] LT';
case 6:
return '[W zeszłą sobotę o] LT';
default:
return '[W zeszły] dddd [o] LT';
}
},
sameElse: 'L',
},
relativeTime: {
future: 'za %s',
past: '%s temu',
s: 'kilka sekund',
ss: translate,
m: translate,
mm: translate,
h: translate,
hh: translate,
d: '1 dzień',
dd: '%d dni',
w: 'tydzień',
ww: translate,
M: 'miesiąc',
MM: translate,
y: 'rok',
yy: translate,
},
dayOfMonthOrdinalParse: /\d{1,2}\./,
ordinal: '%d.',
week: {
dow: 1, // Monday is the first day of the week.
doy: 4, // The week that contains Jan 4th is the first week of the year.
},
});
return pl;
}))); | PypiClean |
/NinjaTools-0.68-py3-none-any.whl/ninja_tools/image_finder.py | from typing import Union, Tuple, List
import cv2
import numpy as np
from ninja_tools.utils import Utilities
class ImageFinder:
def __init__(self):
self.u = Utilities()
def find_image(self, haystack: np.ndarray, needle: np.ndarray, threshold: float = 8,
passed_only: bool = True, get_rect: bool = False,
get_center: bool = False, get_dist: bool = False,
show: bool = False) -> Union[bool, List[Tuple[int, int]]]:
"""
Finds an image within another image using OpenCV's matchTemplate function.
Parameters:
- haystack (np.ndarray): The image to search within.
- needle (np.ndarray): The image to search for.
- threshold (float): The minimum match level (0-10) required for a match to be considered a positive result.
- passed_only (bool): If True, returns a boolean indicating whether a match was found.
If False, returns a list of points where the needle image was found.
- get_rect (bool): If True, returns a list of rectangles in the form (x, y, x+w, y+h) where the needle
image was found.
- get_center (bool): If True, returns a list of points representing the center of the rectangles where
the needle image was found.
- get_dist (bool): If True, returns a list of tuples in the form (distance, (x, y)) where distance is the
distance from the center of the haystack image to the center of the rectangle where the needle image was
found.
- show (bool): If True, displays the haystack image with rectangles drawn around the found needles.
Returns:
- If passed_only is True, returns a boolean indicating whether a match was found.
- If passed_only is False, returns a list of points or rectangles where the needle image was found.
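        Example (an illustrative sketch; the file names below are placeholders):
            finder = ImageFinder()
            haystack = cv2.imread("screenshot.png")
            needle = cv2.imread("button.png")
            centers = finder.find_image(haystack, needle, threshold=8,
                                        passed_only=False, get_center=True)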
"""
try:
result = cv2.matchTemplate(haystack, needle, cv2.TM_CCOEFF_NORMED)
passed = np.amax(result) >= (threshold * 0.1)
if passed and passed_only:
return passed
w_src, h_src = haystack.shape[1], haystack.shape[0]
w, h = needle.shape[1], needle.shape[0]
rectangles = []
points = []
if get_center or get_rect or show:
locations = np.where(result >= (threshold * 0.1))
locations = list(zip(*locations[::-1]))
if not locations:
return False
for loc in locations:
rect = [int(loc[0]), int(loc[1]), w, h]
rectangles.append(rect)
rectangles.append(rect)
rectangles, _ = cv2.groupRectangles(rectangles, groupThreshold=1, eps=0.5)
if show:
for pt in locations:
cv2.rectangle(haystack, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2)
print(f"Max match: {np.amax(result):,.2%}")
cv2.imshow(None, haystack)
if cv2.waitKey(1) & 0xFF == ord('q'):
cv2.destroyAllWindows()
for (x, y, w, h) in rectangles:
if get_rect:
points.append((x, y, x + w, y + h))
else:
center_needle = x + int(w / 2), y + int(h / 2)
if get_dist:
center_haystack = int(w_src / 2), int(h_src / 2)
dist = self.u.get_distance(center_haystack, center_needle)
points.append((dist, center_needle))
else:
points.append(center_needle)
return points
except Exception as e:
print(f'An error occurred: {e}')
return False | PypiClean |
/DeepCell-RetinaMask-0.1.1.tar.gz/DeepCell-RetinaMask-0.1.1/deepcell_retinamask/utils/postprocessing_utils.py | """Functions for post-processing RetinaNet outputs"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from skimage import morphology
from skimage.measure import label, regionprops
from skimage.morphology import remove_small_objects
from skimage.segmentation import random_walker, relabel_sequential
from skimage.transform import resize
from deepcell_toolbox.compute_overlap import compute_overlap # pylint: disable=E0401
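# compute_iou: pairwise IoU of the instance masks, computed only for pairs of
# detections whose bounding boxes overlap (compute_overlap supplies the
# candidate pairs); the diagonal holds each mask's IoU with itself.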
def compute_iou(boxes, mask_image):
overlaps = compute_overlap(boxes.astype('float64'), boxes.astype('float64'))
ind_x, ind_y = np.nonzero(overlaps)
ious = np.zeros(overlaps.shape)
for index in range(ind_x.shape[0]):
mask_a = mask_image[ind_x[index]]
mask_b = mask_image[ind_y[index]]
intersection = np.count_nonzero(np.logical_and(mask_a, mask_b))
union = np.count_nonzero(mask_a + mask_b)
if intersection > 0:
ious[ind_x[index], ind_y[index]] = intersection / union
return ious
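# _get_masks: keeps detections above ``score_threshold``, pastes each predicted
# mask into the full image frame, assigns non-overlapping masks directly, and
# resolves ambiguous (multiply-claimed) pixels with a random-walker segmentation
# seeded by the unambiguous pixels.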
def _get_masks(boxes, scores, masks, mask_shape, score_threshold=0.5,
multi_iou_threshold=0.25, binarize_threshold=0.5):
# Get good detections
selection = np.nonzero(scores > score_threshold)[0]
boxes = boxes[selection]
scores = scores[selection]
masks = masks[selection, ..., -1]
# Compute overlap of masks with each other
mask_image = np.zeros(mask_shape, dtype='float32')
for j in range(masks.shape[0]):
mask = masks[j]
box = boxes[j].astype(int)
mask = resize(mask, (box[3] - box[1], box[2] - box[0]))
mask = (mask > binarize_threshold).astype('float32')
mask_image[j, box[1]:box[3], box[0]:box[2]] = mask
ious = compute_iou(boxes, mask_image)
# Identify all the masks with no overlaps and
# add to the label matrix
summed_ious = np.sum(ious, axis=-1)
no_overlaps = np.where(summed_ious == 1)
masks_no_overlaps = mask_image[no_overlaps]
range_no_overlaps = np.arange(1, masks_no_overlaps.shape[0] + 1)
masks_no_overlaps *= np.expand_dims(np.expand_dims(range_no_overlaps, axis=-1), axis=-1)
masks_concat = masks_no_overlaps
# If a mask has a big iou with two other masks, remove it
bad_mask = np.sum(ious > multi_iou_threshold, axis=0)
good_overlaps = np.logical_and(summed_ious > 1, bad_mask < 3)
good_overlaps = np.where(good_overlaps == 1)
# Identify all the ambiguous pixels and resolve
# by performing marker based watershed using unambiguous
# pixels as the markers
masks_overlaps = mask_image[good_overlaps]
range_overlaps = np.arange(1, masks_overlaps.shape[0] + 1)
masks_overlaps_label = masks_overlaps * np.expand_dims(
np.expand_dims(range_overlaps, axis=-1), axis=-1)
masks_overlaps_sum = np.sum(masks_overlaps, axis=0)
ambiguous_pixels = np.where(masks_overlaps_sum > 1)
markers = np.sum(masks_overlaps_label, axis=0)
if np.sum(markers.flatten()) > 0:
markers[markers == 0] = -1
markers[ambiguous_pixels] = 0
foreground = masks_overlaps_sum > 0
segments = random_walker(foreground, markers)
if not (segments == -1).all():
masks_overlaps = np.zeros((
np.amax(segments).astype(int),
masks_overlaps.shape[1],
masks_overlaps.shape[2]
))
for j in range(1, masks_overlaps.shape[0] + 1):
masks_overlaps[j - 1] = segments == j
range_overlaps = np.arange(
masks_no_overlaps.shape[0] + 1,
masks_no_overlaps.shape[0] + masks_overlaps.shape[0] + 1)
            range_overlaps = np.expand_dims(range_overlaps, axis=-1)
            range_overlaps = np.expand_dims(range_overlaps, axis=-1)
masks_overlaps *= range_overlaps
masks_concat = np.concatenate([masks_concat, masks_overlaps], axis=0)
return masks_concat, masks_overlaps, masks_no_overlaps
def retinamask_semantic_postprocess(retinanet_outputs,
score_threshold=0.5,
multi_iou_threshold=0.25,
binarize_threshold=0.5,
watershed_threshold=0.5,
perimeter_area_threshold=2,
small_objects_threshold=100):
boxes_batch = retinanet_outputs[-5]
scores_batch = retinanet_outputs[-4]
labels_batch = retinanet_outputs[-3] # pylint: disable=unused-variable
masks_batch = retinanet_outputs[-2]
semantic_batch = retinanet_outputs[-1]
# Create empty label matrix
label_images = np.zeros(
(masks_batch.shape[0], semantic_batch.shape[1], semantic_batch.shape[2]),
dtype='float32')
# Iterate over batches
for i in range(boxes_batch.shape[0]):
boxes = boxes_batch[i]
scores = scores_batch[i]
masks = masks_batch[i]
semantic = semantic_batch[i]
shape = (masks.shape[0], semantic.shape[0], semantic.shape[1])
masks_concat, masks_overlaps, masks_no_overlaps = _get_masks(
boxes, scores, masks, shape,
score_threshold=score_threshold,
multi_iou_threshold=multi_iou_threshold,
binarize_threshold=binarize_threshold)
# Find peaks in watershed that are not within any
# box and perform watershed
semantic_argmax = np.argmax(semantic, axis=-1)
semantic_argmax *= np.sum(masks_concat, axis=0) < 1
foreground = semantic_argmax > 0
inner_most = semantic[..., -1] * (np.sum(masks_concat, axis=0) < 1)
local_maxi = inner_most > watershed_threshold
if np.sum(local_maxi.flatten()) > 0:
markers_semantic = label(local_maxi)
distance = semantic_argmax
segments_semantic = morphology.watershed(
-distance, markers_semantic, mask=foreground)
# Remove misshapen watershed cells
props = regionprops(segments_semantic)
for prop in props:
if prop.perimeter ** 2 / prop.area > perimeter_area_threshold * 4 * np.pi:
segments_semantic[segments_semantic == prop.label] = 0
masks_semantic = np.zeros((np.amax(segments_semantic).astype(int),
semantic.shape[0], semantic.shape[1]))
for j in range(1, masks_semantic.shape[0] + 1):
masks_semantic[j - 1] = segments_semantic == j
range_semantic = np.arange(masks_no_overlaps.shape[0] + masks_overlaps.shape[0] + 1,
masks_no_overlaps.shape[0] + masks_overlaps.shape[0] +
masks_semantic.shape[0] + 1)
masks_semantic *= np.expand_dims(np.expand_dims(range_semantic, axis=-1), axis=-1)
masks_concat = np.concatenate([masks_concat, masks_semantic], axis=0)
label_image = np.sum(masks_concat, axis=0).astype(int)
# Remove small objects
label_image = morphology.remove_small_objects(
label_image, min_size=small_objects_threshold)
# Relabel the label image
label_image, _, _ = relabel_sequential(label_image)
# Store in batched array
label_images[i] = label_image
return label_images
def retinamask_postprocess(outputs,
image_shape=(256, 256),
score_threshold=0.5,
multi_iou_threshold=0.25,
binarize_threshold=0.5,
small_objects_threshold=0,
dtype='float32'):
"""Post processing function for RetinaMask models.
Expects model that produces an output list [boxes, scores, labels, masks]
Args:
boxes_batch: Bounding box predictions.
scores_batch: Scores for each detection.
labels_batch: Label for each detection.
masks_batch: Masks for each detection.
image_shape: Shape of the image.
score_threshold: Score threshold for detections.
multi_iou_threshold: Threshold to suppress detections that have multiple
overlaps with other detections.
binarize_threshold: Threshold to binarize masks.
small_objects_threshold: Area threshold to remove small objects.
dtype: data type of label mask.
Returns:
numpy.array: label mask for the image.
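    Example:
        An illustrative sketch; ``outputs`` below is a placeholder for the list
        returned by a RetinaMask model (only its last four entries, boxes,
        scores, labels and masks, are used):
            label_images = retinamask_postprocess(
                outputs, image_shape=(256, 256), score_threshold=0.5)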
"""
boxes_batch = outputs[-4]
scores_batch = outputs[-3]
labels_batch = outputs[-2] # pylint: disable=unused-variable
masks_batch = outputs[-1]
# Create empty label matrix
label_images = np.zeros(
(masks_batch.shape[0], image_shape[0], image_shape[1]),
dtype=dtype)
# Iterate over batches
for i in range(boxes_batch.shape[0]):
boxes = boxes_batch[i]
scores = scores_batch[i]
masks = masks_batch[i]
shape = (masks.shape[0], image_shape[0], image_shape[1])
masks_concat, _, _ = _get_masks(
boxes, scores, masks, shape,
score_threshold=score_threshold,
multi_iou_threshold=multi_iou_threshold,
binarize_threshold=binarize_threshold)
label_image = np.sum(masks_concat, axis=0).astype(int)
# Remove small objects
label_image = remove_small_objects(label_image, min_size=small_objects_threshold)
# Relabel the label image
label_image, _, _ = relabel_sequential(label_image)
# Store in batched array
label_images[i] = label_image
return label_images | PypiClean |
/AiCoreSentimentStockScraper2021EtaCohort-0.0.2.tar.gz/AiCoreSentimentStockScraper2021EtaCohort-0.0.2/stock_scraper_AiCore/__main__.py | from .config import settingUp
import configparser
import os.path
import getpass
def sqlCreds(DATABASE_TYPE,DBAPI,HOST,USER,PASSWORD,DATABASE,PORT):
'''
    This function uses the parameters set by the user to connect to a pgAdmin database.
    Its purpose is to collect the correct parameters and credentials from the user to
    connect to the pgAdmin 4 SQL database via a SQLAlchemy engine. The function uses
    the data provided by the user and creates a .ini file named sqlconfig.ini.
    The values for the parameters can be found in pgAdmin 4; most of them are under Properties.
Hints:
DATABASE_TYPE: Server type on pgAdmin 4 (all lower case)
DBAPI: psycopg2
HOST: Host name / address on pgAdmin 4
USER: Username on pgAdmin 4
PASSWORD: Password created by user when setting up pgAdmin 4
DATABASE: The name of the database under Databases in pgAdmin 4
PORT: Port in pgAdmin 4
Output:
    .ini file containing the parameters and credentials
'''
config = configparser.ConfigParser()
config['DEFAULT'] = {'DATABASE_TYPE': DATABASE_TYPE,
'DBAPI': DBAPI,
'HOST': HOST,
'USER': USER,
'PASSWORD': PASSWORD,
'DATABASE': DATABASE,
'PORT': PORT}
with open('sqlconfig.ini', 'w') as configfile:
config.write(configfile)
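# Example of the sqlconfig.ini file written by sqlCreds (the values below are
# placeholders, not real credentials; configparser lower-cases the option names):
#
#   [DEFAULT]
#   database_type = postgresql
#   dbapi = psycopg2
#   host = localhost
#   user = postgres
#   password = <your pgAdmin password>
#   database = stocks
#   port = 5432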
print('Welcome to stock scraper')
print("Let's begin...")
print('Checking if config file is populated')
requiredFields = []
config_parser = configparser.ConfigParser()
settingUp()
config_parser.read('sqlconfig.ini')
DATABASE_TYPE = config_parser['DEFAULT']['DATABASE_TYPE']
DBAPI = config_parser['DEFAULT']['DBAPI']
HOST = config_parser['DEFAULT']['HOST']
USER = config_parser['DEFAULT']['USER']
PASSWORD = config_parser['DEFAULT']['PASSWORD']
DATABASE = config_parser['DEFAULT']['DATABASE']
PORT = config_parser['DEFAULT']['PORT']
configValues = ['DATABASE_TYPE','DBAPI','HOST','USER','PASSWORD','DATABASE','PORT']
configParameters = [DATABASE_TYPE,DBAPI,HOST,USER,PASSWORD,DATABASE,PORT]
counter = 0
for parameters in configParameters:
    if parameters == 'Replace':
        requiredFields.append(configValues[counter])
    counter += 1
if not requiredFields:
print('Config file has been populated')
print('Starting scraper')
from .process import stockData,stockHeadlineData,sentimentData
# Declaring datatable names
dt_stockContent = "stock_content"
dt_headlines = "stock_headlines"
dt_sentiment = "stock_sentiment"
stockData(stockContent_name=dt_stockContent, updateFlag=True)
stockHeadlineData(headlineDatatable_name = dt_headlines, stockInfoDatatable_name = dt_stockContent, updateFlag = True)
sentimentData(sentimentDatatable_name=dt_sentiment, updateFlag=True, headlineDatatable_name=dt_headlines)
print('Scraping complete')
else:
print('Config file has NOT been populated')
print('PGAdmin credentials required to proceed')
credsQuestion = input('Would you like to Enter credentials now?(y/n)')
if credsQuestion == 'y':
print('Please enter the value for the following parameters')
DATABASE_TYPE = input('DATABASE_TYPE: ')
DBAPI = input('DBAPI: ')
HOST = input('HOST: ')
USER = input('USER: ')
PASSWORD = getpass.getpass('PASSWORD: ')
DATABASE = input('DATABASE: ')
PORT = input('PORT: ')
sqlCreds(DATABASE_TYPE=DATABASE_TYPE.strip(), DBAPI=DBAPI.strip(),HOST=HOST.strip(),USER=USER.strip(),PASSWORD=PASSWORD.strip(),DATABASE=DATABASE.strip(),PORT=PORT.strip())
from .process import stockData,stockHeadlineData,sentimentData
# Declaring datatable names
dt_stockContent = "stock_content"
dt_headlines = "stock_headlines"
dt_sentiment = "stock_sentiment"
stockData(stockContent_name=dt_stockContent, updateFlag=True)
stockHeadlineData(headlineDatatable_name = dt_headlines, stockInfoDatatable_name = dt_stockContent, updateFlag = True)
sentimentData(sentimentDatatable_name=dt_sentiment, updateFlag=True, headlineDatatable_name=dt_headlines)
print('Scraping complete')
else:
print('You did not select y')
        print('Please populate the config file and re-run the code')
print('The code is now exiting...')
print('Scraping complete') | PypiClean |
/Dero-0.15.0-py3-none-any.whl/dero/data/psm/summarize/stats.py | from functools import partial
import pandas as pd
import numpy as np
from dero.data.summarize import format_numbers_to_decimal_places
from dero.data.formatters.stars import convert_to_stars
from dero.data.psm.names import matched_var, t_stat_var, diff_var
from dero.data.psm.typing import StrList
def matching_summary_stats(df: pd.DataFrame, matched_df: pd.DataFrame,
treated_var: str, describe_vars: StrList,
entity_var: str,
control_name: str = 'Control', treated_name: str = 'Treated') -> pd.DataFrame:
common_args = (
treated_var,
describe_vars,
entity_var
)
common_kwargs = dict(
treated_name=treated_name
)
summ = mean_and_diff_df_by_treatment(
matched_df,
*common_args,
control_name=matched_var,
**common_kwargs
)
control_summ = mean_and_diff_df_by_treatment(
df,
*common_args,
control_name=control_name,
**common_kwargs
)
summ[control_name] = control_summ[control_name]
summ = summ[[control_name, treated_name, matched_var, diff_var, t_stat_var]]
return summ
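# Illustrative usage sketch (the DataFrames and column names below are
# placeholders): ``df`` is the full sample and ``matched_df`` the
# propensity-score matched sample.  The result has one row per variable in
# ``describe_vars`` plus an N row, with columns for the control, treated and
# matched means, the treated-minus-matched difference (with significance
# stars attached) and its t-statistic.
#
#   summ = matching_summary_stats(
#       df=firms, matched_df=matched_firms, treated_var='treated',
#       describe_vars=['size', 'leverage'], entity_var='firm_id')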
def mean_and_diff_df_by_treatment(df: pd.DataFrame, treated_var: str, describe_vars: StrList,
entity_var: str, num_decimals: int = 2, coerce_ints: bool = True,
control_name: str = 'Control', treated_name: str = 'Treated') -> pd.DataFrame:
common_args = (
treated_var,
describe_vars,
entity_var
)
common_kwargs = dict(
treated_name=treated_name,
control_name=control_name
)
mean_df = _stat_df_by_treatment(
df,
*common_args,
stat='mean',
**common_kwargs
)
std_df = _stat_df_by_treatment(
df,
*common_args,
stat='std',
**common_kwargs
)
count_df = _stat_df_by_treatment(
df,
*common_args,
stat='count',
**common_kwargs
)
diff_t = _diff_and_t_df_from_mean_and_std_df(
mean_df,
std_df,
count_df,
control_name=control_name,
treated_name=treated_name
)
summ = pd.concat([mean_df, diff_t], axis=1)
summ = _format_summary_df(summ, num_decimals=num_decimals, coerce_ints=coerce_ints)
return summ
def _format_summary_df(df: pd.DataFrame, float_format: str = '.2f', num_decimals: int = 2,
coerce_ints: bool = True,) -> pd.DataFrame:
# num_formatter = lambda x: f'{x:{float_format}}' if isinstance(x, float) else x
num_formatter = partial(format_numbers_to_decimal_places, decimals=num_decimals, coerce_ints=coerce_ints)
df[diff_var] = df[diff_var].apply(num_formatter)
df[diff_var] = df[diff_var] + df[t_stat_var].apply(
lambda x: convert_to_stars(x) if isinstance(x, float) else x
)
df = df.applymap(num_formatter)
return df
def _stat_df_by_treatment(df: pd.DataFrame, treated_var: str, describe_vars: StrList, entity_var: str,
stat: str = 'mean',
control_name: str = 'Control', treated_name: str = 'Treated') -> pd.DataFrame:
grouped = df.groupby(treated_var)[describe_vars]
agg_func = getattr(grouped, stat)
stat_df = agg_func().T
stat_df.columns = [control_name, treated_name]
count_series = _count_series(
df,
treated_var,
entity_var,
control_name=control_name,
treated_name=treated_name
)
stat_df = stat_df.append(count_series)
return stat_df
def _diff_and_t_df_from_mean_and_std_df(mean_df: pd.DataFrame, std_df: pd.DataFrame, count_df: pd.DataFrame,
control_name: str = 'Control',
treated_name: str = 'Treated') -> pd.DataFrame:
diff = mean_df[treated_name] - mean_df[control_name]
    standard_errors = (
        (std_df[control_name] ** 2) / count_df[control_name] +
        (std_df[treated_name] ** 2) / count_df[treated_name]
    ) ** 0.5
    t_stats = diff / standard_errors
t_stats = t_stats.replace(np.inf, '')
df = pd.DataFrame([diff, t_stats]).T
colnames = [diff_var, t_stat_var]
df.columns = colnames
df.loc['N', colnames] = ''
return df
def _count_series(df: pd.DataFrame, treated_var: str, entity_var: str,
control_name: str = 'Control', treated_name: str = 'Treated') -> pd.Series:
count_s = df.groupby(treated_var)[entity_var].count()
count_s.index = [control_name, treated_name]
count_s.name = 'N'
return count_s | PypiClean |
/123_object_detection-0.1.tar.gz/123_object_detection-0.1/slim/nets/inception_v1.py | """Contains the definition for inception v1 classification network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import tf_slim as slim
from nets import inception_utils
# pylint: disable=g-long-lambda
trunc_normal = lambda stddev: tf.truncated_normal_initializer(
0.0, stddev)
def inception_v1_base(inputs,
final_endpoint='Mixed_5c',
include_root_block=True,
scope='InceptionV1'):
"""Defines the Inception V1 base architecture.
This architecture is defined in:
Going deeper with convolutions
Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
http://arxiv.org/pdf/1409.4842v1.pdf.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
final_endpoint: specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e',
'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c']. If
include_root_block is False, ['Conv2d_1a_7x7', 'MaxPool_2a_3x3',
'Conv2d_2b_1x1', 'Conv2d_2c_3x3', 'MaxPool_3a_3x3'] will not be available.
include_root_block: If True, include the convolution and max-pooling layers
before the inception modules. If False, excludes those layers.
scope: Optional variable_scope.
Returns:
A dictionary from components of the network to the corresponding activation.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values.
"""
end_points = {}
with tf.variable_scope(scope, 'InceptionV1', [inputs]):
with slim.arg_scope(
[slim.conv2d, slim.fully_connected],
weights_initializer=trunc_normal(0.01)):
with slim.arg_scope([slim.conv2d, slim.max_pool2d],
stride=1, padding='SAME'):
net = inputs
if include_root_block:
end_point = 'Conv2d_1a_7x7'
net = slim.conv2d(inputs, 64, [7, 7], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'MaxPool_2a_3x3'
net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'Conv2d_2b_1x1'
net = slim.conv2d(net, 64, [1, 1], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'Conv2d_2c_3x3'
net = slim.conv2d(net, 192, [3, 3], scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'MaxPool_3a_3x3'
net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point:
return net, end_points
end_point = 'Mixed_3b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 128, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 32, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 32, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_3c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 192, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 96, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'MaxPool_4a_3x3'
net = slim.max_pool2d(net, [3, 3], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 96, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 208, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 16, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 48, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 224, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4d'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 128, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 256, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 24, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4e'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 112, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 144, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 288, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 64, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 64, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_4f'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'MaxPool_5a_2x2'
net = slim.max_pool2d(net, [2, 2], stride=2, scope=end_point)
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_5b'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 256, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 160, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 320, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 32, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0a_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
end_point = 'Mixed_5c'
with tf.variable_scope(end_point):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(net, 384, [1, 1], scope='Conv2d_0a_1x1')
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(net, 192, [1, 1], scope='Conv2d_0a_1x1')
branch_1 = slim.conv2d(branch_1, 384, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(net, 48, [1, 1], scope='Conv2d_0a_1x1')
branch_2 = slim.conv2d(branch_2, 128, [3, 3], scope='Conv2d_0b_3x3')
with tf.variable_scope('Branch_3'):
branch_3 = slim.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = slim.conv2d(branch_3, 128, [1, 1], scope='Conv2d_0b_1x1')
net = tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3])
end_points[end_point] = net
if final_endpoint == end_point: return net, end_points
raise ValueError('Unknown final endpoint %s' % final_endpoint)
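# Example usage (illustrative sketch, not part of the original file): build the
# feature extractor up to a chosen endpoint; the placeholder shape is arbitrary.
#
#     images = tf.placeholder(tf.float32, [None, 224, 224, 3])
#     net, end_points = inception_v1_base(images, final_endpoint='Mixed_4f')
#     # `net` is the Mixed_4f activation; `end_points` maps every endpoint name
#     # constructed so far to its tensor.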
def inception_v1(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.8,
prediction_fn=slim.softmax,
spatial_squeeze=True,
reuse=None,
scope='InceptionV1',
global_pool=False):
"""Defines the Inception V1 architecture.
This architecture is defined in:
Going deeper with convolutions
Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed,
Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich.
http://arxiv.org/pdf/1409.4842v1.pdf.
The default image size used to train this network is 224x224.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes. If 0 or None, the logits layer
is omitted and the input features to the logits layer (before dropout)
are returned instead.
is_training: whether is training or not.
dropout_keep_prob: the percentage of activation values that are retained.
prediction_fn: a function to get predictions out of logits.
spatial_squeeze: if True, logits is of shape [B, C], if false logits is of
shape [B, 1, 1, C], where B is batch_size and C is number of classes.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
global_pool: Optional boolean flag to control the avgpooling before the
logits layer. If false or unset, pooling is done with a fixed window
that reduces default-sized inputs to 1x1, while larger inputs lead to
larger outputs. If true, any input size is pooled down to 1x1.
Returns:
net: a Tensor with the logits (pre-softmax activations) if num_classes
is a non-zero integer, or the non-dropped-out input to the logits layer
if num_classes is 0 or None.
end_points: a dictionary from components of the network to the corresponding
activation.
"""
# Final pooling and prediction
with tf.variable_scope(
scope, 'InceptionV1', [inputs], reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, end_points = inception_v1_base(inputs, scope=scope)
with tf.variable_scope('Logits'):
if global_pool:
# Global average pooling.
net = tf.reduce_mean(
input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')
end_points['global_pool'] = net
else:
# Pooling with a fixed kernel size.
net = slim.avg_pool2d(net, [7, 7], stride=1, scope='AvgPool_0a_7x7')
end_points['AvgPool_0a_7x7'] = net
if not num_classes:
return net, end_points
net = slim.dropout(net, dropout_keep_prob, scope='Dropout_0b')
logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, scope='Conv2d_0c_1x1')
if spatial_squeeze:
logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
inception_v1.default_image_size = 224
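# Example usage (illustrative sketch, not part of the original file): classify a
# batch of default-sized images; weights would normally be restored from a
# checkpoint before evaluation.
#
#     images = tf.placeholder(tf.float32, [None, 224, 224, 3])
#     logits, end_points = inception_v1(images, num_classes=1000, is_training=False)
#     probabilities = end_points['Predictions']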
inception_v1_arg_scope = inception_utils.inception_arg_scope | PypiClean |
/MeUtils-2023.8.29.13.9.44-py3-none-any.whl/meutils/db/__init__.py |
from meutils.pipe import *
class DB(object):
def __init__(self, is_test=None):
if is_test is None:
            if 'local' in HOST_NAME or LOCAL:
is_test = True
self.is_test = is_test
def redis(self, ips=None, password=None, batch=False): # redis集群
"""
        # todo: batch insert ([(k, v), ...]) or yield
@param ips:
@param password:
@param batch:
@return:
"""
if self.is_test:
return {}
from rediscluster import RedisCluster # pip install redis-py-cluster
startup_nodes = [dict(zip(['host', 'port'], ip.split(':'))) for ip in ips]
rc = RedisCluster(
startup_nodes=startup_nodes,
decode_responses=True,
skip_full_coverage_check=True,
password=password,
)
if batch:
return rc.pipeline() # DB().redis.execute()
else:
return rc
class Redis(object):
"""
rc.delete
rc.lpush('lpush', 'a', 'b') # ['b', 'a']
rc.rpush('rpush', 'a', 'b') # ['a', 'b']
rc.rpop
rc.lrange('rpush', 0, 10)
rc.lrange('lpush', 0, 10)
rc.hset(name, key=None, value=None, mapping=None)
rc.hget(name, key)
rc.hmget(name, keys, *args)
rc.sadd('sadd', 0)
rc.smembers('sadd')
rc.zadd('zadd', mapping={'a': 888, 'b': 1})
rc.zrange('zadd', 0, 1000, desc=True)
队列 https://www.cnblogs.com/arkenstone/p/7813551.html
https://blog.csdn.net/youyou1543724847/article/details/86756625
"""
def __init__(self, ips=None, password=None, decode_responses=False, ttl=3):
if isinstance(ips, str):
import redis
self.rc = redis.Redis(
*ips.split(':'),
password=password,
decode_responses=decode_responses
)
else:
from rediscluster import RedisCluster
startup_nodes = [dict(zip(['host', 'port'], ip.split(':'))) for ip in ips]
self.rc = RedisCluster(
startup_nodes=startup_nodes,
password=password,
decode_responses=decode_responses, # get对象设置False decode
skip_full_coverage_check=True,
)
self.rc.set_obj = self._set_obj
self.rc.get_obj = self._get_obj
# ttl
cache_fn = ttl_cache(ttl, key=str)
self.rc.get_ttl = cache_fn(self.rc.get)
self.rc.hget_ttl = cache_fn(self.rc.hget) # rc.hget(name, key)
self.rc.get_obj_ttl = cache_fn(self._get_obj)
@timer("RedisInsertData")
def insert(self, values, insert_fn=lambda k, v, p: p.set(k, v)):
        with self.rc.pipeline(transaction=False) as p:  # transaction: atomicity, consistency, isolation, durability
_insert_fn = functools.partial(insert_fn, p=p)
for v in tqdm(values): # xJobs
_insert_fn(*v)
p.execute()
def _set_obj(self, key, obj, ex=None, px=None, nx=False, xx=False, keepttl=False):
self.rc.set(key, pickle.dumps(obj), ex, px, nx, xx, keepttl) # dill/pickle
def _get_obj(self, key, default=None):
try:
_ = self.rc.get(key)
return pickle.loads(_) if _ else default
except Exception as e:
logger.error(e)
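# Example usage (sketch; host, port, and password are placeholders): a single
# "host:port" string creates a plain redis.Redis client, while a list of such
# strings creates a RedisCluster client.
#
#     r = Redis('127.0.0.1:6379', password='secret', decode_responses=True).rc
#     r.set('greeting', 'hello')
#     r.get_ttl('greeting')            # cached read, refreshed every `ttl` seconds
#     r.set_obj('model', {'a': 1})     # pickled value
#     r.get_obj('model')
#     Redis('127.0.0.1:6379').insert([('k1', 'v1'), ('k2', 'v2')])  # pipelined batch insert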
class SQL(object):
def __init__(self, **kwargs):
self.conn = None # 重写
def get_dataframe(self, sql, chunksize=None, **kwargs):
return pd.read_sql(sql, self.conn, chunksize=chunksize, **kwargs)
def set_dataframe(self, df: pd.DataFrame, table: str, if_exists='replace', index=False, chunksize=None, **kwargs):
df.to_sql(table, self.conn, if_exists=if_exists, index=index, chunksize=chunksize, **kwargs)
class Sqlite(SQL):
def __init__(self, db='test.db', **kwargs):
super().__init__(**kwargs)
import sqlite3
self.conn = sqlite3.connect(db)
self.sqlite_master = pd.read_sql('SELECT * from SQLITE_MASTER', self.conn)
self.sqlite_version = pd.read_sql('SELECT SQLITE_VERSION()', self.conn)
@singleton
class MySQL(SQL):
def __init__(self, url=None, echo=False, db_kwargs=None, **kwargs):
"""
:param url: f'mysql+pymysql://{user}:{password}@{host}:{port}/{database}'
:param echo:
:param kwargs:
"""
super().__init__(**kwargs)
import pymysql
from sqlalchemy import create_engine
url = url or "mysql+pymysql://root:root123456@localhost/test?charset=utf8mb4"
        self.conn = create_engine(url, echo=echo, **db_kwargs or {})  # may be incompatible; pymysql fallback below
try:
_ = self.tables
except Exception as e:
logger.error(e)
_ = re.split('[:@/?]', url[16:])
self.conn = pymysql.Connect(**dict(zip(['user', 'password', 'host', 'database'], _)))
self.conn.execute = self.conn.cursor().execute
def query(self, sql):
return pd.read_sql(sql, self.conn)
def query_table(self, table_name):
return pd.read_sql_table(table_name, self.conn)
def create_table_or_upsert(
self, df: pd.DataFrame,
table_name: str,
if_exists="append", # "append": 不存在则创建,存在则存入数据
primary_key=''
):
"""Upsert 是 update 和 insert 的组合
:param df:
:param table_name:
:param if_exists:
"append": 不存在则创建,存在则存入数据,
"replace"
"fail"
:param primary_key:
:return:
"""
_ = df.to_sql(table_name, self.conn, index=False, if_exists=if_exists, method='multi') # Literal[if_exists]
if primary_key and primary_key in df: # 初始化一次即可
self.alter_primary_key(table_name, primary_key)
return _
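    # Example usage (sketch; the connection URL, table, and column names are
    # placeholders):
    #
    #     db = MySQL("mysql+pymysql://user:pw@localhost/test?charset=utf8mb4")
    #     prices = pd.DataFrame({"id": [1, 2], "price": [9.9, 19.9]})
    #     db.create_table_or_upsert(prices, "prices", if_exists="append", primary_key="id")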
@property
def tables(self):
return pd.read_sql('show tables', self.conn)
@property
def databases(self):
return pd.read_sql('show databases', self.conn)
def get_primary_key_info(self, table_name):
df = pd.read_sql(f"SHOW KEYS FROM {table_name} WHERE Key_name = 'PRIMARY';", self.conn)
return df.to_dict('records')
def alter_primary_key(self, table_name, primary_key):
"""
ALTER TABLE {table_name} ADD PRIMARY KEY ({field1}, {field2}, ...);
        ALTER TABLE table_name ADD PRIMARY KEY (column_name(length));  -- string columns must specify a prefix length
:param table_name:
:param primary_key:
:return:
"""
_ = self.get_primary_key_info(table_name)
if _: logger.warning(f'exist primary_key: {_}')
self.conn.execute("ALTER TABLE {table_name} DROP PRIMARY KEY")
self.conn.execute(f"ALTER TABLE {table_name} ADD PRIMARY KEY ({primary_key})")
rprint(self.get_primary_key_info(table_name))
if __name__ == '__main__':
# rc = Redis('9736:071.4.9.931'[::-1], password='b***e', decode_responses=True).rc
#
# rc.set('a', 'xxxx')
#
# print(rc.get('a'))
db = MySQL()
print(db.tables) | PypiClean |
/INGInious-0.8.7.tar.gz/INGInious-0.8.7/inginious/frontend/static/js/codemirror/mode/groovy/groovy.js |
(function(mod) {
if (typeof exports == "object" && typeof module == "object") // CommonJS
mod(require("../../lib/codemirror"));
else if (typeof define == "function" && define.amd) // AMD
define(["../../lib/codemirror"], mod);
else // Plain browser env
mod(CodeMirror);
})(function(CodeMirror) {
"use strict";
CodeMirror.defineMode("groovy", function(config) {
function words(str) {
var obj = {}, words = str.split(" ");
for (var i = 0; i < words.length; ++i) obj[words[i]] = true;
return obj;
}
var keywords = words(
"abstract as assert boolean break byte case catch char class const continue def default " +
"do double else enum extends final finally float for goto if implements import in " +
"instanceof int interface long native new package private protected public return " +
"short static strictfp super switch synchronized threadsafe throw throws trait transient " +
"try void volatile while");
var blockKeywords = words("catch class def do else enum finally for if interface switch trait try while");
var standaloneKeywords = words("return break continue");
var atoms = words("null true false this");
var curPunc;
function tokenBase(stream, state) {
var ch = stream.next();
if (ch == '"' || ch == "'") {
return startString(ch, stream, state);
}
if (/[\[\]{}\(\),;\:\.]/.test(ch)) {
curPunc = ch;
return null;
}
if (/\d/.test(ch)) {
stream.eatWhile(/[\w\.]/);
      if (stream.eat(/[eE]/)) { stream.eat(/[+\-]/); stream.eatWhile(/\d/); }
return "number";
}
if (ch == "/") {
if (stream.eat("*")) {
state.tokenize.push(tokenComment);
return tokenComment(stream, state);
}
if (stream.eat("/")) {
stream.skipToEnd();
return "comment";
}
if (expectExpression(state.lastToken, false)) {
return startString(ch, stream, state);
}
}
if (ch == "-" && stream.eat(">")) {
curPunc = "->";
return null;
}
if (/[+\-*&%=<>!?|\/~]/.test(ch)) {
stream.eatWhile(/[+\-*&%=<>|~]/);
return "operator";
}
stream.eatWhile(/[\w\$_]/);
if (ch == "@") { stream.eatWhile(/[\w\$_\.]/); return "meta"; }
if (state.lastToken == ".") return "property";
if (stream.eat(":")) { curPunc = "proplabel"; return "property"; }
var cur = stream.current();
if (atoms.propertyIsEnumerable(cur)) { return "atom"; }
if (keywords.propertyIsEnumerable(cur)) {
if (blockKeywords.propertyIsEnumerable(cur)) curPunc = "newstatement";
else if (standaloneKeywords.propertyIsEnumerable(cur)) curPunc = "standalone";
return "keyword";
}
return "variable";
}
tokenBase.isBase = true;
function startString(quote, stream, state) {
var tripleQuoted = false;
if (quote != "/" && stream.eat(quote)) {
if (stream.eat(quote)) tripleQuoted = true;
else return "string";
}
function t(stream, state) {
var escaped = false, next, end = !tripleQuoted;
while ((next = stream.next()) != null) {
if (next == quote && !escaped) {
if (!tripleQuoted) { break; }
if (stream.match(quote + quote)) { end = true; break; }
}
if (quote == '"' && next == "$" && !escaped && stream.eat("{")) {
state.tokenize.push(tokenBaseUntilBrace());
return "string";
}
escaped = !escaped && next == "\\";
}
if (end) state.tokenize.pop();
return "string";
}
state.tokenize.push(t);
return t(stream, state);
}
function tokenBaseUntilBrace() {
var depth = 1;
function t(stream, state) {
if (stream.peek() == "}") {
depth--;
if (depth == 0) {
state.tokenize.pop();
return state.tokenize[state.tokenize.length-1](stream, state);
}
} else if (stream.peek() == "{") {
depth++;
}
return tokenBase(stream, state);
}
t.isBase = true;
return t;
}
function tokenComment(stream, state) {
var maybeEnd = false, ch;
while (ch = stream.next()) {
if (ch == "/" && maybeEnd) {
state.tokenize.pop();
break;
}
maybeEnd = (ch == "*");
}
return "comment";
}
function expectExpression(last, newline) {
return !last || last == "operator" || last == "->" || /[\.\[\{\(,;:]/.test(last) ||
last == "newstatement" || last == "keyword" || last == "proplabel" ||
(last == "standalone" && !newline);
}
function Context(indented, column, type, align, prev) {
this.indented = indented;
this.column = column;
this.type = type;
this.align = align;
this.prev = prev;
}
function pushContext(state, col, type) {
return state.context = new Context(state.indented, col, type, null, state.context);
}
function popContext(state) {
var t = state.context.type;
if (t == ")" || t == "]" || t == "}")
state.indented = state.context.indented;
return state.context = state.context.prev;
}
// Interface
return {
startState: function(basecolumn) {
return {
tokenize: [tokenBase],
context: new Context((basecolumn || 0) - config.indentUnit, 0, "top", false),
indented: 0,
startOfLine: true,
lastToken: null
};
},
token: function(stream, state) {
var ctx = state.context;
if (stream.sol()) {
if (ctx.align == null) ctx.align = false;
state.indented = stream.indentation();
state.startOfLine = true;
// Automatic semicolon insertion
if (ctx.type == "statement" && !expectExpression(state.lastToken, true)) {
popContext(state); ctx = state.context;
}
}
if (stream.eatSpace()) return null;
curPunc = null;
var style = state.tokenize[state.tokenize.length-1](stream, state);
if (style == "comment") return style;
if (ctx.align == null) ctx.align = true;
if ((curPunc == ";" || curPunc == ":") && ctx.type == "statement") popContext(state);
// Handle indentation for {x -> \n ... }
else if (curPunc == "->" && ctx.type == "statement" && ctx.prev.type == "}") {
popContext(state);
state.context.align = false;
}
else if (curPunc == "{") pushContext(state, stream.column(), "}");
else if (curPunc == "[") pushContext(state, stream.column(), "]");
else if (curPunc == "(") pushContext(state, stream.column(), ")");
else if (curPunc == "}") {
while (ctx.type == "statement") ctx = popContext(state);
if (ctx.type == "}") ctx = popContext(state);
while (ctx.type == "statement") ctx = popContext(state);
}
else if (curPunc == ctx.type) popContext(state);
else if (ctx.type == "}" || ctx.type == "top" || (ctx.type == "statement" && curPunc == "newstatement"))
pushContext(state, stream.column(), "statement");
state.startOfLine = false;
state.lastToken = curPunc || style;
return style;
},
indent: function(state, textAfter) {
if (!state.tokenize[state.tokenize.length-1].isBase) return CodeMirror.Pass;
var firstChar = textAfter && textAfter.charAt(0), ctx = state.context;
if (ctx.type == "statement" && !expectExpression(state.lastToken, true)) ctx = ctx.prev;
var closing = firstChar == ctx.type;
if (ctx.type == "statement") return ctx.indented + (firstChar == "{" ? 0 : config.indentUnit);
else if (ctx.align) return ctx.column + (closing ? 0 : 1);
else return ctx.indented + (closing ? 0 : config.indentUnit);
},
electricChars: "{}",
closeBrackets: {triples: "'\""},
fold: "brace"
};
});
CodeMirror.defineMIME("text/x-groovy", "groovy");
}); | PypiClean |
/Assign_GUP-2017.4.0.zip/Assign_GUP-2017.4.0/src/Assign_GUP/settings.py |
r'''
Support for AGUP program settings
A settings file is used to preserve certain values of the application
(such as window positions and full path to the project file).
The name of the settings file is given in the main window.
Note, the settings file may have the suffix ".ini" on some operating systems.
Remove the settings file to clear any settings.
There is also a menu item to clear this file and reset it to defaults.
This module uses QSettings (http://doc.qt.io/qt-4.8/qsettings.html).
.. note:: Multi-monitor support : method restoreWindowGeometry()
On multi-monitor systems such as laptops, window may be
restored to offscreen position. Here is how it happens:
* geo was saved while window was on 2nd screen while docked
* now re-opened on laptop display and window is off-screen
For now, keep the windows on the main screen
or learn how to edit the settings file.
---------
'''
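# Typical use (sketch): keys are addressed as "group/key"; a key without a group
# falls back to the global group.
#
#     settings = ApplicationQSettings()
#     settings.setKey('main_window/x', 40)
#     settings.getKey('main_window/x')
#     settings.getPrpFile()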
import datetime
import os, sys
from PyQt4 import QtCore, QtGui
import __init__
orgName = __init__.__settings_orgName__
appName = __init__.__package_name__
GLOBAL_GROUP = '___global___'
class ApplicationQSettings(QtCore.QSettings):
'''
manage and preserve default settings for this application using QSettings
Use the .ini file format and save under user directory
'''
def __init__(self):
QtCore.QSettings.__init__(self,
QtCore.QSettings.IniFormat,
QtCore.QSettings.UserScope,
orgName,
appName)
self.init_global_keys()
def init_global_keys(self):
d = dict(
this_file = self.fileName(),
review_cycle = '', # redundant, treat as non-authoritative
prp_file = '',
version = 1.0,
timestamp = str(datetime.datetime.now())
)
for k, v in d.items():
if self.getKey(GLOBAL_GROUP + '/' + k) in ('', None):
self.setValue(GLOBAL_GROUP + '/' + k, v)
def _keySplit_(self, full_key):
'''
split full_key into (group, key) tuple
:param str full_key: either `key` or `group/key`, default group (unspecified) is GLOBAL_GROUP
'''
if len(full_key) == 0:
raise KeyError('must supply a key')
parts = full_key.split('/')
if len(parts) > 2:
raise KeyError('too many "/" separators: ' + full_key)
if len(parts) == 1:
group, key = GLOBAL_GROUP, str(parts[0])
elif len(parts) == 2:
group, key = map(str, parts)
return group, key
def keyExists(self, key):
'''does the named key exist?'''
return key in self.allKeys()
def getKey(self, key):
'''
return the Python value (not a QVariant) of key or None if not found
:raises TypeError: if key is None
'''
return self.value(key).toPyObject()
def setKey(self, key, value):
'''
set the value of a configuration key, creates the key if it does not exist
:param str key: either `key` or `group/key`
Complement: self.value(key) returns value of key
'''
#?WHY? if not self.keyExists(key):
group, k = self._keySplit_(key)
if group is None:
group = GLOBAL_GROUP
self.remove(key)
self.beginGroup(group)
self.setValue(k, value)
self.endGroup()
if key != 'timestamp':
self.updateTimeStamp()
def getReviewCycle(self):
''' '''
return str(self.getKey(GLOBAL_GROUP + '/review_cycle')) or ''
def setReviewCycle(self, review_cycle): # redundant, treat as non-authoritative
''' '''
key = GLOBAL_GROUP + '/review_cycle'
self.remove(key)
self.setKey(key, str(review_cycle))
def getPrpFile(self):
''' '''
return str(self.getKey(GLOBAL_GROUP + '/prp_file')).strip() or ''
def setPrpFile(self, prp_file):
''' '''
key = GLOBAL_GROUP + '/prp_file'
self.remove(key)
self.setKey(key, str(prp_file))
def resetDefaults(self):
'''
Reset all application settings to default values.
'''
for key in self.allKeys():
self.remove(key)
self.init_global_keys()
def updateTimeStamp(self):
''' '''
self.setKey('timestamp', str(datetime.datetime.now()))
def saveEmailKeywords(self, keyword_dict):
'''
remember the email substitution keywords
'''
group = 'Email_Keywords'
for k, v in keyword_dict.items():
self.setKey(group + '/' + k, v)
def getEmailKeywords(self):
'''
return the email substitution keywords as a dictionary
'''
group = 'Email_Keywords'
self.beginGroup(group)
key_list = self.childKeys()
self.endGroup()
db = {}
for key in key_list:
db[str(key)] = str(self.getKey(group + '/' + key))
return db
def getGroupName(self, window, group):
return group or window.__class__.__name__ + '_geometry'
def saveWindowGeometry(self, window, group=None):
'''
remember where the window was
:param obj window: instance of QWidget
'''
group = self.getGroupName(window, group)
geo = window.geometry()
self.setKey(group + '/x', geo.x())
self.setKey(group + '/y', geo.y())
self.setKey(group + '/width', geo.width())
self.setKey(group + '/height', geo.height())
def restoreWindowGeometry(self, window, group=None):
'''
put the window back where it was
:param obj window: instance of QWidget
'''
group = self.getGroupName(window, group)
width = self.getKey(group + '/width')
height = self.getKey(group + '/height')
if width is None or height is None:
return
window.resize(QtCore.QSize(int(width), int(height)))
x = self.getKey(group + '/x')
y = self.getKey(group + '/y')
if x is None or y is None:
return
# is this window on any available screen?
qdw = QtGui.QDesktopWidget()
x_onscreen = False
y_onscreen = False
for screen_num in range(qdw.screenCount()):
# find the "available" screen dimensions
# (excludes docks, menu bars, ...)
available_rect = qdw.availableGeometry(screen_num)
if available_rect.x() <= int(x) < available_rect.x()+available_rect.width():
x_onscreen = True
if available_rect.y() <= int(y) < available_rect.y()+available_rect.height():
y_onscreen = True
# Move the window to the primary window if it would otherwise be drawn off screen
available_rect = qdw.availableGeometry(qdw.primaryScreen())
        if not x_onscreen:
            offset = available_rect.width()/10
            x = available_rect.x() + offset
            width = min(int(width), available_rect.width())
        if not y_onscreen:
            offset = available_rect.height()/10
            y = available_rect.y() + offset
            height = min(int(height), available_rect.height())
window.setGeometry(QtCore.QRect(int(x), int(y), int(width), int(height)))
def saveSplitterDetails(self, window):
'''
remember where the splitter was
:param obj window: instance of QWidget
'''
group = self.__class__.__name__ + '_splitter'
sizes = map(int, window.splitter.sizes())
self.setKey(group + '/widths', ' '.join(map(str, sizes)))
def restoreSplitterDetails(self, window):
'''
put the splitter back where it was
:param obj window: instance of QWidget
'''
group = self.__class__.__name__ + '_splitter'
sizes = self.getKey(group + '/widths')
if sizes is not None:
window.splitter.setSizes(map(int, str(sizes).split()))
def qmain():
ss = ApplicationQSettings()
print ss
#ss.setValue('morning', 'early')
#ss.setValue('main_window/x', 40)
print ss.fileName()
print ss.applicationName()
print ss.organizationName()
print ss.status()
ss.setKey('billy/goat', 'gruff')
for key in ss.allKeys():
print str(key), ss.getKey(key), ss._keySplit_(key)
ss.resetDefaults()
if __name__ == '__main__':
qmain() | PypiClean |
/Axelrod-4.13.0.tar.gz/Axelrod-4.13.0/axelrod/strategies/hunter.py | from typing import List, Optional, Tuple
from axelrod._strategy_utils import detect_cycle
from axelrod.action import Action
from axelrod.player import Player
C, D = Action.C, Action.D
class DefectorHunter(Player):
"""A player who hunts for defectors.
Names:
- Defector Hunter: Original name by Karol Langner
"""
name = "Defector Hunter"
classifier = {
"memory_depth": float("inf"), # Long memory
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def strategy(self, opponent: Player) -> Action:
"""Actual strategy definition that determines player's action."""
if (
len(self.history) >= 4
and len(opponent.history) == opponent.defections
):
return D
return C
class CooperatorHunter(Player):
"""A player who hunts for cooperators.
Names:
- Cooperator Hunter: Original name by Karol Langner
"""
name = "Cooperator Hunter"
classifier = {
"memory_depth": float("inf"), # Long memory
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def strategy(self, opponent: Player) -> Action:
"""Actual strategy definition that determines player's action."""
if (
len(self.history) >= 4
and len(opponent.history) == opponent.cooperations
):
return D
return C
def is_alternator(history: List[Action]) -> bool:
for i in range(len(history) - 1):
if history[i] == history[i + 1]:
return False
return True
class AlternatorHunter(Player):
"""A player who hunts for alternators.
Names:
- Alternator Hunter: Original name by Karol Langner
"""
name = "Alternator Hunter"
classifier = {
"memory_depth": float("inf"), # Long memory
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self) -> None:
super().__init__()
self.is_alt = False
def strategy(self, opponent: Player) -> Action:
"""Actual strategy definition that determines player's action."""
if len(opponent.history) < 6:
return C
if len(self.history) == 6:
self.is_alt = is_alternator(opponent.history)
if self.is_alt:
return D
return C
class CycleHunter(Player):
"""Hunts strategies that play cyclically, like any of the Cyclers,
Alternator, etc.
Names:
- Cycle Hunter: Original name by Marc Harper
"""
name = "Cycle Hunter"
classifier = {
"memory_depth": float("inf"), # Long memory
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self) -> None:
super().__init__()
self.cycle = None # type: Optional[Tuple[Action]]
def strategy(self, opponent: Player) -> Action:
"""Actual strategy definition that determines player's action."""
if self.cycle:
return D
cycle = detect_cycle(opponent.history, min_size=3)
if cycle:
if len(set(cycle)) > 1:
self.cycle = cycle
return D
return C
class EventualCycleHunter(CycleHunter):
"""Hunts strategies that eventually play cyclically.
Names:
- Eventual Cycle Hunter: Original name by Marc Harper
"""
name = "Eventual Cycle Hunter"
    def strategy(self, opponent: Player) -> Action:
"""Actual strategy definition that determines player's action."""
if len(opponent.history) < 10:
return C
if len(opponent.history) == opponent.cooperations:
return C
if len(opponent.history) % 10 == 0:
# recheck
self.cycle = detect_cycle(opponent.history, offset=10, min_size=3)
if self.cycle:
return D
else:
return C
class MathConstantHunter(Player):
"""A player who hunts for mathematical constant players.
Names:
    - Math Constant Hunter: Original name by Karol Langner
"""
name = "Math Constant Hunter"
classifier = {
"memory_depth": float("inf"), # Long memory
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def strategy(self, opponent: Player) -> Action:
"""
Check whether the number of cooperations in the first and second halves
of the history are close. The variance of the uniform distribution (1/4)
is a reasonable delta but use something lower for certainty and avoiding
false positives. This approach will also detect a lot of random players.
"""
n = len(self.history)
if n >= 8 and opponent.cooperations and opponent.defections:
start1, end1 = 0, n // 2
start2, end2 = n // 4, 3 * n // 4
start3, end3 = n // 2, n
count1 = opponent.history[start1:end1].count(C) + self.history[
start1:end1
].count(C)
count2 = opponent.history[start2:end2].count(C) + self.history[
start2:end2
].count(C)
count3 = opponent.history[start3:end3].count(C) + self.history[
start3:end3
].count(C)
ratio1 = 0.5 * count1 / (end1 - start1)
ratio2 = 0.5 * count2 / (end2 - start2)
ratio3 = 0.5 * count3 / (end3 - start3)
if abs(ratio1 - ratio2) < 0.2 and abs(ratio1 - ratio3) < 0.2:
return D
return C
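# Example usage (sketch): hunters are ordinary Axelrod players, so they can be run
# in a match against any other strategy; the opponent and turn count here are
# arbitrary choices.
#
#     import axelrod as axl
#     match = axl.Match((MathConstantHunter(), axl.Alternator()), turns=20)
#     match.play()   # the hunter may switch to D once the cooperation ratio looks constant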
class RandomHunter(Player):
"""A player who hunts for random players.
Names:
- Random Hunter: Original name by Karol Langner
"""
name = "Random Hunter"
classifier = {
"memory_depth": float("inf"), # Long memory
"stochastic": False,
"long_run_time": False,
"inspects_source": False,
"manipulates_source": False,
"manipulates_state": False,
}
def __init__(self) -> None:
self.countCC = 0
self.countDD = 0
super().__init__()
def strategy(self, opponent: Player) -> Action:
"""
A random player is unpredictable, which means the conditional frequency
of cooperation after cooperation, and defection after defections, should
be close to 50%... although how close is debatable.
"""
# Update counts
if len(self.history) > 1:
if self.history[-2] == C and opponent.history[-1] == C:
self.countCC += 1
if self.history[-2] == D and opponent.history[-1] == D:
self.countDD += 1
n = len(self.history)
if n > 10:
probabilities = []
if self.cooperations > 5:
probabilities.append(self.countCC / self.cooperations)
if self.defections > 5:
probabilities.append(self.countDD / self.defections)
if probabilities and all(
[abs(p - 0.5) < 0.25 for p in probabilities]
):
return D
return C | PypiClean |
/BanglaSpeech2Text-1.0.7-py3-none-any.whl/banglaspeech2text/cli.py | import argparse
from mimetypes import guess_type
def is_audio_file(filename):
    # guess_type returns (None, None) for unknown types, so guard before startswith
    mime_type = guess_type(filename)[0]
    return mime_type is not None and mime_type.startswith("audio")
def use_mic(stt):
import speech_recognition as sr
r = sr.Recognizer()
while True:
try:
with sr.Microphone() as source:
print("Say something!")
r.adjust_for_ambient_noise(source)
audio = r.listen(source)
output = stt.recognize(audio)
print(output)
except KeyboardInterrupt:
print("Exiting...")
exit()
except Exception as e:
print(e)
continue
def main():
parser = argparse.ArgumentParser(description="Bangla Speech to Text")
parser.add_argument(
"input",
metavar="INPUT",
type=str,
nargs="*",
help="input file(s) or list of files",
)
parser.add_argument("-gpu", action="store_true", help="use gpu", default=False)
parser.add_argument("-c", "--cache", type=str, help="cache directory", default=None)
parser.add_argument("-o", "--output", type=str, help="output directory")
parser.add_argument("-m", "--model", type=str, help="model name", default="base")
parser.add_argument(
"-s", "--split", action="store_true", help="split audio file", default=False
)
parser.add_argument(
"-sm", "--min_silence_length", type=int, help="min_silence_length", default=500
)
parser.add_argument(
"-st", "--silence_thresh", type=int, help="silence_thresh", default=-16
)
parser.add_argument("-sp", "--padding", type=int, help="padding", default=300)
parser.add_argument("--list", action="store_true", help="list of available models")
parser.add_argument("--info", action="store_true", help="show model info")
parser.add_argument("--mic", action="store_true", help="use microphone")
args = parser.parse_args()
from banglaspeech2text.utils import nice_model_list
if args.list:
print(
f"{nice_model_list()}\n\nFor more models, visit https://huggingface.co/models?pipeline_tag=automatic-speech-recognition&language=bn&sort=likes"
)
return
from banglaspeech2text.speech2text import Speech2Text, Model
if args.info:
model = Model(
args.model,
args.cache,
load_pieline=False,
)
print(model)
return
if not args.input and not args.mic:
parser.print_help()
return
sst = Speech2Text(args.model, args.cache, args.gpu)
if args.mic:
use_mic(sst)
return
output = ""
audio_files = []
for filename in args.input:
if is_audio_file(filename):
audio_files.append(filename)
else:
with open(filename, "r") as f:
audio_files.extend([line.strip() for line in f])
n = len(audio_files)
for filename in audio_files:
output += sst.recognize(
filename,
split=args.split,
min_silence_length=args.min_silence_length,
silence_threshold=args.silence_thresh,
padding=args.padding,
)
if n > 1:
output += "\n" + "=" * 50 + "\n"
n -= 1
if args.output:
with open(args.output, "w") as f:
f.write(output)
else:
print(output)
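# Example invocations (sketch; assumes the module is run with `python -m` or wired
# up as a console script by the package):
#
#     python -m banglaspeech2text.cli recording.wav -m base -o transcript.txt
#     python -m banglaspeech2text.cli --mic
#     python -m banglaspeech2text.cli --list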
if __name__ == "__main__":
main() | PypiClean |
/CRIkit2-0.4.4.tar.gz/CRIkit2-0.4.4/crikit/ui/dialog_subResidualOptions.py | import sys as _sys
# Generic imports for QT-based programs
from PyQt5.QtWidgets import (QApplication as _QApplication,
QDialog as _QDialog)
# Import from Designer-based GUI
from crikit.ui.qt_SubResidualOptions import Ui_Dialog as Ui_ResidualOptions ### EDIT ###
# Generic imports for MPL-incorporation
import matplotlib as _mpl
_mpl.use('Qt5Agg')
_mpl.rcParams['font.family'] = 'sans-serif'
_mpl.rcParams['font.size'] = 10
class DialogSubResidualOptions(_QDialog):
"""
"""
RESIDUAL_FREQ = [-1500, -400]
def __init__(self, parent = None):
super(DialogSubResidualOptions, self).__init__(parent) ### EDIT ###
self.ui = Ui_ResidualOptions() ### EDIT ###
self.ui.setupUi(self) ### EDIT ###
        self.ui.spinBoxMin.setValue(self.RESIDUAL_FREQ[0])
self.ui.spinBoxMax.setValue(self.RESIDUAL_FREQ[1])
@staticmethod
def dialogSubResidualOptions(parent = None,
imgloaded = False,
nrbloaded = False):
"""
Retrieve dark subtraction dialog results
Parameters
----------
imgloaded : (bool)
Is there an HSI image loaded?
nrbloaded : (bool)
Is there an NRB loaded?
Returns
----------
out : dict{'submain' : bool, 'subnrb' : bool, 'subrange' : list}
In order: subtract residual from image, subtract residual from NRB,
range to subtract from.
"""
dialog = DialogSubResidualOptions(parent)
# If nrb loaded, check and enable checkbox
dialog.ui.checkBoxBG.setChecked(nrbloaded)
dialog.ui.checkBoxBG.setEnabled(nrbloaded)
dialog.subnrb = nrbloaded
# If img is loaded, check and enable checkbox
dialog.ui.checkBoxMain.setChecked(imgloaded)
dialog.ui.checkBoxMain.setEnabled(imgloaded)
dialog.submain = imgloaded
result = dialog.exec_()
if result == 1:
ret = {}
freq = [dialog.ui.spinBoxMin.value(),
dialog.ui.spinBoxMax.value()]
ret['subrange'] = freq
ret['submain'] = dialog.ui.checkBoxMain.isChecked()
ret['subnrb'] = dialog.ui.checkBoxBG.isChecked()
return ret
else:
return None
if __name__ == '__main__':
app = _QApplication(_sys.argv)
app.setStyle('Cleanlooks')
out = DialogSubResidualOptions.dialogSubResidualOptions(imgloaded=True, nrbloaded=True)
print(out)
app.exec_() | PypiClean |
/GTW-1.2.6.tar.gz/GTW-1.2.6/__test__/migration.py |
from __future__ import print_function
from _GTW.__test__.model import *
class _Migration_Scaffold_ (Scaffold.__class__) :
Backend_Parameters = dict \
( Scaffold.Backend_Parameters
, HPS = "'hps:///test.hps'"
, SQL = "'sqlite:///test.sql'"
, sq = "'sqlite:///test.sql'"
)
# end class _Migration_Scaffold_
Scaffold = _Migration_Scaffold_ ()
_test_code = r"""
>>> scope = Scaffold.scope (%(p1)s, %(n1)s) # doctest:+ELLIPSIS
Creating new scope MOMT__...
>>> apt_s, url_s = scope.app_type, scope.db_url
>>> PAP = scope.PAP
>>> SRM = scope.SRM
>>> Auth = scope.Auth
>>> x = SRM.Boat_Class (u"29er", max_crew = 2)
>>> x = SRM.Boat_Class (u"420er", max_crew = 2)
>>> x = SRM.Boat_Class (u"470er", max_crew = 2)
>>> x = SRM.Boat_Class (u"49er", max_crew = 2)
>>> x = SRM.Boat_Class (u"Aquila Kiel", max_crew = 3)
>>> sw= x.copy (u"Aquila Schwert", max_crew = 3)
>>> x = SRM.Boat_Class (u"Fam", max_crew = 3)
>>> x = SRM.Boat_Class (u"Finn-Dinghy", max_crew = 1)
>>> x = SRM.Boat_Class (u"Korsar", max_crew = 2)
>>> x = SRM.Boat_Class (u"Laser", max_crew = 1)
>>> x = SRM.Boat_Class (u"Laser 4.7", max_crew = 1)
>>> x = SRM.Boat_Class (u"Laser Master", max_crew = 1)
>>> x = SRM.Boat_Class (u"Laser Radial", max_crew = 1)
>>> x = SRM.Boat_Class (u"O-Jolle", max_crew = 1)
>>> x = SRM.Boat_Class (u"Optimist", max_crew = 1)
>>> x = SRM.Boat_Class (u"Pirat Regatta", max_crew = 2)
>>> x = SRM.Boat_Class (u"Pirat Klassik", max_crew = 2)
>>> x = SRM.Boat_Class (u"Pirat Schulboot", max_crew = 2)
>>> x = SRM.Boat_Class (u"Pirat", max_crew = 2)
>>> x = SRM.Boat_Class (u"Robby Jolle", max_crew = 2)
>>> x = SRM.Boat_Class (u"Seascape 18", max_crew = 4)
>>> x = SRM.Boat_Class (u"Zoom8", max_crew = 1)
>>> sw.last_cid
7
>>> for c in scope.uncommitted_changes :
... print (c)
<Create SRM.Boat_Class ('29er', 'SRM.Boat_Class'), new-values = {'last_cid' : '1', 'max_crew' : '2'}>
<Create SRM.Boat_Class ('420er', 'SRM.Boat_Class'), new-values = {'last_cid' : '2', 'max_crew' : '2'}>
<Create SRM.Boat_Class ('470er', 'SRM.Boat_Class'), new-values = {'last_cid' : '3', 'max_crew' : '2'}>
<Create SRM.Boat_Class ('49er', 'SRM.Boat_Class'), new-values = {'last_cid' : '4', 'max_crew' : '2'}>
<Create SRM.Boat_Class ('Aquila Kiel', 'SRM.Boat_Class'), new-values = {'last_cid' : '5', 'max_crew' : '3'}>
<Copy SRM.Boat_Class ('Aquila Schwert', 'SRM.Boat_Class'), new-values = {'last_cid' : '7'}>
<Create SRM.Boat_Class ('Aquila Schwert', 'SRM.Boat_Class'), new-values = {'last_cid' : '6', 'max_crew' : '3'}>
<Create SRM.Boat_Class ('Fam', 'SRM.Boat_Class'), new-values = {'last_cid' : '8', 'max_crew' : '3'}>
<Create SRM.Boat_Class ('Finn-Dinghy', 'SRM.Boat_Class'), new-values = {'last_cid' : '9', 'max_crew' : '1'}>
<Create SRM.Boat_Class ('Korsar', 'SRM.Boat_Class'), new-values = {'last_cid' : '10', 'max_crew' : '2'}>
<Create SRM.Boat_Class ('Laser', 'SRM.Boat_Class'), new-values = {'last_cid' : '11', 'max_crew' : '1'}>
<Create SRM.Boat_Class ('Laser 4.7', 'SRM.Boat_Class'), new-values = {'last_cid' : '12', 'max_crew' : '1'}>
<Create SRM.Boat_Class ('Laser Master', 'SRM.Boat_Class'), new-values = {'last_cid' : '13', 'max_crew' : '1'}>
<Create SRM.Boat_Class ('Laser Radial', 'SRM.Boat_Class'), new-values = {'last_cid' : '14', 'max_crew' : '1'}>
<Create SRM.Boat_Class ('O-Jolle', 'SRM.Boat_Class'), new-values = {'last_cid' : '15', 'max_crew' : '1'}>
<Create SRM.Boat_Class ('Optimist', 'SRM.Boat_Class'), new-values = {'last_cid' : '16', 'max_crew' : '1'}>
<Create SRM.Boat_Class ('Pirat Regatta', 'SRM.Boat_Class'), new-values = {'last_cid' : '17', 'max_crew' : '2'}>
<Create SRM.Boat_Class ('Pirat Klassik', 'SRM.Boat_Class'), new-values = {'last_cid' : '18', 'max_crew' : '2'}>
<Create SRM.Boat_Class ('Pirat Schulboot', 'SRM.Boat_Class'), new-values = {'last_cid' : '19', 'max_crew' : '2'}>
<Create SRM.Boat_Class ('Pirat', 'SRM.Boat_Class'), new-values = {'last_cid' : '20', 'max_crew' : '2'}>
<Create SRM.Boat_Class ('Robby Jolle', 'SRM.Boat_Class'), new-values = {'last_cid' : '21', 'max_crew' : '2'}>
<Create SRM.Boat_Class ('Seascape 18', 'SRM.Boat_Class'), new-values = {'last_cid' : '22'}>
<Create SRM.Boat_Class ('Zoom8', 'SRM.Boat_Class'), new-values = {'last_cid' : '23', 'max_crew' : '1'}>
>>> scope.commit ()
>>> x = SRM.Boat (('Optimist',), 1, u"AUT")
>>> x = SRM.Boat (('Optimist',), 2, u"AUT")
>>> x = SRM.Boat (('Laser',), 3, u"AUT")
>>> x = SRM.Boat (('Seascape 18',), 14, u"AUT")
>>> scope.commit ()
>>> bc = SRM.Boat_Class.instance (u"Optimist")
>>> ys = SRM.Handicap ("Yardstick")
>>> b = SRM.Boat.instance_or_new ('Optimist', u"1107", u"AUT", raw = True)
>>> p = PAP.Person.instance_or_new (u"Tanzer", u"Christian")
>>> s = SRM.Sailor.instance_or_new (p.epk_raw, nation = u"AUT", mna_number = u"29676", raw = True) ### 1
>>> rev = SRM.Regatta_Event (u"Himmelfahrt", dict (start = u"20080501", raw = True), raw = True)
>>> reg = SRM.Regatta_C (rev, bc)
>>> reh = SRM.Regatta_H (rev, ys)
>>> bir = SRM.Boat_in_Regatta (b, reg, skipper = s)
>>> rr1 = SRM.Race_Result (bir, 1, points = 8)
>>> rr2 = SRM.Race_Result (bir, 2, points = 4)
>>> scope.commit ()
>>> sw.last_cid
7
>>> scope.MOM.Id_Entity.count
36
>>> int (scope.query_changes (parent = None).count ())
36
>>> int (scope.query_changes ().count ())
37
>>> int (scope.ems.max_cid)
37
>>> bc.set (loa = 2.43)
1
>>> SRM.Boat_Class.instance (u"Laser").set (sail_area = 7.06, loa = 4.064, beam = 1.422)
3
>>> SRM.Boat_Class.instance (u"Seascape 18").set (loa = 5.45, beam = 2.45, sail_area = 23)
3
>>> scope.commit ()
>>> MOM.B = True
>>> print (sw.last_cid) ### X
7
>>> scope.MOM.Id_Entity.count
36
>>> print (sw.last_cid) ### Y
7
>>> MOM.B = False
>>> int (scope.query_changes ().count ())
40
>>> int (scope.ems.max_cid)
40
>>> len (scope.SRM.Regatta_Event.query ().first ().regattas)
2
>>> b = SRM.Boat_Class.query (Q.RAW.name == u"Aquila Schwert").one ()
>>> print (b.last_cid, sw.last_cid, b is sw)
7 7 True
>>> c = scope.query_changes (cid = b.last_cid).one ()
>>> print (c) ### change in source scope
<Copy SRM.Boat_Class ('Aquila Schwert', 'SRM.Boat_Class'), new-values = {'last_cid' : '7'}>
<Create SRM.Boat_Class ('Aquila Schwert', 'SRM.Boat_Class'), new-values = {'last_cid' : '6', 'max_crew' : '3'}>
>>> len (c.children)
1
>>> int (c.cid), int (c.children [0].cid)
(7, 6)
>>> [s for s in scope if not s.last_cid] ### before expunge
[]
>>> sum ((not s.last_cid) for s in scope), sum (bool (s.last_cid) for s in scope) ### before expunge
(0, 36)
>>> if hasattr (scope.ems.session, "expunge") : scope.ems.session.expunge ()
>>> [s for s in scope if not s.last_cid] ### after expunge
[]
>>> sum ((not s.last_cid) for s in scope), sum (bool (s.last_cid) for s in scope) ### after expunge
(0, 36)
>>> scope.query_changes (type_name = "SRM.Boat_Class").count ()
26
>>> b = scope.SRM.Boat_Class.query (Q.RAW.name == u"Aquila Schwert").one ()
>>> print (b.last_cid) ### before migration
7
Save contents of scope to database and destroy scope:
>>> scope.ems.compact ()
>>> scope.destroy ()
Now, we migrate all objects and the change history to a new backend. All
entities, changes, cids, and pids should be identical afterwards:
>>> db_url = "hps:////tmp/gtw_test_migration.gtw"
>>> apt_t, url_t = Scaffold.app_type_and_url (db_url)
>>> apt_t.delete_database (url_t) # 1
>>> db_man_s = Scaffold.DB_Man.connect (apt_s, url_s)
>>> db_man_t = Scaffold.DB_Man.create (apt_t, url_t, db_man_s)
>>> db_man_s.destroy ()
>>> db_man_t.destroy ()
>>> scope_s = Scaffold.scope (url_s, create = False) # doctest:+ELLIPSIS
Loading scope MOMT__...
>>> scope_t = Scaffold.scope (url_t, create = False) # doctest:+ELLIPSIS
Loading scope MOMT__...
>>> tuple (s.MOM.Id_Entity.count for s in (scope_s, scope_t))
(36, 36)
>>> all (s.as_pickle_cargo () == t.as_pickle_cargo () for (s, t) in zip (scope_s, scope_t))
True
>>> int (scope_t.ems.max_cid)
40
>>> len (scope_t.SRM.Regatta_Event.query ().first ().regattas)
2
>>> [s for (s, t) in zip (scope_s, scope_t) if s.last_cid != t.last_cid or not s.last_cid]
[]
>>> [s.query_changes (type_name = "SRM.Boat_Class").count () for s in (scope_t, scope_s)]
[26, 26]
>>> bs = scope_s.SRM.Boat_Class.query (Q.RAW.name == u"Aquila Schwert").one ()
>>> bt = scope_t.SRM.Boat_Class.query (Q.RAW.name == u"Aquila Schwert").one ()
>>> print (bs.last_cid, bt.last_cid) ### migrated to HPS
7 7
Now we delete the original database and then migrate back into the
original app-type/backend. Again, all entities, changes, cids,
and pids should still be identical:
>>> scope_s.destroy ()
>>> scope_t.destroy ()
>>> apt_s.delete_database (url_s) # 2
>>> db_man_t = Scaffold.DB_Man.connect (apt_t, url_t)
>>> db_man_u = Scaffold.DB_Man.create (apt_s, url_s, db_man_t)
>>> db_man_t.destroy ()
>>> db_man_u.destroy ()
>>> scope_t = Scaffold.scope (url_t, create = False) # doctest:+ELLIPSIS
Loading scope MOMT__...
>>> scope_u = Scaffold.scope (url_s, create = False) # doctest:+ELLIPSIS
Loading scope MOMT__...
>>> tuple (s.MOM.Id_Entity.count for s in (scope_t, scope_u))
(36, 36)
>>> all (s.as_pickle_cargo () == t.as_pickle_cargo () for (s, t) in zip (scope_t, scope_u))
True
>>> int (scope_u.ems.max_cid)
40
>>> len (scope_u.SRM.Regatta_Event.query ().first ().regattas)
2
>>> [s for (s, t) in zip (scope_t, scope_u) if s.last_cid != t.last_cid or not s.last_cid]
[]
>>> b = scope_u.SRM.Boat_Class.query (Q.RAW.name == u"Aquila Schwert").one ()
>>> print (b.last_cid) ### after migration
7
>>> c = scope_u.query_changes (cid = b.last_cid).one () ### mig scope
>>> print (c)
<Copy SRM.Boat_Class ('Aquila Schwert', 'SRM.Boat_Class'), new-values = {'last_cid' : '7'}>
<Create SRM.Boat_Class ('Aquila Schwert', 'SRM.Boat_Class'), new-values = {'last_cid' : '6', 'max_crew' : '3'}>
>>> len (c.children)
1
>>> int (c.cid), int (c.children [0].cid)
(7, 6)
>>> scope_u.query_changes (type_name = "SRM.Boat_Class").count ()
26
Lets clean up::
>>> scope_t.destroy ()
>>> scope_u.destroy ()
>>> apt_t.delete_database (url_t) # 3
>>> apt_s.delete_database (url_s) # 4
"""
__test__ = Scaffold.create_test_dict \
( dict
( test_code = _test_code
)
, ignore = ("HPS", )
)
### __END__ migration | PypiClean |
/Buoy-Client-0.0.2.tar.gz/Buoy-Client-0.0.2/buoy/client/notification/common.py |
import json
import logging
import re
from enum import IntEnum, unique
from buoy.client.device.common.item import BaseItem, DataEncoder
from buoy.client.device.common.nmea0183 import WIMDA
from buoy.client.device.currentmeter.item import ACMPlusItem
from buoy.client.notification.exceptions import ValidationError
logger = logging.getLogger(__name__)
@unique
class NoticeType(IntEnum):
NOTIFICATION = 1
DATA = 2
class NotificationLevel(IntEnum):
LOW = 10
NORMAL = 5
HIGHT = 3
CRITICAL = 1
class NoticeBase(BaseItem):
def __init__(self, notice_type: NoticeType, **kwargs):
self.level = kwargs.pop('level', NotificationLevel.NORMAL)
self.daemon = kwargs.pop('daemon', None)
super(NoticeBase, self).__init__(**kwargs)
self.type = notice_type
@property
def level(self):
"""
:return: Notice level
:rtype: Enum
"""
return self._level
@level.setter
def level(self, value):
if type(value) is NotificationLevel:
value = value.value
self._level = value
@property
def type(self):
"""
:return: Notice type
:rtype: Enum
"""
return self._type
@type.setter
def type(self, value):
if type(value) is NoticeType:
value = value.value
self._type = value
@property
def daemon(self):
return self._daemon
@daemon.setter
def daemon(self, value):
self._daemon = value
def to_json(self):
return json.dumps(self, sort_keys=True, cls=DataEncoder)
def __str__(self):
return "{datetime} - {level} - {type}".format(**dict(self))
class NoticeData(NoticeBase):
def __init__(self, **kwargs):
self.data = kwargs.pop('data', None)
self.device = kwargs.pop('device', None)
super(NoticeData, self).__init__(notice_type=NoticeType.DATA, **kwargs)
@property
def data(self):
"""
:return: Notification level
:rtype: Enum
"""
return self._data
@data.setter
def data(self, input):
value = None
cls = type(input)
if issubclass(cls, BaseItem):
value = input
elif type(input) is dict:
if self.device == 'ACMPlus':
value = ACMPlusItem(input)
elif self.device == 'PB200':
value = WIMDA(input)
self._data = value
@property
def device(self):
return self._device
@device.setter
def device(self, value: str):
self._device = value
class Notification(NoticeBase):
def __init__(self, message: str, **kwargs):
self.phone = kwargs.pop('phone', None)
super(Notification, self).__init__(notice_type=NoticeType.NOTIFICATION, **kwargs)
self.message = message
@property
def message(self):
"""
:return: Message
:rtype: Decimal
"""
return self._message
@message.setter
def message(self, value):
self._message = value
@property
def phone(self):
return self._phone
@phone.setter
def phone(self, value):
if value:
self.validate_mobile(value)
self._phone = value
@staticmethod
def validate_mobile(value):
""" Raise a ValidationError if the value looks like a mobile telephone number.
"""
rule = re.compile(r'^(?:\+?34)?\d{9,13}$|^\d{4}$')
if not rule.search(value):
msg = u"Invalid mobile number."
raise ValidationError(msg)
def __str__(self):
return "{datetime} - {level} - {type} - {message} - {phone}".format(**dict(self)) | PypiClean |
/AZMusicAPI-1.2.8-py3-none-any.whl/MusicAPI/__init__.py
import requests
# Copyright AZ Studio. All rights reserved.
# This project is for entertainment only. Do not use it for illegal purposes; AZ Studio is not responsible for any violations.
def getmusic(keyword):
# Search for songs matching the given keyword; the keyword can be an artist, song title or album name.
# keyword: search keyword
# Return value: a list of results on success; "Error 0": no results; "Error 1": empty keyword
headers = {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh-CN,zh;q=0.9",
"Connection": "keep-alive",
"Cookie": "Hm_lvt_cdb524f42f0ce19b169a8071123a4797=1690028471; _ga=GA1.2.291025627.1690028471; _gid=GA1.2.1906759595.1690028471; SL_G_WPT_TO=zh; SL_GWPT_Show_Hide_tmp=1; SL_wptGlobTipTmp=1; Hm_lpvt_cdb524f42f0ce19b169a8071123a4797=1690028564; _gat=1; Hm_Iuvt_cdb524f42f0ce19b169b8072123a4727=WjT5ktibAJwfEFyQFeJAEFcTxpwYHCeK; _ga_ETPBRPM9ML=GS1.2.1690028471.1.1.1690028577.47.0.0",
"Host": "www.kuwo.cn",
"Referer": "https://www.kuwo.cn/search/list?key=%E7%AC%BC",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"Secret": "09362e5991f0846bff719a26e1a0e0ac7bd0b3c9f8ab5fdf000c7b88ecfa727000a0ba6e",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
"sec-ch-ua": "\"Not.A/Brand\";v=\"8\", \"Chromium\";v=\"114\", \"Google Chrome\";v=\"114\"",
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": "\"Windows\""
}
search_url = 'https://www.kuwo.cn/api/www/search/searchMusicBykeyWord?'
search_data = {
'key': keyword,
'pn': '1',
'rn': '20',
'httpsStatus': '1',
'reqId': '7d9e2c60-288a-11ee-9cdf-3f476f1ba25e',
"plat":"web_www"
}
if len(keyword)>0:
response_data = requests.get(search_url, params=search_data, headers=headers, timeout=20).json()
songs_data = response_data['data']['list']
if int(response_data['data']['total']) <= 0:
return "Error 0"
else:
data=[]
for i in range(len(songs_data)):
try:
data.append({"songname":songs_data[i]['name'],"singer":songs_data[i]['artist'],"album":songs_data[i]['album'],"pic":songs_data[i]['albumpic'],"rid":songs_data[i]['rid'],"reqId":response_data['reqId']})
except:
pass
return data
else:
return "Error 1"
def geturl(rid,reqId):
# Get the audio URL of a song.
# rid: the rid returned by getmusic for the song to resolve; reqId: the reqId returned by getmusic
# Return value: the audio URL on success; "Error 3": the song requires individual purchase or rid/reqId is invalid
headers = {
"Accept": "application/json, text/plain, */*",
"Accept-Encoding": "gzip, deflate, br",
"Accept-Language": "zh-CN,zh;q=0.9",
"Connection": "keep-alive",
"Cookie": "Hm_lvt_cdb524f42f0ce19b169a8071123a4797=1690028471; _ga=GA1.2.291025627.1690028471; _gid=GA1.2.1906759595.1690028471; SL_G_WPT_TO=zh; SL_GWPT_Show_Hide_tmp=1; SL_wptGlobTipTmp=1; Hm_lpvt_cdb524f42f0ce19b169a8071123a4797=1690028564; _gat=1; Hm_Iuvt_cdb524f42f0ce19b169b8072123a4727=WjT5ktibAJwfEFyQFeJAEFcTxpwYHCeK; _ga_ETPBRPM9ML=GS1.2.1690028471.1.1.1690028577.47.0.0",
"Host": "www.kuwo.cn",
"Referer": "https://www.kuwo.cn/search/list?key=%E7%AC%BC",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"Secret": "09362e5991f0846bff719a26e1a0e0ac7bd0b3c9f8ab5fdf000c7b88ecfa727000a0ba6e",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
"sec-ch-ua": "\"Not.A/Brand\";v=\"8\", \"Chromium\";v=\"114\", \"Google Chrome\";v=\"114\"",
"sec-ch-ua-mobile": "?0",
"sec-ch-ua-platform": "\"Windows\""
}
music_url = 'https://www.kuwo.cn/api/v1/www/music/playUrl?mid={}&type=music&httpsStatus=1&reqId={}&plat=web_www&from='.format(rid, reqId)
response_data = requests.get(music_url,headers=headers).json()
try:
song_url = response_data['data'].get('url')
except:
return "Error 3"
return song_url
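# --- Usage sketch (illustrative only; requires network access to the kuwo.cn API) ---
# Chains the two helpers above: search by keyword, then resolve the first hit to its
# audio URL. The keyword is an arbitrary example value.
from MusicAPI import getmusic, geturl
results = getmusic("Jay Chou")                            # list of dicts, or "Error 0" / "Error 1"
if isinstance(results, list) and results:
    first = results[0]
    audio_url = geturl(first["rid"], first["reqId"])      # audio link, or "Error 3"
    print(first["songname"], first["singer"], audio_url)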
/IQM_Vis-0.2.5.80-py3-none-any.whl/IQM_Vis/metrics/NLPD_torch/utils/fourier.py
import math
import torch
from torchinterp1d import Interp1d
__all__ = ['harmonic_column', 'raised_cosine', 'steer_to_harmonics',
'point_operation_filter']
def harmonic_column(harmonic: torch.Tensor,
angles: torch.Tensor,
phase: str) -> torch.Tensor:
"""
For a single harmonic, generates the values necessary to compute the
steering matrix.
For the description of the input parameters and exceptions raised by
this function, please see the documentation of the
:func:`expert.models.pyramids.SteerableWavelet.steer_to_harmonics`
function.
Returns
-------
column : torch.Tensor
Column to create a steer matrix for harmonic.
"""
if harmonic == 0:
column = torch.ones(angles.size(0), 1)
else:
args = harmonic * angles
sin_args = torch.sin(args).unsqueeze(1)
cos_args = torch.cos(args).unsqueeze(1)
if phase == 'sin':
column = torch.cat([sin_args, -cos_args], axis=1)
else:
column = torch.cat([cos_args, sin_args], axis=1)
return column
def raised_cosine(width: int = 1,
position: float = -0.5,
func_min: float = 0.0,
func_max: float = 1.0,
size: int = 256) -> torch.Tensor:
"""
Raised cosine function.
Returns the X and Y values of a raised cosine soft threshold function.
Parameters
----------
width : int, optional (default=1)
Width of region for transition.
position : float, optional (default=-0.5)
The location of the center of threshold.
func_min : float, optional (default=0.0)
Value to the left of the transition.
func_max : float, optional (default=1.0)
Value to the right of the transition.
size : int, optional (default=256)
Number of points to sample is `size+2`.
Returns
-------
X : torch.Tensor
X values for rasied cosine function.
Y : torch.Tensor
Y values for raised cosine function.
"""
X = math.pi * torch.arange(-size-1, 2)/ (2 * size)
Y = func_min + (func_max-func_min) * torch.cos(X)**2
Y[0] = Y[1]
Y[-1] = Y[-2]
X = position + (2*width/math.pi) * (X + math.pi/4)
return (X, Y)
def steer_to_harmonics(harmonics: torch.Tensor,
angles: torch.Tensor,
phase: str = 'sin') -> torch.Tensor:
"""
Maps a directional basis set onto the angular Fourier harmonics.
Parameters
----------
harmonics : torch.Tensor
angles : torch.Tensor
phase : str, optional (default='sin')
Raises
------
TODO: error checking input dimensions
Returns
-------
harmonics_matrix : torch.Tensor
"""
num = 2*harmonics.size(0) - (harmonics == 0).sum()
zero_indices = harmonics == 0
zero_imtx = torch.ones(angles.size(0), zero_indices.sum())
non_zero_imtx = angles.repeat(num-zero_indices.sum(), 1)
columns = [harmonic_column(h, angles, phase) for h in harmonics]
matrix = torch.cat(columns, axis=1)
harmonic_matrix = torch.pinverse(matrix)
return harmonic_matrix
def point_operation_filter(image : torch.Tensor,
samples : torch.Tensor,
origin : float,
increment : float) -> torch.Tensor:
"""
Performs 1-D Interpolation.
Parameters
----------
image : torch.Tensor
samples : torch.Tensor
origin : float
increment : float
Returns
-------
mask : torch.Tensor
Values that are interpolated and reshaped to shape of image.
"""
interp_X = origin + increment*torch.arange(0, samples.size(0))
interpolated_values = Interp1d()(interp_X, samples, torch.flatten(image))
mask = interpolated_values.reshape(image.size())
return mask
def roll(x, shift, dim):
"""
Similar to np.roll but applies to PyTorch Tensors
https://github.com/khammernik/sigmanet
"""
if isinstance(shift, (tuple, list)):
assert len(shift) == len(dim)
for s, d in zip(shift, dim):
x = roll(x, s, d)
return x
shift = shift % x.size(dim)
if shift == 0:
return x
left = x.narrow(dim, 0, x.size(dim) - shift)
right = x.narrow(dim, x.size(dim) - shift, shift)
return torch.cat((right, left), dim=dim)
def fftshift(x, dim=None):
"""
Similar to np.fft.fftshift but applies to PyTorch Tensors
"""
if dim is None:
dim = tuple(range(x.dim()))
shift = [dim // 2 for dim in x.shape]
elif isinstance(dim, int):
shift = x.shape[dim] // 2
else:
shift = [x.shape[i] // 2 for i in dim]
return roll(x, shift, dim)
def ifftshift(x, dim=None):
"""
Similar to np.fft.ifftshift but applies to PyTorch Tensors
"""
if dim is None:
dim = tuple(range(x.dim()))
shift = [(dim + 1) // 2 for dim in x.shape]
elif isinstance(dim, int):
shift = (x.shape[dim] + 1) // 2
else:
shift = [(x.shape[i] + 1) // 2 for i in dim]
return roll(x, shift, dim)
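# --- Usage sketch (illustrative only, not part of the original module) ---
# raised_cosine() samples the soft threshold, while fftshift()/ifftshift() mirror
# their NumPy counterparts for tensors; the sizes below are arbitrary and the
# import path is taken from the file location above.
import torch
from IQM_Vis.metrics.NLPD_torch.utils.fourier import raised_cosine, fftshift, ifftshift
X, Y = raised_cosine(width=1, position=-0.5, func_min=0.0, func_max=1.0, size=256)
print(X.shape, Y.shape)          # matching 1-D shapes; Y rises from ~func_min to func_max
x = torch.arange(6)
print(fftshift(x))               # tensor([3, 4, 5, 0, 1, 2]), like np.fft.fftshift
print(ifftshift(fftshift(x)))    # round-trips back to tensor([0, 1, 2, 3, 4, 5])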
/Office365-REST-Python-Client-2.4.3.tar.gz/Office365-REST-Python-Client-2.4.3/office365/sharepoint/contenttypes/content_type.py
from office365.runtime.queries.service_operation import ServiceOperationQuery
from office365.runtime.paths.resource_path import ResourcePath
from office365.runtime.types.collections import StringCollection
from office365.sharepoint.base_entity import BaseEntity
from office365.sharepoint.contenttypes.content_type_id import ContentTypeId
from office365.sharepoint.contenttypes.fieldlinks.collection import FieldLinkCollection
from office365.sharepoint.fields.collection import FieldCollection
from office365.sharepoint.translation.user_resource import UserResource
class ContentType(BaseEntity):
"""
Specifies a content type.
A named and uniquely identifiable collection of settings and fields that store metadata for individual items
in a SharePoint list. One or more content types can be associated with a list, which restricts the contents
to items of those types
"""
def reorder_fields(self, field_names):
"""
The ReorderFields method is called to change the order in which fields appear in a content type.
:param list[str] field_names: Rearranges the collection of fields in the order in which field internal
names are specified.
"""
payload = {
"fieldNames": StringCollection(field_names)
}
qry = ServiceOperationQuery(self, "ReorderFields", None, payload)
self.context.add_query(qry)
return self
def update(self, update_children):
"""
Updates the content type, and any child objects of the content type if specified,
with any changes made to the content type.
:param bool update_children: Specifies whether changes propagate to child objects of the content type.
"""
super(ContentType, self).update()
if update_children:
payload = {"updateChildren": update_children}
qry = ServiceOperationQuery(self, "Update", None, payload)
self.context.add_query(qry)
return self
@property
def display_form_template_name(self):
"""
Specifies the name of a custom display form template to use for list items that have been assigned
the content type.
"""
return self.properties.get("DisplayFormTemplateName", None)
@property
def display_form_url(self):
"""
Specifies the URL of a custom display form to use for list items that have been assigned the content type.
"""
return self.properties.get("DisplayFormUrl", None)
@property
def id(self):
"""Specifies an identifier for the content type as specified in [MS-WSSTS] section 2.1.2.8.1.
:rtype: ContentTypeId
"""
return self.properties.get("Id", ContentTypeId())
@property
def string_id(self):
"""A string representation of the value of the Id
:rtype: str
"""
return self.properties.get("StringId", None)
@property
def name(self):
"""Gets the name of the content type.
:rtype: str or None
"""
return self.properties.get("Name", None)
@name.setter
def name(self, value):
"""Sets the name of the content type.
:rtype: str or None
"""
self.set_property("Name", value)
@property
def new_form_client_side_component_properties(self):
"""
:rtype: str or None
"""
return self.properties.get("NewFormClientSideComponentProperties", None)
@property
def description(self):
"""Gets the description of the content type.
:rtype: str or None
"""
return self.properties.get("Description", None)
@description.setter
def description(self, value):
"""Sets the description of the content type.
:type value: str
"""
self.set_property("Description", value)
@property
def description_resource(self):
"""Gets the SP.UserResource object (section 3.2.5.333) for the description of this content type"""
return self.properties.get('DescriptionResource',
UserResource(self.context, ResourcePath("DescriptionResource", self.resource_path)))
@property
def group(self):
"""Gets the group of the content type.
:rtype: str or None
"""
return self.properties.get("Group", None)
@group.setter
def group(self, value):
"""Sets the group of the content type.
:type value: str
"""
self.set_property("Group", value)
@property
def hidden(self):
"""Specifies whether the content type is unavailable for creation or usage directly from a user interface."""
return self.properties.get("Hidden", None)
@property
def js_link(self):
"""Gets or sets the JSLink for the content type custom form template"""
return self.properties.get("JSLink", None)
@property
def read_only(self):
"""Specifies whether changes to the content type properties are denied."""
return self.properties.get("ReadOnly", None)
@property
def name_resource(self):
"""Specifies the SP.UserResource object for the name of this content type"""
return self.properties.get('NameResource',
UserResource(self.context, ResourcePath("NameResource", self.resource_path)))
@property
def schema_xml(self):
"""Specifies the XML schema that represents the content type.
:rtype: str or None
"""
return self.properties.get("SchemaXml", None)
@property
def fields(self):
"""Gets a value that specifies the collection of fields for the content type."""
return self.properties.get('Fields',
FieldCollection(self.context, ResourcePath("Fields", self.resource_path)))
@property
def parent(self):
"""Gets the parent content type of the content type."""
return self.properties.get('Parent',
ContentType(self.context, ResourcePath("Parent", self.resource_path)))
@property
def field_links(self):
"""Specifies the collection of field links for the content type."""
return self.properties.get('FieldLinks',
FieldLinkCollection(self.context, ResourcePath("FieldLinks", self.resource_path)))
@property
def property_ref_name(self):
return "StringId"
def get_property(self, name, default_value=None):
if default_value is None:
property_mapping = {
"DescriptionResource": self.description_resource,
"FieldLinks": self.field_links,
"NameResource": self.name_resource
}
default_value = property_mapping.get(name, None)
return super(ContentType, self).get_property(name, default_value)
def set_property(self, name, value, persist_changes=True):
super(ContentType, self).set_property(name, value, persist_changes)
# fallback: create a new resource path
if name == self.property_ref_name and self._resource_path is None:
self._resource_path = self.parent_collection.get_by_id(value).resource_path
return self
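# --- Usage sketch (illustrative only) ---
# Assumes `ctx` is an authenticated office365 ClientContext and that the web exposes
# its content type collection; "0x0101" is the built-in Document content type ID and
# the field names below are placeholders.
content_type = ctx.web.content_types.get_by_id("0x0101")
content_type.reorder_fields(["Title", "Author"])          # internal field names (placeholders)
content_type.name = "Project Document"
content_type.update(update_children=True)
ctx.execute_query()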
/flask_kits-0.0.24.tar.gz/flask_kits/restful/entity/pagination.py
from flask_kits.exceptions import InvalidFormatError
class Pagination(object):
"""
Example
>>> from flask_kits.restful import entity.Pagination
>>> class ItemsPagination(Pagination):
>>> limit = 20
>>> ORDERS = ('ID', 'NAME')
>>> class ItemEntity(EntityBase):
>>> Range = Field("Range", type=ItemsPagination, location=LOC_HEADERS, default=DirectoryPagination.default)
"""
DIRECTIONS = ('ASC', 'DESC')
ORDERS = ('ID',)
start = 0
limit = 100
order_by = 'ID'
order = 'ASC'
def __init__(self, line=None):
self._parse(line)
def _parse(self, line):
line = line or ''
segments = [x.strip() for x in line.split(';', 1) if x.strip()]
if not segments:
return
try:
segment, segments = segments[0], segments[1:]
order_by, start_and_end = segment.split(' ', 1)
if order_by not in self.ORDERS:
raise InvalidFormatError()
self.order_by = order_by
start, end = start_and_end.split('..')
start = int(start or str(self.start))
self.start = max(start, 0)
# end = int(end or str(self.start + self.limit))
# if not (0 < start < end):
# raise InvalidFormatError()
# self.start = start
# self.limit = end - start
if segments:
segment, = segments
options = dict([x.split('=') for x in segment.split(',') if x.strip()])
if 'order' in options:
order = options.get('order', self.order).upper()
if order not in self.DIRECTIONS:
order = Pagination.order
self.order = order
if 'max' in options:
size = int(options.get('max'))
self.limit = min(max(size, 0), 1000)
except InvalidFormatError as e:
raise e
except Exception:
raise InvalidFormatError()
def status_and_headers(self, rowcount, total_count=None):
code = 206 if rowcount == self.limit else 200
h = {'Accept-Range': ','.join(self.ORDERS)}
total = '*' if total_count is None else total_count
content_range = '{order_by} {start}..{end}/{total}'.format(
order_by=self.order_by,
start=self.start,
end=self.start + rowcount,
total=total)
h['Content-Range'] = content_range
if code == 206:
start = self.start + self.limit
end = start + self.limit
next_range = '{order_by} {start}..{end};order={order},max={limit}'.format(
order=self.order,
start=start,
end=end,
order_by=self.order_by,
limit=self.limit
)
h['Next-Range'] = next_range
return code, h
@classmethod
def default(cls):
return cls()
@property
def order_string(self):
return "{0} {1}".format(self.order_by, self.order) | PypiClean |
/Django_patch-2.2.19-py3-none-any.whl/django/contrib/postgres/aggregates/mixins.py
from django.db.models.expressions import F, OrderBy
class OrderableAggMixin:
def __init__(self, *expressions, ordering=(), **extra):
if not isinstance(ordering, (list, tuple)):
ordering = [ordering]
ordering = ordering or []
# Transform minus sign prefixed strings into an OrderBy() expression.
ordering = (
(OrderBy(F(o[1:]), descending=True) if isinstance(o, str) and o[0] == '-' else o)
for o in ordering
)
super().__init__(*expressions, **extra)
self.ordering = self._parse_expressions(*ordering)
def resolve_expression(self, *args, **kwargs):
self.ordering = [expr.resolve_expression(*args, **kwargs) for expr in self.ordering]
return super().resolve_expression(*args, **kwargs)
def as_sql(self, compiler, connection):
if self.ordering:
ordering_params = []
ordering_expr_sql = []
for expr in self.ordering:
expr_sql, expr_params = expr.as_sql(compiler, connection)
ordering_expr_sql.append(expr_sql)
ordering_params.extend(expr_params)
sql, sql_params = super().as_sql(compiler, connection, ordering=(
'ORDER BY ' + ', '.join(ordering_expr_sql)
))
return sql, sql_params + ordering_params
return super().as_sql(compiler, connection, ordering='')
def set_source_expressions(self, exprs):
# Extract the ordering expressions because ORDER BY clause is handled
# in a custom way.
self.ordering = exprs[self._get_ordering_expressions_index():]
return super().set_source_expressions(exprs[:self._get_ordering_expressions_index()])
def get_source_expressions(self):
return self.source_expressions + self.ordering
def get_source_fields(self):
# Filter out fields contributed by the ordering expressions as
# these should not be used to determine the return type of the
# expression.
return [
e._output_field_or_none
for e in self.get_source_expressions()[:self._get_ordering_expressions_index()]
]
def _get_ordering_expressions_index(self):
"""Return the index at which the ordering expressions start."""
source_expressions = self.get_source_expressions()
return len(source_expressions) - len(self.ordering)
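# --- Usage sketch (illustrative only) ---
# The mixin backs the `ordering` argument of contrib.postgres aggregates such as
# ArrayAgg; the Book model and its fields are hypothetical.
from django.contrib.postgres.aggregates import ArrayAgg
Book.objects.aggregate(
    recent_titles=ArrayAgg("title", ordering="-published_on"),   # '-' prefix becomes OrderBy(F(...), descending=True)
)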
/AutoTransform-1.1.1a8-py3-none-any.whl/autotransform/transformer/base.py
# @black_format
"""The base class and associated classes for Transformer components."""
from __future__ import annotations
from abc import abstractmethod
from enum import Enum
from typing import Any, ClassVar, Generic, Mapping, Optional, TypeVar
from autotransform.batcher.base import Batch
from autotransform.util.component import ComponentFactory, ComponentImport, NamedComponent
TResult = TypeVar("TResult", bound=Optional[Mapping[str, Any]])
class TransformerName(str, Enum):
"""A simple enum for mapping."""
JSCODESHIFT = "jscodeshift"
LIBCST = "libcst"
OPEN_AI = "open_ai"
REGEX = "regex"
SCRIPT = "script"
class Transformer(Generic[TResult], NamedComponent):
"""The base for Transformer components. Transformers are used to execute changes to a codebase.
A Transformer takes in a Batch and then executes all changes associated with the Batch.
Attributes:
name (ClassVar[TransformerName]): The name of the component.
"""
name: ClassVar[TransformerName]
@abstractmethod
def transform(self, batch: Batch) -> TResult:
"""Execute a transformation against the provided Batch. All writing should be done via
CachedFile's write_content method or FileItem's write_content method to ensure operations
are easily accessible to testing and file content cache's are kept accurate.
Args:
batch (Batch): The Batch that will be transformed.
"""
FACTORY = ComponentFactory(
{
TransformerName.JSCODESHIFT: ComponentImport(
class_name="JSCodeshiftTransformer", module="autotransform.transformer.jscodeshift"
),
TransformerName.LIBCST: ComponentImport(
class_name="LibCSTTransformer", module="autotransform.transformer.libcst"
),
TransformerName.OPEN_AI: ComponentImport(
class_name="OpenAITransformer", module="autotransform.transformer.openai"
),
TransformerName.REGEX: ComponentImport(
class_name="RegexTransformer", module="autotransform.transformer.regex"
),
TransformerName.SCRIPT: ComponentImport(
class_name="ScriptTransformer", module="autotransform.transformer.script"
),
},
Transformer, # type: ignore [type-abstract]
"transformer.json",
)
/Flask-Wizard-0.5.28.tar.gz/flask_wizard/slack.py
from __future__ import absolute_import
from __future__ import print_function
import os
import json
import requests
import base64
import sys
import apiai
import uuid
import random
import time
from slackclient import SlackClient
from timeit import default_timer as timer
from flask import request, jsonify
from actions import *
class SlackHandler(object):
"""
The Slack handler acts as the interface to handle all requests coming
from Slack.
It parses the payload and responds.
"""
def __init__(self, pid, pad , verify_token,bot_token, ozz_guid, actions, redis_db, mongo, log):
self.redis_db = redis_db
self.mongo = mongo
self.log = log
self.sc = SlackClient(bot_token)
self.pid = pid
self.pad = pad
self.verify_token = verify_token
self.bot_token = bot_token
with open(actions,"r") as jsonFile:
self.actions = json.load(jsonFile)
if ozz_guid != "":
if ozz_guid[:4] == 'api_':
self.api = apiai.ApiAI(ozz_guid[4:])
print("Slack endpoint - /api/messages/slack")
def respond(self,*args,**kwargs):
data = request.get_data()
if(type(data) == type(b"")):
data = json.loads(data.decode())
if 'challenge' in data.keys():
clg = data['challenge']
return str(clg)
elif self.verify_token == data['token']:
if "event" in data:
if "message" == data["event"]['type']:
message = data["event"]["text"]
start = timer()
intent=None
entities=None
action=None
print (data["event"])
if 'subtype' not in data["event"]:
id = data["event"]["channel"]
if self.api:
r = self.api.text_request()
r.session_id = uuid.uuid4().hex
r.query = message
res = r.getresponse()
res = json.loads(res.read().decode('utf-8'))
intent = res["result"]["action"]
if intent == '':
intent = res["result"]["metadata"]["intentName"]
response = res["result"]["fulfillment"]["speech"]
entities = res["result"]['parameters']
if intent in self.actions:
if type(self.actions[intent]) == list:
response = random.choice(self.actions[intent])
self.send_message(id,response)
else:
#func = eval(self.actions[intent])
session = {}
session['user']= {
'id':data["event"]["channel"]
}
session['intent'] = intent
session['entities'] = entities
session['message'] = message
session['channel'] = 'slack'
message = eval(self.actions[intent])
self.send_message(id, message)
#func(session)
elif response != "":
end = timer()
runtime = str(end - start)
if self.mongo:
log_object = {"message":message,"channel":"slack","intent":intent,"entities":entities,"action":action,"response":str(response),"runtime":runtime,"time":str(time.time())}
self.mongo.db.logs.insert_one(log_object)
self.send_message(id,response)
else:
end = timer()
runtime = str(end - start)
if self.mongo:
log_object = {"message":message,"channel":"slack","intent":intent,"entities":entities,"action":action,"response":str(message),"runtime":runtime,"time":str(time.time())}
self.mongo.db.logs.insert_one(log_object)
self.send_message(id, message)
return "Responded!"
else:
print ('Error verifying token')
def send_message(self, id, text):
"""
Send the message text to recipient with id recipient.
"""
print ('Sending message', text)
if sys.version_info >= (3, 0):
message = text
else:
message = text.decode('unicode_escape')
r = requests.post('https://slack.com/api/chat.meMessage',
headers={'Content-type': 'application/x-www-form-urlencoded'},
data={"token":self.bot_token,"text":message,"as_user":True,"channel":id})
print (r.text)
"""
n = 2
while n:
if self.sc.rtm_connect():
r = self.sc.api_call("chat.postMessage", channel=id,
text=text, as_user=True)
#print (r)
break
else:
n-=1
print ('Error sending message')
""" | PypiClean |
/EthTx-0.3.22.tar.gz/ethtx/providers/etherscan/client.py
import logging
from collections import OrderedDict
from typing import Dict, Optional
import requests
from ethtx.exceptions import ProcessingException
log = logging.getLogger(__name__)
class EtherscanClient:
MODULE = "module="
ACTION = "&action="
CONTRACT_ADDRESS = "&contractaddress="
ADDRESS = "&address="
OFFSET = "&offset="
PAGE = "&page="
SORT = "&sort="
BLOCK_TYPE = "&blocktype="
TO = "&to="
VALUE = "&value="
DATA = "&data="
POSITION = "&position="
HEX = "&hex="
GAS_PRICE = "&gasPrice="
GAS = "&gas="
START_BLOCK = "&startblock="
END_BLOCK = "&endblock="
BLOCKNO = "&blockno="
TXHASH = "&txhash="
TAG = "&tag="
BOOLEAN = "&boolean="
INDEX = "&index="
API_KEY = "&apikey="
url_dict: OrderedDict = {}
def __init__(
self,
api_key: str,
nodes: Dict[str, str],
default_chain_id: Optional[str] = None,
):
self.api_key = api_key
self.endpoints = nodes
self.default_chain = default_chain_id
self.http = requests.session()
self.http.headers.update({"User-Agent": "API"})
self.url_dict = OrderedDict(
[
(self.MODULE, ""),
(self.ADDRESS, ""),
(self.ACTION, ""),
(self.OFFSET, ""),
(self.PAGE, ""),
(self.SORT, ""),
(self.BLOCK_TYPE, ""),
(self.TO, ""),
(self.VALUE, ""),
(self.DATA, ""),
(self.POSITION, ""),
(self.HEX, ""),
(self.GAS_PRICE, ""),
(self.GAS, ""),
(self.START_BLOCK, ""),
(self.END_BLOCK, ""),
(self.BLOCKNO, ""),
(self.TXHASH, ""),
(self.TAG, ""),
(self.BOOLEAN, ""),
(self.INDEX, ""),
(self.API_KEY, api_key),
]
)
def build_url(self, chain_id: str, url_dict: OrderedDict) -> str:
return (
self.endpoints[chain_id]
+ "?"
+ "".join([param + val if val else "" for param, val in url_dict.items()])
)
def _get_chain_id(self, chain_id) -> str:
_id = chain_id or self.default_chain
if _id is None:
raise ProcessingException(
"chain_id must be provided as argument or constructor default"
)
return _id
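# --- Usage sketch (illustrative only) ---
# Builds a "get contract ABI" request URL from the ordered parameter template; the
# API key, node mapping and contract address below are placeholder values.
from ethtx.providers.etherscan.client import EtherscanClient
client = EtherscanClient(
    api_key="YourApiKeyToken",
    nodes={"mainnet": "https://api.etherscan.io/api"},
    default_chain_id="mainnet",
)
params = client.url_dict.copy()
params[EtherscanClient.MODULE] = "contract"
params[EtherscanClient.ACTION] = "getabi"
params[EtherscanClient.ADDRESS] = "0x0000000000000000000000000000000000000000"
print(client.build_url("mainnet", params))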
/CNN4IE-0.1.9-py3-none-any.whl/cnn4ie/mixed_attention_cnn/mixed_attention.py
import torch
import torch.nn as nn
import math
class SeparableConv1D(nn.Module):
"""This class implements separable convolution, i.e. a depthwise and a pointwise layer"""
def __init__(self, input_filters, output_filters, kernel_size):
super(SeparableConv1D, self).__init__()
self.depthwise = nn.Conv1d(
input_filters,
input_filters,
kernel_size=kernel_size,
groups=input_filters,
padding=kernel_size // 2,
bias=False,
)
self.pointwise = nn.Conv1d(input_filters, output_filters, kernel_size=1, bias=False)
self.bias = nn.Parameter(torch.zeros(output_filters, 1))
self.depthwise.weight.data.normal_(mean=0.0, std=0.01)
self.pointwise.weight.data.normal_(mean=0.0, std=0.01)
def forward(self, hidden_states):
x = self.depthwise(hidden_states)
x = self.pointwise(x)
x += self.bias
return x
class MixedAttention(nn.Module):
def __init__(self, hidden_size, num_attention_heads, head_ratio, conv_kernel_size, dropout):
super(MixedAttention, self).__init__()
assert hidden_size % num_attention_heads == 0, f'The hidden size ({hidden_size}) is not a multiple of the number of attention heads'
new_num_attention_heads = num_attention_heads // head_ratio
if new_num_attention_heads < 1:
self.head_ratio = num_attention_heads
self.num_attention_heads = 1
else:
self.num_attention_heads = new_num_attention_heads
self.head_ratio = head_ratio
self.conv_kernel_size = conv_kernel_size
assert hidden_size % num_attention_heads == 0, f'The hidden size ({hidden_size}) is not a multiple of the number of attention heads'
self.attention_head_size = hidden_size // num_attention_heads
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(hidden_size, self.all_head_size)
self.key = nn.Linear(hidden_size, self.all_head_size)
self.value = nn.Linear(hidden_size, self.all_head_size)
self.key_conv_attn_layer = SeparableConv1D(hidden_size, self.all_head_size, self.conv_kernel_size)
self.conv_kernel_layer = nn.Linear(self.all_head_size, self.num_attention_heads * self.conv_kernel_size)
self.conv_out_layer = nn.Linear(hidden_size, self.all_head_size)
self.unfold = nn.Unfold(kernel_size=[self.conv_kernel_size, 1], padding=[int((self.conv_kernel_size - 1) / 2), 0])
self.dropout = nn.Dropout(dropout)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, x):
'''
:param x: [batch_size, src_len, hid_dim]
:return:
'''
mixed_query_layer = self.query(x)
batch_size = x.size(0)
mixed_key_layer = self.key(x)
mixed_value_layer = self.value(x)
mixed_key_conv_attn_layer = self.key_conv_attn_layer(x.transpose(1, 2))
mixed_key_conv_attn_layer = mixed_key_conv_attn_layer.transpose(1, 2)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
conv_attn_layer = torch.multiply(mixed_key_conv_attn_layer, mixed_query_layer)
conv_kernel_layer = self.conv_kernel_layer(conv_attn_layer)
conv_kernel_layer = torch.reshape(conv_kernel_layer, [-1, self.conv_kernel_size, 1])
conv_kernel_layer = torch.softmax(conv_kernel_layer, dim=1)
conv_out_layer = self.conv_out_layer(x)
conv_out_layer = torch.reshape(conv_out_layer, [batch_size, -1, self.all_head_size])
conv_out_layer = conv_out_layer.transpose(1, 2).contiguous().unsqueeze(-1)
conv_out_layer = nn.functional.unfold(
conv_out_layer,
kernel_size=[self.conv_kernel_size, 1],
dilation=1,
padding=[(self.conv_kernel_size - 1) // 2, 0],
stride=1,
)
conv_out_layer = conv_out_layer.transpose(1, 2).reshape(
batch_size, -1, self.all_head_size, self.conv_kernel_size
)
conv_out_layer = torch.reshape(conv_out_layer, [-1, self.attention_head_size, self.conv_kernel_size])
conv_out_layer = torch.matmul(conv_out_layer, conv_kernel_layer)
conv_out_layer = torch.reshape(conv_out_layer, [-1, self.all_head_size])
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
conv_out = torch.reshape(conv_out_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size])
context_layer = torch.cat([context_layer, conv_out], 2)
new_context_layer_shape = context_layer.size()[:-2] + (self.head_ratio * self.all_head_size,)
outputs = context_layer.view(*new_context_layer_shape) # [batch_size, src_len, hid_dim]
return outputs
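# --- Usage sketch (illustrative only) ---
# Shape check with arbitrary sizes: the mixed attention block preserves the
# [batch_size, src_len, hid_dim] layout of its input.
import torch
from cnn4ie.mixed_attention_cnn.mixed_attention import MixedAttention
layer = MixedAttention(hidden_size=128, num_attention_heads=8, head_ratio=2,
                       conv_kernel_size=9, dropout=0.1)
x = torch.randn(2, 50, 128)
print(layer(x).shape)            # torch.Size([2, 50, 128])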
/Ajango-1.0.0.tar.gz/Ajango-1.0.0/ajango/generator/__init__.py
from __future__ import print_function
from django.core.management.base import CommandError
from ajango.generator.project_manager import ProjectManager
from ajango.generator.application import Application
from ajango.generator.config_global import ConfigGlobal
from xml.dom import minidom
import os.path
class AjangoGenerator(object):
""" Obiekt generujacy aplikacje na podstawie pliku szkieletowego. """
def __init__(self, project_name, skeleton_file):
self.project = ProjectManager(project_name)
ConfigGlobal().set('project_manager', self.project)
self.options = None
self.apps = []
self.main_app = None
self.render(skeleton_file)
def set_options(self, options):
""" Ustawienie opcji dla obiektu. """
self.options = options
def render(self, skeleton_file):
""" Budowanie zestawu obiektow dla pliku XML. """
if not os.path.isfile(skeleton_file):
raise CommandError(
"%r: File doesn't exist" % skeleton_file
)
print("Render: "+ skeleton_file)
xmldoc_main = minidom.parse(skeleton_file)
xmldoc = xmldoc_main.childNodes[0]
if xmldoc.tagName.upper() != 'AJANGO':
raise CommandError("This Ajango skeleton is not Valid")
for elem in xmldoc.childNodes:
if isinstance(elem, minidom.Element):
# Render the application
if elem.tagName.upper() == 'APPLICATION':
app = Application(elem)
for once in self.apps:
# Check that applications are not duplicated
if app.get_name() == once.get_name():
raise CommandError("There are two or more "
"application named: %r" %
app.get_name())
if app.getAttribute("main").lower() == "main":
# Check that only one application is marked as main
if self.main_app != None:
raise CommandError("There are two or more "
"application signed as 'main'")
self.main_app = app
self.apps.append(app)
self.project.add_main_view_url(app)
else:
raise CommandError("Unknown tag name: %r " % elem.tagName)
if self.main_app == None:
# If no application is marked as main, the first one becomes the main application.
self.main_app = self.apps[0]
self.project.add_main_url(self.main_app)
def make_apps(self):
""" Stworzenie wszystkich aplikacji. """
for app in self.apps:
if self.options != None:
app.set_options(self.options)
app.make_new()
print("Building app: " + app.get_name())
app.execution()
project = ConfigGlobal().get('project_manager')
project.execute()
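# --- Usage sketch (illustrative only) ---
# Drives the generator as the class above expects: parse an XML skeleton for an
# existing Django project, then build every declared application. The project
# name and skeleton path are placeholder values.
from ajango.generator import AjangoGenerator
generator = AjangoGenerator("myproject", "skeleton.xml")
generator.make_apps()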
/NlpToolkit-Classification-1.0.16.tar.gz/NlpToolkit-Classification-1.0.16/Classification/Classifier/Lda.py
from Math.Matrix import Matrix
from Math.Vector import Vector
from Classification.Classifier.Classifier import Classifier
from Classification.InstanceList.InstanceList import InstanceList
from Classification.InstanceList.Partition import Partition
from Classification.Model.LdaModel import LdaModel
from Classification.Parameter.Parameter import Parameter
import math
class Lda(Classifier):
def train(self,
trainSet: InstanceList,
parameters: Parameter = None):
"""
Training algorithm for the linear discriminant analysis classifier (Introduction to Machine Learning, Alpaydin,
2015).
PARAMETERS
----------
trainSet : InstanceList
Training data given to the algorithm.
parameters : Parameter
Parameter of the Lda algorithm.
"""
w0 = {}
w = {}
prior_distribution = trainSet.classDistribution()
class_lists = Partition(trainSet)
covariance = Matrix(trainSet.get(0).continuousAttributeSize(), trainSet.get(0).continuousAttributeSize())
for i in range(class_lists.size()):
average_vector = Vector(class_lists.get(i).continuousAverage())
class_covariance = class_lists.get(i).covariance(average_vector)
class_covariance.multiplyWithConstant(class_lists.get(i).size() - 1)
covariance.add(class_covariance)
covariance.divideByConstant(trainSet.size() - class_lists.size())
covariance.inverse()
for i in range(class_lists.size()):
Ci = class_lists.get(i).getClassLabel()
average_vector = Vector(class_lists.get(i).continuousAverage())
wi = covariance.multiplyWithVectorFromRight(average_vector)
w[Ci] = wi
w0i = -0.5 * wi.dotProduct(average_vector) + math.log(prior_distribution.getProbability(Ci))
w0[Ci] = w0i
self.model = LdaModel(prior_distribution, w, w0)
def loadModel(self, fileName: str):
self.model = LdaModel(fileName)