| column | dtype | range / values |
|---|---|---|
| content | string | lengths 7 to 928k |
| avg_line_length | float64 | 3.5 to 33.8k |
| max_line_length | int64 | 6 to 139k |
| alphanum_fraction | float64 | 0.08 to 0.96 |
| licenses | sequence | |
| repository_name | string | lengths 7 to 104 |
| path | string | lengths 4 to 230 |
| size | int64 | 7 to 928k |
| lang | string | 1 class |
#!/usr/bin/env python
# import general use modules
import os
from pprint import pprint as pp
# import nornir specifics
from nornir import InitNornir
from nornir.plugins.functions.text import print_result
from nornir.core.filter import F
nr = InitNornir()
hosts = nr.inventory.hosts
arista1_filter = nr.filter(name="arista1")
arista1 = arista1_filter.inventory.hosts
#print(hosts)
print(arista1)
# Filter on the "role" attribute defined in the inventory
wan_filter = nr.filter(role="WAN")
wan_hosts = wan_filter.inventory.hosts
print(wan_hosts)
# Filters can be chained to narrow the selection further
wan_port_filter = nr.filter(role="WAN").filter(port=22)
wan_port_hosts = wan_port_filter.inventory.hosts
print(wan_port_hosts)
# The F object supports advanced filtering, e.g. on group membership
sfo_filter = nr.filter(F(groups__contains="sfo"))
sfo_hosts = sfo_filter.inventory.hosts
print(sfo_hosts)
| avg_line_length: 21.617647 | max_line_length: 55 | alphanum_fraction: 0.794558 | licenses: ["MIT"] | repository_name: papri-entropy/nornir-course | path: class3/exercise2/exercise2.py | size: 735 | lang: Python |
# -*- coding: utf-8 -*-
#
# Political Dynamics documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Political Dynamics'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'political-dynamicsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'political-dynamics.tex',
u'Political Dynamics Documentation',
u"Arya D. McCarthy", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'political-dynamics', u'Political Dynamics Documentation',
[u"Arya D. McCarthy"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'political-dynamics', u'Political Dynamics Documentation',
u"Arya D. McCarthy", 'Political Dynamics',
     'A differential equations perspective on American National Election Studies (ANES) over time.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| avg_line_length: 32.420408 | max_line_length: 127 | alphanum_fraction: 0.709178 | licenses: ["MIT"] | repository_name: aryamccarthy/ANES | path: docs/conf.py | size: 7,943 | lang: Python |
# Copyright 2020 University of New South Wales, University of Sydney, Ingham Institute
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import SimpleITK as sitk
import numpy as np
from loguru import logger
from platipy.imaging.registration.utils import apply_transform, convert_mask_to_reg_structure
from platipy.imaging.registration.linear import (
linear_registration,
)
from platipy.imaging.registration.deformable import (
fast_symmetric_forces_demons_registration,
)
from platipy.imaging.label.fusion import (
process_probability_image,
compute_weight_map,
combine_labels,
)
from platipy.imaging.label.iar import run_iar
from platipy.imaging.utils.vessel import vessel_spline_generation
from platipy.imaging.utils.valve import (
generate_valve_from_great_vessel,
generate_valve_using_cylinder,
)
from platipy.imaging.utils.conduction import (
geometric_sinoatrialnode,
geometric_atrioventricularnode,
)
from platipy.imaging.utils.crop import label_to_roi, crop_to_roi
from platipy.imaging.generation.mask import extend_mask
from platipy.imaging.label.utils import binary_encode_structure_list, correct_volume_overlap
ATLAS_PATH = "/atlas"
if "ATLAS_PATH" in os.environ:
ATLAS_PATH = os.environ["ATLAS_PATH"]
CARDIAC_SETTINGS_DEFAULTS = {
"atlas_settings": {
"atlas_id_list": [
"03",
"05",
"08",
"10",
"11",
"12",
"13",
"16",
"24",
"35",
],
"atlas_structure_list": [
"AORTICVALVE",
"ASCENDINGAORTA",
"LANTDESCARTERY",
"LCIRCUMFLEXARTERY",
"LCORONARYARTERY",
"LEFTATRIUM",
"LEFTVENTRICLE",
"MITRALVALVE",
"PULMONARYARTERY",
"PULMONICVALVE",
"RCORONARYARTERY",
"RIGHTATRIUM",
"RIGHTVENTRICLE",
"SVC",
"TRICUSPIDVALVE",
"WHOLEHEART",
],
"atlas_path": ATLAS_PATH,
"atlas_image_format": "Case_{0}/Images/Case_{0}_CROP.nii.gz",
"atlas_label_format": "Case_{0}/Structures/Case_{0}_{1}_CROP.nii.gz",
"crop_atlas_to_structures": False,
"crop_atlas_expansion_mm": (20, 20, 40),
"guide_structure_name": "WHOLEHEART",
"superior_extension": 30,
},
"auto_crop_target_image_settings": {
"expansion_mm": [20, 20, 40],
},
"linear_registration_settings": {
"reg_method": "affine",
"shrink_factors": [16, 8, 4],
"smooth_sigmas": [0, 0, 0],
"sampling_rate": 0.75,
"default_value": -1000,
"number_of_iterations": 50,
"metric": "mean_squares",
"optimiser": "gradient_descent_line_search",
"verbose": False,
},
"structure_guided_registration_settings": {
"isotropic_resample": True,
"resolution_staging": [
16,
8,
2,
], # specify voxel size (mm) since isotropic_resample is set
"iteration_staging": [50, 50, 50],
"smoothing_sigmas": [0, 0, 0],
"ncores": 8,
"default_value": 0,
"verbose": False,
},
"deformable_registration_settings": {
"isotropic_resample": True,
"resolution_staging": [
6,
3,
1.5,
], # specify voxel size (mm) since isotropic_resample is set
"iteration_staging": [200, 150, 100],
"smoothing_sigmas": [0, 0, 0],
"ncores": 8,
"default_value": 0,
"verbose": False,
},
"iar_settings": {
"reference_structure": False,
"smooth_distance_maps": True,
"smooth_sigma": 1,
"z_score_statistic": "mad",
"outlier_method": "iqr",
"outlier_factor": 1.5,
"min_best_atlases": 5,
"project_on_sphere": False,
},
"label_fusion_settings": {
"vote_type": "unweighted",
"vote_params": None,
"optimal_threshold": {
"AORTICVALVE": 0.5,
"ASCENDINGAORTA": 0.44,
"LEFTATRIUM": 0.40,
"LEFTVENTRICLE": 0.45,
"MITRALVALVE": 0.5,
"PULMONARYARTERY": 0.46,
"PULMONICVALVE": 0.5,
"RIGHTATRIUM": 0.38,
"RIGHTVENTRICLE": 0.42,
"SVC": 0.44,
"TRICUSPIDVALVE": 0.5,
"WHOLEHEART": 0.5,
},
},
"vessel_spline_settings": {
"vessel_name_list": [
"LANTDESCARTERY",
"LCIRCUMFLEXARTERY",
"LCORONARYARTERY",
"RCORONARYARTERY",
],
"vessel_radius_mm_dict": {
"LANTDESCARTERY": 2,
"LCIRCUMFLEXARTERY": 2,
"LCORONARYARTERY": 2,
"RCORONARYARTERY": 2,
},
"scan_direction_dict": {
"LANTDESCARTERY": "z",
"LCIRCUMFLEXARTERY": "z",
"LCORONARYARTERY": "x",
"RCORONARYARTERY": "z",
},
"stop_condition_type_dict": {
"LANTDESCARTERY": "count",
"LCIRCUMFLEXARTERY": "count",
"LCORONARYARTERY": "count",
"RCORONARYARTERY": "count",
},
"stop_condition_value_dict": {
"LANTDESCARTERY": 2,
"LCIRCUMFLEXARTERY": 2,
"LCORONARYARTERY": 2,
"RCORONARYARTERY": 2,
},
},
"geometric_segmentation_settings": {
"run_geometric_algorithms": True,
"geometric_name_suffix": "_GEOMETRIC",
"atlas_structure_names": {
"atlas_left_ventricle": "LEFTVENTRICLE",
"atlas_right_ventricle": "RIGHTVENTRICLE",
"atlas_left_atrium": "LEFTATRIUM",
"atlas_right_atrium": "RIGHTATRIUM",
"atlas_ascending_aorta": "ASCENDINGAORTA",
"atlas_pulmonary_artery": "PULMONARYARTERY",
"atlas_superior_vena_cava": "SVC",
"atlas_whole_heart": "WHOLEHEART",
},
"valve_definitions": {
"mitral_valve_thickness_mm": 10,
"mitral_valve_radius_mm": 15,
"tricuspid_valve_thickness_mm": 10,
"tricuspid_valve_radius_mm": 15,
"pulmonic_valve_thickness_mm": 10,
"aortic_valve_thickness_mm": 10,
},
"conduction_system_definitions": {
"sinoatrial_node_radius_mm": 10,
"atrioventricular_node_radius_mm": 10,
},
},
"postprocessing_settings": {
"run_postprocessing": True,
"binaryfillhole_mm": 3,
"structures_for_binaryfillhole": [
"ASCENDINGAORTA",
"LEFTATRIUM",
"LEFTVENTRICLE",
"RIGHTATRIUM",
"RIGHTVENTRICLE",
"SVC",
"AORTICVALVE",
"MITRALVALVE",
"PULMONICVALVE",
"TRICUSPIDVALVE",
"WHOLEHEART",
],
"structures_for_overlap_correction": [
"ASCENDINGAORTA",
"LEFTATRIUM",
"LEFTVENTRICLE",
"RIGHTATRIUM",
"RIGHTVENTRICLE",
"PULMONARYARTERY",
"SVC",
],
},
"return_atlas_guide_structure": False,
"return_as_cropped": False,
"return_proba_as_contours": False,
}
def run_cardiac_segmentation(img, guide_structure=None, settings=CARDIAC_SETTINGS_DEFAULTS):
"""Runs the atlas-based cardiac segmentation
Args:
img (sitk.Image):
settings (dict, optional): Dictionary containing settings for algorithm.
Defaults to default_settings.
Returns:
dict: Dictionary containing output of segmentation
"""
results = {}
results_prob = {}
return_as_cropped = settings["return_as_cropped"]
"""
Initialisation - Read in atlases
- image files
- structure files
Atlas structure:
'ID': 'Original': 'CT Image' : sitk.Image
'Struct A' : sitk.Image
'Struct B' : sitk.Image
'RIR' : 'CT Image' : sitk.Image
'Transform' : transform parameter map
'Struct A' : sitk.Image
'Struct B' : sitk.Image
'DIR' : 'CT Image' : sitk.Image
'Transform' : displacement field transform
'Weight Map' : sitk.Image
'Struct A' : sitk.Image
'Struct B' : sitk.Image
"""
logger.info("")
# Settings
atlas_path = settings["atlas_settings"]["atlas_path"]
atlas_id_list = settings["atlas_settings"]["atlas_id_list"]
atlas_structure_list = settings["atlas_settings"]["atlas_structure_list"]
atlas_image_format = settings["atlas_settings"]["atlas_image_format"]
atlas_label_format = settings["atlas_settings"]["atlas_label_format"]
crop_atlas_to_structures = settings["atlas_settings"]["crop_atlas_to_structures"]
crop_atlas_expansion_mm = settings["atlas_settings"]["crop_atlas_expansion_mm"]
atlas_set = {}
for atlas_id in atlas_id_list:
atlas_set[atlas_id] = {}
atlas_set[atlas_id]["Original"] = {}
image = sitk.ReadImage(f"{atlas_path}/{atlas_image_format.format(atlas_id)}")
structures = {
struct: sitk.ReadImage(f"{atlas_path}/{atlas_label_format.format(atlas_id, struct)}")
for struct in atlas_structure_list
}
if crop_atlas_to_structures:
logger.info(f"Automatically cropping atlas: {atlas_id}")
            original_volume = np.prod(image.GetSize())
crop_box_size, crop_box_index = label_to_roi(
structures.values(), expansion_mm=crop_atlas_expansion_mm
)
image = crop_to_roi(image, size=crop_box_size, index=crop_box_index)
            final_volume = np.prod(image.GetSize())
logger.info(f" > Volume reduced by factor {original_volume/final_volume:.2f}")
for struct in atlas_structure_list:
structures[struct] = crop_to_roi(
structures[struct], size=crop_box_size, index=crop_box_index
)
atlas_set[atlas_id]["Original"]["CT Image"] = image
for struct in atlas_structure_list:
atlas_set[atlas_id]["Original"][struct] = structures[struct]
"""
Step 1 - Automatic cropping
If we have a guide structure:
- use structure to crop target image
Otherwise:
- using a quick registration to register each atlas
- expansion of the bounding box to ensure entire volume of interest is enclosed
- target image is cropped
"""
expansion_mm = settings["auto_crop_target_image_settings"]["expansion_mm"]
if guide_structure:
crop_box_size, crop_box_index = label_to_roi(guide_structure, expansion_mm=expansion_mm)
img_crop = crop_to_roi(img, crop_box_size, crop_box_index)
guide_structure = crop_to_roi(guide_structure, crop_box_size, crop_box_index)
target_reg_structure = convert_mask_to_reg_structure(guide_structure, expansion=2)
else:
quick_reg_settings = {
"reg_method": "similarity",
"shrink_factors": [8],
"smooth_sigmas": [0],
"sampling_rate": 0.75,
"default_value": -1000,
"number_of_iterations": 25,
"final_interp": sitk.sitkLinear,
"metric": "mean_squares",
"optimiser": "gradient_descent_line_search",
}
registered_crop_images = []
logger.info("Running initial Translation tranform to crop image volume")
for atlas_id in atlas_id_list[: min([8, len(atlas_id_list)])]:
logger.info(f" > atlas {atlas_id}")
# Register the atlases
atlas_set[atlas_id]["RIR"] = {}
atlas_image = atlas_set[atlas_id]["Original"]["CT Image"]
reg_image, _ = linear_registration(
img,
atlas_image,
**quick_reg_settings,
)
registered_crop_images.append(sitk.Cast(reg_image, sitk.sitkFloat32))
del reg_image
combined_image = sum(registered_crop_images) / len(registered_crop_images) > -1000
crop_box_size, crop_box_index = label_to_roi(combined_image, expansion_mm=expansion_mm)
img_crop = crop_to_roi(img, crop_box_size, crop_box_index)
logger.info("Calculated crop box:")
logger.info(f" > {crop_box_index}")
logger.info(f" > {crop_box_size}")
logger.info(f" > Vol reduction = {np.product(img.GetSize())/np.product(crop_box_size):.2f}")
"""
Step 2 - Rigid registration of target images
- Individual atlas images are registered to the target
- The transformation is used to propagate the labels onto the target
"""
linear_registration_settings = settings["linear_registration_settings"]
logger.info(
f"Running {linear_registration_settings['reg_method']} tranform to align atlas images"
)
for atlas_id in atlas_id_list:
# Register the atlases
logger.info(f" > atlas {atlas_id}")
atlas_set[atlas_id]["RIR"] = {}
if guide_structure:
guide_structure_name = settings["atlas_settings"]["guide_structure_name"]
target_reg_image = target_reg_structure
atlas_reg_image = convert_mask_to_reg_structure(
atlas_set[atlas_id]["Original"][guide_structure_name], expansion=2
)
else:
target_reg_image = img_crop
atlas_reg_image = atlas_set[atlas_id]["Original"]["CT Image"]
_, initial_tfm = linear_registration(
target_reg_image,
atlas_reg_image,
**linear_registration_settings,
)
# Save in the atlas dict
atlas_set[atlas_id]["RIR"]["Transform"] = initial_tfm
if guide_structure:
atlas_set[atlas_id]["RIR"]["Reg Mask"] = apply_transform(
input_image=atlas_reg_image,
reference_image=img_crop,
transform=initial_tfm,
default_value=0,
interpolator=sitk.sitkLinear,
)
expanded_atlas_guide_structure = extend_mask(
atlas_set[atlas_id]["Original"][guide_structure_name],
direction=("ax", "sup"),
extension_mm=settings["atlas_settings"]["superior_extension"],
interior_mm_shape=settings["atlas_settings"]["superior_extension"] / 2,
)
atlas_set[atlas_id]["RIR"][guide_structure_name + "EXPANDED"] = apply_transform(
input_image=expanded_atlas_guide_structure,
reference_image=img_crop,
transform=initial_tfm,
default_value=0,
interpolator=sitk.sitkNearestNeighbor,
)
atlas_set[atlas_id]["RIR"]["CT Image"] = apply_transform(
input_image=atlas_set[atlas_id]["Original"]["CT Image"],
reference_image=img_crop,
transform=initial_tfm,
default_value=-1000,
interpolator=sitk.sitkLinear,
)
# sitk.WriteImage(rigid_image, f"./RR_{atlas_id}.nii.gz")
for struct in atlas_structure_list:
input_struct = atlas_set[atlas_id]["Original"][struct]
atlas_set[atlas_id]["RIR"][struct] = apply_transform(
input_image=input_struct,
reference_image=img_crop,
transform=initial_tfm,
default_value=0,
interpolator=sitk.sitkNearestNeighbor,
)
atlas_set[atlas_id]["Original"] = None
"""
Step 3 - Deformable image registration
- Using Fast Symmetric Diffeomorphic Demons
"""
if guide_structure:
structure_guided_registration_settings = settings["structure_guided_registration_settings"]
logger.info("Running structure-guided deformable registration on atlas labels")
for atlas_id in atlas_id_list:
logger.info(f" > atlas {atlas_id}")
# Register the atlases
atlas_set[atlas_id]["DIR_STRUCT"] = {}
deform_image, struct_guided_tfm, _ = fast_symmetric_forces_demons_registration(
target_reg_structure,
atlas_set[atlas_id]["RIR"]["Reg Mask"],
**structure_guided_registration_settings,
)
# Save in the atlas dict
atlas_set[atlas_id]["DIR_STRUCT"]["Reg Mask"] = deform_image
atlas_set[atlas_id]["DIR_STRUCT"]["Transform"] = struct_guided_tfm
atlas_set[atlas_id]["DIR_STRUCT"]["CT Image"] = apply_transform(
input_image=atlas_set[atlas_id]["RIR"]["CT Image"],
transform=struct_guided_tfm,
default_value=-1000,
interpolator=sitk.sitkLinear,
)
atlas_set[atlas_id]["DIR_STRUCT"][guide_structure_name + "EXPANDED"] = apply_transform(
input_image=atlas_set[atlas_id]["RIR"][guide_structure_name + "EXPANDED"],
reference_image=img_crop,
transform=struct_guided_tfm,
default_value=0,
interpolator=sitk.sitkNearestNeighbor,
)
# sitk.WriteImage(deform_image, f"./DIR_STRUCT_{atlas_id}.nii.gz")
for struct in atlas_structure_list:
input_struct = atlas_set[atlas_id]["RIR"][struct]
atlas_set[atlas_id]["DIR_STRUCT"][struct] = apply_transform(
input_image=input_struct,
transform=struct_guided_tfm,
default_value=0,
interpolator=sitk.sitkNearestNeighbor,
)
atlas_set[atlas_id]["RIR"] = None
# Settings
deformable_registration_settings = settings["deformable_registration_settings"]
logger.info("Running DIR to refine atlas image registration")
for atlas_id in atlas_id_list:
logger.info(f" > atlas {atlas_id}")
# Register the atlases
atlas_set[atlas_id]["DIR"] = {}
if guide_structure:
label = "DIR_STRUCT"
else:
label = "RIR"
atlas_reg_image = atlas_set[atlas_id][label]["CT Image"]
target_reg_image = img_crop
if guide_structure:
expanded_atlas_mask = atlas_set[atlas_id]["DIR_STRUCT"][
guide_structure_name + "EXPANDED"
]
expanded_target_mask = extend_mask(
guide_structure,
direction=("ax", "sup"),
extension_mm=settings["atlas_settings"]["superior_extension"],
interior_mm_shape=settings["atlas_settings"]["superior_extension"] / 2,
)
combined_mask = sitk.Maximum(expanded_atlas_mask, expanded_target_mask)
atlas_reg_image = sitk.Mask(atlas_reg_image, combined_mask, outsideValue=-1000)
atlas_reg_image = sitk.Mask(
atlas_reg_image, atlas_reg_image > -400, outsideValue=-1000
)
target_reg_image = sitk.Mask(target_reg_image, combined_mask, outsideValue=-1000)
target_reg_image = sitk.Mask(
target_reg_image, atlas_reg_image > -400, outsideValue=-1000
)
deform_image, dir_tfm, _ = fast_symmetric_forces_demons_registration(
target_reg_image,
atlas_reg_image,
**deformable_registration_settings,
)
# Save in the atlas dict
atlas_set[atlas_id]["DIR"]["Transform"] = dir_tfm
atlas_set[atlas_id]["DIR"]["CT Image"] = apply_transform(
input_image=atlas_set[atlas_id][label]["CT Image"],
transform=dir_tfm,
default_value=-1000,
interpolator=sitk.sitkLinear,
)
for struct in atlas_structure_list:
input_struct = atlas_set[atlas_id][label][struct]
atlas_set[atlas_id]["DIR"][struct] = apply_transform(
input_image=input_struct,
transform=dir_tfm,
default_value=0,
interpolator=sitk.sitkNearestNeighbor,
)
atlas_set[atlas_id][label] = None
"""
Step 4 - Iterative atlas removal
- This is an automatic process that will attempt to remove inconsistent atlases from the entire set
"""
# Compute weight maps
    # Here we use simple GWV as this minimises the potentially negative influence of mis-registered
# atlases
iar_settings = settings["iar_settings"]
if iar_settings["reference_structure"]:
for atlas_id in atlas_id_list:
atlas_image = atlas_set[atlas_id]["DIR"]["CT Image"]
weight_map = compute_weight_map(img_crop, atlas_image, vote_type="global")
atlas_set[atlas_id]["DIR"]["Weight Map"] = weight_map
atlas_set = run_iar(atlas_set=atlas_set, **iar_settings)
else:
logger.info("IAR: No reference structure, skipping iterative atlas removal.")
"""
    Step 5 - Vessel Splining
"""
vessel_spline_settings = settings["vessel_spline_settings"]
if len(vessel_spline_settings["vessel_name_list"]) > 0:
segmented_vessel_dict = vessel_spline_generation(
img_crop, atlas_set, **vessel_spline_settings
)
else:
logger.info("No vessel splining required, continue.")
"""
    Step 6 - Label Fusion
"""
# Compute weight maps
vote_type = settings["label_fusion_settings"]["vote_type"]
vote_params = settings["label_fusion_settings"]["vote_params"]
# Compute weight maps
for atlas_id in list(atlas_set.keys()):
atlas_image = atlas_set[atlas_id]["DIR"]["CT Image"]
weight_map = compute_weight_map(
img_crop, atlas_image, vote_type=vote_type, vote_params=vote_params
)
atlas_set[atlas_id]["DIR"]["Weight Map"] = weight_map
combined_label_dict = combine_labels(atlas_set, atlas_structure_list)
"""
    Step 7 - Paste the cropped structure into the original image space
"""
logger.info("Generating binary segmentations.")
template_img_binary = sitk.Cast((img * 0), sitk.sitkUInt8)
template_img_prob = sitk.Cast((img * 0), sitk.sitkFloat64)
vote_structures = settings["label_fusion_settings"]["optimal_threshold"].keys()
vote_structures = [i for i in vote_structures if i in atlas_structure_list]
for structure_name in vote_structures:
probability_map = combined_label_dict[structure_name]
optimal_threshold = settings["label_fusion_settings"]["optimal_threshold"][structure_name]
binary_struct = process_probability_image(probability_map, optimal_threshold)
if return_as_cropped:
results[structure_name] = binary_struct
if settings["return_proba_as_contours"]:
atlas_contours = [
atlas_set[atlas_id]["DIR"][structure_name] >= 2 for atlas_id in atlas_id_list
]
results_prob[structure_name] = binary_encode_structure_list(atlas_contours)
else:
results_prob[structure_name] = probability_map
            # Another version of the guide_structure is also generated from the atlas contours.
            # It *can* be returned, but usually the original guide structure is preferred,
            # so the check below decides which version ends up in the results.
if (not settings["return_atlas_guide_structure"]) and (guide_structure is not None):
results[guide_structure_name] = guide_structure
results_prob[guide_structure_name] = guide_structure
else:
if settings["return_proba_as_contours"]:
atlas_contours = [
atlas_set[atlas_id]["DIR"][structure_name] >= 2 for atlas_id in atlas_id_list
]
probability_img = binary_encode_structure_list(atlas_contours)
template_img_prob = sitk.Cast((img * 0), sitk.sitkUInt32)
else:
probability_img = probability_map
# Un-crop binary structure
paste_img_binary = sitk.Paste(
template_img_binary,
binary_struct,
binary_struct.GetSize(),
(0, 0, 0),
crop_box_index,
)
results[structure_name] = paste_img_binary
# Un-crop probability map
paste_prob_img = sitk.Paste(
template_img_prob,
probability_img,
probability_img.GetSize(),
(0, 0, 0),
crop_box_index,
)
results_prob[structure_name] = paste_prob_img
# Un-crop the guide structure
if (not settings["return_atlas_guide_structure"]) and (guide_structure is not None):
new_guide_structure = sitk.Paste(
template_img_binary,
guide_structure,
guide_structure.GetSize(),
(0, 0, 0),
crop_box_index,
)
results[guide_structure_name] = new_guide_structure
results_prob[guide_structure_name] = new_guide_structure
for structure_name in vessel_spline_settings["vessel_name_list"]:
binary_struct = segmented_vessel_dict[structure_name]
if return_as_cropped:
results[structure_name] = binary_struct
vessel_list = [
atlas_set[atlas_id]["DIR"][structure_name] for atlas_id in list(atlas_set.keys())
]
else:
# Un-crop binary vessel
paste_img_binary = sitk.Paste(
template_img_binary,
binary_struct,
binary_struct.GetSize(),
(0, 0, 0),
crop_box_index,
)
results[structure_name] = paste_img_binary
vessel_list = []
for atlas_id in list(atlas_set.keys()):
paste_img_binary = sitk.Paste(
template_img_binary,
atlas_set[atlas_id]["DIR"][structure_name],
atlas_set[atlas_id]["DIR"][structure_name].GetSize(),
(0, 0, 0),
crop_box_index,
)
vessel_list.append(paste_img_binary)
# Encode list of vessels
encoded_vessels = binary_encode_structure_list(vessel_list)
results_prob[structure_name] = encoded_vessels
"""
    Step 8 - Geometric definitions of cardiac valves and conduction system nodes
"""
geometric_segmentation_settings = settings["geometric_segmentation_settings"]
if geometric_segmentation_settings["run_geometric_algorithms"]:
logger.info("Computing geometric definitions for valves and conduction system.")
geom_atlas_names = geometric_segmentation_settings["atlas_structure_names"]
geom_valve_defs = geometric_segmentation_settings["valve_definitions"]
geom_conduction_defs = geometric_segmentation_settings["conduction_system_definitions"]
# 1 - MITRAL VALVE
mv_name = "MITRALVALVE" + geometric_segmentation_settings["geometric_name_suffix"]
results[mv_name] = generate_valve_using_cylinder(
label_atrium=results[geom_atlas_names["atlas_left_atrium"]],
label_ventricle=results[geom_atlas_names["atlas_left_ventricle"]],
radius_mm=geom_valve_defs["mitral_valve_radius_mm"],
height_mm=geom_valve_defs["mitral_valve_thickness_mm"],
)
# 2 - TRICUSPID VALVE
tv_name = "TRICUSPIDVALVE" + geometric_segmentation_settings["geometric_name_suffix"]
results[tv_name] = generate_valve_using_cylinder(
label_atrium=results[geom_atlas_names["atlas_right_atrium"]],
label_ventricle=results[geom_atlas_names["atlas_right_ventricle"]],
radius_mm=geom_valve_defs["tricuspid_valve_radius_mm"],
height_mm=geom_valve_defs["tricuspid_valve_thickness_mm"],
)
# 3 - AORTIC VALVE
av_name = "AORTICVALVE" + geometric_segmentation_settings["geometric_name_suffix"]
results[av_name] = generate_valve_from_great_vessel(
label_great_vessel=results[geom_atlas_names["atlas_ascending_aorta"]],
label_ventricle=results[geom_atlas_names["atlas_left_ventricle"]],
valve_thickness_mm=geom_valve_defs["aortic_valve_thickness_mm"],
)
# 4 - PULMONIC VALVE
pv_name = "PULMONICVALVE" + geometric_segmentation_settings["geometric_name_suffix"]
results[pv_name] = generate_valve_from_great_vessel(
label_great_vessel=results[geom_atlas_names["atlas_pulmonary_artery"]],
label_ventricle=results[geom_atlas_names["atlas_right_ventricle"]],
valve_thickness_mm=geom_valve_defs["pulmonic_valve_thickness_mm"],
)
# 5 - SINOATRIAL NODE
san_name = "SAN" + geometric_segmentation_settings["geometric_name_suffix"]
results[san_name] = geometric_sinoatrialnode(
label_svc=results[geom_atlas_names["atlas_superior_vena_cava"]],
label_ra=results[geom_atlas_names["atlas_right_atrium"]],
label_wholeheart=results[geom_atlas_names["atlas_whole_heart"]],
radius_mm=geom_conduction_defs["sinoatrial_node_radius_mm"],
)
# 6 - ATRIOVENTRICULAR NODE
avn_name = "AVN" + geometric_segmentation_settings["geometric_name_suffix"]
results[avn_name] = geometric_atrioventricularnode(
label_la=results[geom_atlas_names["atlas_left_atrium"]],
label_lv=results[geom_atlas_names["atlas_left_ventricle"]],
label_ra=results[geom_atlas_names["atlas_right_atrium"]],
label_rv=results[geom_atlas_names["atlas_right_ventricle"]],
radius_mm=geom_conduction_defs["atrioventricular_node_radius_mm"],
)
"""
    Step 9 - Post-processing
"""
postprocessing_settings = settings["postprocessing_settings"]
if postprocessing_settings["run_postprocessing"]:
logger.info("Running post-processing.")
# Remove any smaller components and perform morphological closing (hole filling)
binaryfillhole_img = [
int(postprocessing_settings["binaryfillhole_mm"] / sp) for sp in img.GetSpacing()
]
for structure_name in postprocessing_settings["structures_for_binaryfillhole"]:
if structure_name not in results.keys():
continue
contour_s = results[structure_name]
contour_s = sitk.RelabelComponent(sitk.ConnectedComponent(contour_s)) == 1
contour_s = sitk.BinaryMorphologicalClosing(contour_s, binaryfillhole_img)
results[structure_name] = contour_s
# Remove any overlaps
input_overlap = {
s: results[s] for s in postprocessing_settings["structures_for_overlap_correction"]
}
output_overlap = correct_volume_overlap(input_overlap)
for s in postprocessing_settings["structures_for_overlap_correction"]:
results[s] = output_overlap[s]
if return_as_cropped:
results["CROP_IMAGE"] = img_crop
logger.info("Done!")
return results, results_prob
| avg_line_length: 35.918436 | max_line_length: 103 | alphanum_fraction: 0.613339 | licenses: ["Apache-2.0"] | repository_name: RadiotherapyAI/platipy | path: platipy/imaging/projects/cardiac/run.py | size: 32,147 | lang: Python |
from behave import *
import requests
from django.contrib.auth.models import User
from rest_framework.authtoken.models import Token
from host.models import Event
use_step_matcher("re")
# @given("that I am a registered host of privilege walk events and want to create questions and answer choices for the event")
# def step_impl(context):
# context.username = "12thMan"
# context.password = "SomePassword123"
# context.first_name = "12th"
# context.last_name = "Man"
# context.email = "[email protected]"
# usr = User.objects.create_user(
# context.username,
# context.email,
# context.password
# )
# usr.first_name = context.first_name
# usr.last_name = context.last_name
# usr.save()
# registered_user = User.objects.filter(username="12thMan")
# assert len(registered_user) == 1
# user_auth_token, _ = Token.objects.get_or_create(user=usr)
# context.key = user_auth_token.key
# data = {
# "name": "New year event"
# }
# headers = {
# 'Authorization':'Token '+ context.key
# }
# resp = requests.post(context.test.live_server_url + "/host/events/create/", data, headers=headers)
# context.event_api_response_data = resp.json()
# context.eventId = context.event_api_response_data["id"]
# @when("I make an API call to create questions API with my correct username, questions, answer choices and correct eventid")
# def step_impl(context):
# data = {
# "event_id": context.eventId,
# "title": "The question's title goes here",
# "choices": [
# {
# "description": "Pizza",
# "value": 1
# },
# {
# "description": "Ice Cream",
# "value": 2
# },
# {
# "description": "Salt Water",
# "value": -1
# }
# ]
# }
# headers = {
# 'Authorization':'Token '+ context.key
# }
# resp = requests.post(context.test.live_server_url + "/host/qa/create/", data, headers=headers)
# assert resp.status_code >= 200 and resp.status_code < 300
# context.api_response_data = resp.json()
# @then("I expect the response that gives the status and id of the created question")
# def step_impl(context):
# assert context.api_response_data["status"] == "created"
# assert context.api_response_data["id"] != ""
# @given("that I am a registered host of privilege walk and wants to create questions but with wrong eventid")
# def step_impl(context):
# context.username = "12thMan"
# context.password = "SomePassword123"
# context.first_name = "12th"
# context.last_name = "Man"
# context.email = "[email protected]"
# usr = User.objects.create_user(
# context.username,
# context.email,
# context.password
# )
# usr.first_name = context.first_name
# usr.last_name = context.last_name
# usr.save()
# registered_user = User.objects.filter(username="12thMan")
# assert len(registered_user) == 1
# user_auth_token, _ = Token.objects.get_or_create(user=usr)
# context.key = user_auth_token.key
# data = {
# "name": "New year event"
# }
# headers = {
# 'Authorization':'Token '+ context.key
# }
# resp = requests.post(context.test.live_server_url + "/host/events/create/", data, headers=headers)
# context.event_api_response_data = resp.json()
# context.eventId = context.event_api_response_data["id"]
# @when("I make an API call to create questions API with my username, questions, answer choices and wrong event id")
# def step_impl(context):
# data = {
# "event_id": 12,
# "title": "Are you under 20?",
# "choices": [
# {
# "description": "Yes",
# "value": "1"
# },
# {
# "description": "No",
# "value": "-1"
# }
# ]
# }
# headers = {
# 'Authorization':'Token '+ context.key
# }
# resp = requests.post(context.test.live_server_url + "/host/qa/create/", data, headers=headers)
# assert resp.status_code >= 500
# context.api_response_data = resp.json()
# @then("I expect the response that says questions cannot be created as event id doesn't exist")
# def step_impl(context):
# pass
# @given("that I am a registered host of privilege walk and wants to create questions but without giving eventid")
# def step_impl(context):
# context.username = "12thMan"
# @when("I make an API call to create questions API with my username, questions, answer choices and without event id")
# def step_impl(context):
# data = {
# "title": "Are you under 20?",
# "choices": [
# {
# "description": "Yes",
# "value": "1"
# },
# {
# "description": "No",
# "value": "-1"
# }
# ]
# }
# headers = {
# 'Authorization':'Token '+ context.key
# }
# resp = requests.post(context.test.live_server_url + "/host/qa/create/", data, headers=headers)
# assert resp.status_code >= 500
# context.api_response_data = resp.json()
# @then("I expect the response that says questions cannot be created as event id is missing")
# def step_impl(context):
# pass
@given("that I am a registered host of privilege walk events and want to create questions but forgets to give username")
def step_impl(context):
context.username = "11thMan"
@when("I make an API call to create questions API with missing username in request")
def step_impl(context):
data = {
"title": "Are you under 20?",
"choices": [
{
"description": "Yes",
"value": "1"
},
{
"description": "No",
"value": "-1"
}
]
}
resp = requests.post(context.test.live_server_url + "/host/events/create/", data)
assert resp.status_code >= 400 and resp.status_code < 500
context.api_response_data = resp.json()
@then("I expect the response that says questions cannot be created and username is required in request")
def step_impl(context):
assert context.api_response_data["detail"] == "Authentication credentials were not provided."
| avg_line_length: 29.925926 | max_line_length: 126 | alphanum_fraction: 0.591894 | licenses: ["MIT"] | repository_name: Privilege-walk/back-end | path: behave_tests/steps/create_question.py | size: 6,464 | lang: Python |
import logging
import unittest
import random
from math import sqrt
from scipy.stats import chisquare
from type_system import Type, PolymorphicType, PrimitiveType, Arrow, List, UnknownType, INT, BOOL, STRING
from program import Program, Function, Variable, BasicPrimitive, New
from program_as_list import evaluation_from_compressed, reconstruct_from_compressed
from dsl import DSL
from DSL.deepcoder import semantics, primitive_types
from Algorithms.a_star import a_star
class TestSum(unittest.TestCase):
def test_programs(self):
"""
Checks the evaluation of programs
"""
p1 = BasicPrimitive("MAP")
p2 = BasicPrimitive("MAP", type_=PolymorphicType(name="test"))
        # check that they represent the same program even though their types differ
self.assertTrue(repr(p1) == repr(p2))
self.assertTrue(p1.typeless_eq(p2))
self.assertFalse(p1.__eq__(p2))
self.assertFalse(id(p1) == id(p2))
t0 = PolymorphicType("t0")
t1 = PolymorphicType("t1")
semantics = {
"+1": lambda x: x + 1,
"MAP": lambda f: lambda l: list(map(f, l)),
}
primitive_types = {
"+1": Arrow(INT, INT),
"MAP": Arrow(Arrow(t0, t1), Arrow(List(t0), List(t1))),
}
toy_DSL = DSL(semantics, primitive_types)
p0 = Function(BasicPrimitive("+1"), [Variable(0)])
env = (2, None)
self.assertTrue(p0.eval(toy_DSL, env, 0) == 3)
p1 = Function(BasicPrimitive("MAP"), [BasicPrimitive("+1"), Variable(0)])
env = ([2, 4], None)
self.assertTrue(p1.eval(toy_DSL, env, 0) == [3, 5])
def test_evaluation_from_compressed(self):
"""
        Check that evaluation_from_compressed evaluates programs correctly
"""
N = 20_000 # we test against the first N programs
deepcoder = DSL(semantics, primitive_types)
type_request = Arrow(List(INT), List(INT))
deepcoder_CFG = deepcoder.DSL_to_CFG(type_request)
deepcoder_PCFG = deepcoder_CFG.CFG_to_Random_PCFG()
gen_a_star = a_star(deepcoder_PCFG)
environment = ([2, 3, 1], None)
r = type_request.returns()
for i in range(N):
program_compressed = next(gen_a_star)
program = reconstruct_from_compressed(program_compressed, r)
program_as_list = []
eval_from_compressed = evaluation_from_compressed(
program_compressed, deepcoder, environment, r
)
eval_from_program = program.eval_naive(deepcoder, environment)
self.assertEqual(eval_from_compressed, eval_from_program)
if __name__ == "__main__":
unittest.main(verbosity=2)
| avg_line_length: 35.025641 | max_line_length: 105 | alphanum_fraction: 0.640922 | licenses: ["MIT"] | repository_name: agissaud/DeepSynth | path: unit_tests_programs.py | size: 2,732 | lang: Python |
from enum import Enum
class ProcMessage(Enum):
SYNC_MODEL = 1
class JobCompletions():
SENDER_ID = 1
STATUS = True
RESULTS = {}
ERRORS = ""
| avg_line_length: 13.5 | max_line_length: 24 | alphanum_fraction: 0.623457 | licenses: ["Apache-2.0"] | repository_name: rharish101/RecoEdge | path: fedrec/communications/messages.py | size: 162 | lang: Python |
import os
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
def prerelease_local_scheme(version):
"""
Return local scheme version unless building on master in CircleCI.
This function returns the local scheme version number
(e.g. 0.0.0.dev<N>+g<HASH>) unless building on CircleCI for a
pre-release in which case it ignores the hash and produces a
PEP440 compliant pre-release version number (e.g. 0.0.0.dev<N>).
"""
from setuptools_scm.version import get_local_node_and_date
if os.getenv('CIRCLE_BRANCH') in ('master', ):
return ''
else:
return get_local_node_and_date(version)
setup(
name='histomicsui',
use_scm_version={'local_scheme': prerelease_local_scheme},
setup_requires=['setuptools-scm'],
description='Organize, visualize, and analyze histology images.',
author='Kitware, Inc.',
author_email='[email protected]',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
install_requires=[
'girder-large-image-annotation>=1.4.2',
'girder-slicer-cli-web[girder]>=1.2.0',
'girder-worker[girder]>=0.6.0',
'celery>=4.4.0rc5',
],
license='Apache Software License 2.0',
long_description=readme,
long_description_content_type='text/x-rst',
include_package_data=True,
keywords='girder-plugin, histomicsui',
packages=find_packages(exclude=['test', 'test.*']),
url='https://github.com/DigitalSlideArchive/histomicsui',
zip_safe=False,
python_requires='>=3.6',
entry_points={
'girder.plugin': [
'histomicsui = histomicsui:GirderPlugin'
]
},
)
| avg_line_length: 32.920635 | max_line_length: 70 | alphanum_fraction: 0.653809 | licenses: ["Apache-2.0"] | repository_name: abcsFrederick/HistomicsUI | path: setup.py | size: 2,074 | lang: Python |
import copy
import functools
import warnings
from types import MethodType
from typing import Dict, List, Optional, Type, Union
import dill
import pandas as pd
from feast.base_feature_view import BaseFeatureView
from feast.data_source import RequestSource
from feast.errors import RegistryInferenceFailure, SpecifiedFeaturesNotPresentError
from feast.feature import Feature
from feast.feature_view import FeatureView
from feast.feature_view_projection import FeatureViewProjection
from feast.field import Field, from_value_type
from feast.protos.feast.core.OnDemandFeatureView_pb2 import (
OnDemandFeatureView as OnDemandFeatureViewProto,
)
from feast.protos.feast.core.OnDemandFeatureView_pb2 import (
OnDemandFeatureViewMeta,
OnDemandFeatureViewSpec,
OnDemandSource,
)
from feast.protos.feast.core.OnDemandFeatureView_pb2 import (
UserDefinedFunction as UserDefinedFunctionProto,
)
from feast.type_map import (
feast_value_type_to_pandas_type,
python_type_to_feast_value_type,
)
from feast.usage import log_exceptions
from feast.value_type import ValueType
warnings.simplefilter("once", DeprecationWarning)
class OnDemandFeatureView(BaseFeatureView):
"""
[Experimental] An OnDemandFeatureView defines a logical group of features that are
generated by applying a transformation on a set of input sources, such as feature
views and request data sources.
Attributes:
name: The unique name of the on demand feature view.
features: The list of features in the output of the on demand feature view.
source_feature_view_projections: A map from input source names to actual input
sources with type FeatureViewProjection.
source_request_sources: A map from input source names to the actual input
sources with type RequestSource.
udf: The user defined transformation function, which must take pandas dataframes
as inputs.
description: A human-readable description.
tags: A dictionary of key-value pairs to store arbitrary metadata.
owner: The owner of the on demand feature view, typically the email of the primary
maintainer.
"""
# TODO(adchia): remove inputs from proto and declaration
name: str
features: List[Field]
source_feature_view_projections: Dict[str, FeatureViewProjection]
source_request_sources: Dict[str, RequestSource]
udf: MethodType
description: str
tags: Dict[str, str]
owner: str
@log_exceptions
def __init__(
self,
*args,
name: Optional[str] = None,
features: Optional[List[Feature]] = None,
sources: Optional[
Dict[str, Union[FeatureView, FeatureViewProjection, RequestSource]]
] = None,
udf: Optional[MethodType] = None,
inputs: Optional[
Dict[str, Union[FeatureView, FeatureViewProjection, RequestSource]]
] = None,
schema: Optional[List[Field]] = None,
description: str = "",
tags: Optional[Dict[str, str]] = None,
owner: str = "",
):
"""
Creates an OnDemandFeatureView object.
Args:
name: The unique name of the on demand feature view.
features (deprecated): The list of features in the output of the on demand
feature view, after the transformation has been applied.
sources (optional): A map from input source names to the actual input sources,
which may be feature views, feature view projections, or request data sources.
These sources serve as inputs to the udf, which will refer to them by name.
udf (optional): The user defined transformation function, which must take pandas
dataframes as inputs.
inputs (optional): A map from input source names to the actual input sources,
which may be feature views, feature view projections, or request data sources.
These sources serve as inputs to the udf, which will refer to them by name.
schema (optional): The list of features in the output of the on demand feature
view, after the transformation has been applied.
description (optional): A human-readable description.
tags (optional): A dictionary of key-value pairs to store arbitrary metadata.
owner (optional): The owner of the on demand feature view, typically the email
of the primary maintainer.
"""
positional_attributes = ["name", "features", "inputs", "udf"]
_name = name
_schema = schema or []
if len(_schema) == 0 and features is not None:
_schema = [Field.from_feature(feature) for feature in features]
if features is not None:
warnings.warn(
(
"The `features` parameter is being deprecated in favor of the `schema` parameter. "
"Please switch from using `features` to `schema`. This will also requiring switching "
"feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not "
"support the `features` parameter."
),
DeprecationWarning,
)
_sources = sources or inputs
if inputs and sources:
raise ValueError("At most one of `sources` or `inputs` can be specified.")
elif inputs:
warnings.warn(
(
"The `inputs` parameter is being deprecated. Please use `sources` instead. "
"Feast 0.21 and onwards will not support the `inputs` parameter."
),
DeprecationWarning,
)
_udf = udf
if args:
warnings.warn(
(
"On demand feature view parameters should be specified as keyword arguments "
"instead of positional arguments. Feast 0.23 and onwards will not support "
"positional arguments in on demand feature view definitions."
),
DeprecationWarning,
)
if len(args) > len(positional_attributes):
raise ValueError(
f"Only {', '.join(positional_attributes)} are allowed as positional args "
f"when defining feature views, for backwards compatibility."
)
if len(args) >= 1:
_name = args[0]
if len(args) >= 2:
_schema = args[1]
# Convert Features to Fields.
if len(_schema) > 0 and isinstance(_schema[0], Feature):
_schema = [Field.from_feature(feature) for feature in _schema]
warnings.warn(
(
"The `features` parameter is being deprecated in favor of the `schema` parameter. "
"Please switch from using `features` to `schema`. This will also requiring switching "
"feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not "
"support the `features` parameter."
),
DeprecationWarning,
)
if len(args) >= 3:
_sources = args[2]
warnings.warn(
(
"The `inputs` parameter is being deprecated. Please use `sources` instead. "
"Feast 0.21 and onwards will not support the `inputs` parameter."
),
DeprecationWarning,
)
if len(args) >= 4:
_udf = args[3]
if not _name:
raise ValueError(
"The name of the on demand feature view must be specified."
)
if not _sources:
raise ValueError("The `sources` parameter must be specified.")
super().__init__(
name=_name,
features=_schema,
description=description,
tags=tags,
owner=owner,
)
assert _sources is not None
self.source_feature_view_projections: Dict[str, FeatureViewProjection] = {}
self.source_request_sources: Dict[str, RequestSource] = {}
for source_name, odfv_source in _sources.items():
if isinstance(odfv_source, RequestSource):
self.source_request_sources[source_name] = odfv_source
elif isinstance(odfv_source, FeatureViewProjection):
self.source_feature_view_projections[source_name] = odfv_source
else:
self.source_feature_view_projections[
source_name
] = odfv_source.projection
if _udf is None:
raise ValueError("The `udf` parameter must be specified.")
assert _udf
self.udf = _udf
@property
def proto_class(self) -> Type[OnDemandFeatureViewProto]:
return OnDemandFeatureViewProto
def __copy__(self):
fv = OnDemandFeatureView(
name=self.name,
schema=self.features,
sources=dict(
**self.source_feature_view_projections, **self.source_request_sources,
),
udf=self.udf,
description=self.description,
tags=self.tags,
owner=self.owner,
)
fv.projection = copy.copy(self.projection)
return fv
def __eq__(self, other):
if not super().__eq__(other):
return False
if (
not self.source_feature_view_projections
== other.source_feature_view_projections
or not self.source_request_sources == other.source_request_sources
or not self.udf.__code__.co_code == other.udf.__code__.co_code
):
return False
return True
def __hash__(self):
return super().__hash__()
def to_proto(self) -> OnDemandFeatureViewProto:
"""
Converts an on demand feature view object to its protobuf representation.
Returns:
A OnDemandFeatureViewProto protobuf.
"""
meta = OnDemandFeatureViewMeta()
if self.created_timestamp:
meta.created_timestamp.FromDatetime(self.created_timestamp)
if self.last_updated_timestamp:
meta.last_updated_timestamp.FromDatetime(self.last_updated_timestamp)
sources = {}
for source_name, fv_projection in self.source_feature_view_projections.items():
sources[source_name] = OnDemandSource(
feature_view_projection=fv_projection.to_proto()
)
for (source_name, request_sources,) in self.source_request_sources.items():
sources[source_name] = OnDemandSource(
request_data_source=request_sources.to_proto()
)
spec = OnDemandFeatureViewSpec(
name=self.name,
features=[feature.to_proto() for feature in self.features],
sources=sources,
user_defined_function=UserDefinedFunctionProto(
name=self.udf.__name__, body=dill.dumps(self.udf, recurse=True),
),
description=self.description,
tags=self.tags,
owner=self.owner,
)
return OnDemandFeatureViewProto(spec=spec, meta=meta)
@classmethod
def from_proto(cls, on_demand_feature_view_proto: OnDemandFeatureViewProto):
"""
Creates an on demand feature view from a protobuf representation.
Args:
on_demand_feature_view_proto: A protobuf representation of an on-demand feature view.
Returns:
A OnDemandFeatureView object based on the on-demand feature view protobuf.
"""
sources = {}
for (
source_name,
on_demand_source,
) in on_demand_feature_view_proto.spec.sources.items():
if on_demand_source.WhichOneof("source") == "feature_view":
sources[source_name] = FeatureView.from_proto(
on_demand_source.feature_view
).projection
elif on_demand_source.WhichOneof("source") == "feature_view_projection":
sources[source_name] = FeatureViewProjection.from_proto(
on_demand_source.feature_view_projection
)
else:
sources[source_name] = RequestSource.from_proto(
on_demand_source.request_data_source
)
on_demand_feature_view_obj = cls(
name=on_demand_feature_view_proto.spec.name,
schema=[
Field(
name=feature.name,
dtype=from_value_type(ValueType(feature.value_type)),
)
for feature in on_demand_feature_view_proto.spec.features
],
sources=sources,
udf=dill.loads(
on_demand_feature_view_proto.spec.user_defined_function.body
),
description=on_demand_feature_view_proto.spec.description,
tags=dict(on_demand_feature_view_proto.spec.tags),
owner=on_demand_feature_view_proto.spec.owner,
)
# FeatureViewProjections are not saved in the OnDemandFeatureView proto.
# Create the default projection.
on_demand_feature_view_obj.projection = FeatureViewProjection.from_definition(
on_demand_feature_view_obj
)
if on_demand_feature_view_proto.meta.HasField("created_timestamp"):
on_demand_feature_view_obj.created_timestamp = (
on_demand_feature_view_proto.meta.created_timestamp.ToDatetime()
)
if on_demand_feature_view_proto.meta.HasField("last_updated_timestamp"):
on_demand_feature_view_obj.last_updated_timestamp = (
on_demand_feature_view_proto.meta.last_updated_timestamp.ToDatetime()
)
return on_demand_feature_view_obj
def get_request_data_schema(self) -> Dict[str, ValueType]:
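        """
        Returns a mapping from each request data field name to its Feast value type,
        aggregated across all request sources of this on demand feature view.
        """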
schema: Dict[str, ValueType] = {}
for request_source in self.source_request_sources.values():
if isinstance(request_source.schema, List):
new_schema = {}
for field in request_source.schema:
new_schema[field.name] = field.dtype.to_value_type()
schema.update(new_schema)
elif isinstance(request_source.schema, Dict):
schema.update(request_source.schema)
else:
raise Exception(
f"Request source schema is not correct type: ${str(type(request_source.schema))}"
)
return schema
def get_transformed_features_df(
self, df_with_features: pd.DataFrame, full_feature_names: bool = False,
) -> pd.DataFrame:
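        """
        Applies this feature view's udf to `df_with_features` and returns the transformed
        feature columns, renamed to either short or fully qualified names depending on
        `full_feature_names`.
        """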
# Apply on demand transformations
columns_to_cleanup = []
for source_fv_projection in self.source_feature_view_projections.values():
for feature in source_fv_projection.features:
full_feature_ref = f"{source_fv_projection.name}__{feature.name}"
if full_feature_ref in df_with_features.keys():
# Make sure the partial feature name is always present
df_with_features[feature.name] = df_with_features[full_feature_ref]
columns_to_cleanup.append(feature.name)
elif feature.name in df_with_features.keys():
# Make sure the full feature name is always present
df_with_features[full_feature_ref] = df_with_features[feature.name]
columns_to_cleanup.append(full_feature_ref)
# Compute transformed values and apply to each result row
df_with_transformed_features = self.udf.__call__(df_with_features)
        # Work out whether the correct column names are used.
rename_columns: Dict[str, str] = {}
for feature in self.features:
short_name = feature.name
long_name = f"{self.projection.name_to_use()}__{feature.name}"
if (
short_name in df_with_transformed_features.columns
and full_feature_names
):
rename_columns[short_name] = long_name
elif not full_feature_names:
# Long name must be in dataframe.
rename_columns[long_name] = short_name
# Cleanup extra columns used for transformation
df_with_features.drop(columns=columns_to_cleanup, inplace=True)
return df_with_transformed_features.rename(columns=rename_columns)
def infer_features(self):
"""
Infers the set of features associated to this feature view from the input source.
Raises:
RegistryInferenceFailure: The set of features could not be inferred.
"""
df = pd.DataFrame()
for feature_view_projection in self.source_feature_view_projections.values():
for feature in feature_view_projection.features:
dtype = feast_value_type_to_pandas_type(feature.dtype.to_value_type())
df[f"{feature_view_projection.name}__{feature.name}"] = pd.Series(
dtype=dtype
)
df[f"{feature.name}"] = pd.Series(dtype=dtype)
for request_data in self.source_request_sources.values():
for field in request_data.schema:
dtype = feast_value_type_to_pandas_type(field.dtype.to_value_type())
df[f"{field.name}"] = pd.Series(dtype=dtype)
output_df: pd.DataFrame = self.udf.__call__(df)
inferred_features = []
for f, dt in zip(output_df.columns, output_df.dtypes):
inferred_features.append(
Field(
name=f,
dtype=from_value_type(
python_type_to_feast_value_type(f, type_name=str(dt))
),
)
)
if self.features:
missing_features = []
for specified_features in self.features:
if specified_features not in inferred_features:
missing_features.append(specified_features)
if missing_features:
raise SpecifiedFeaturesNotPresentError(
[f.name for f in missing_features], self.name
)
else:
self.features = inferred_features
if not self.features:
raise RegistryInferenceFailure(
"OnDemandFeatureView",
f"Could not infer Features for the feature view '{self.name}'.",
)
@staticmethod
def get_requested_odfvs(feature_refs, project, registry):
all_on_demand_feature_views = registry.list_on_demand_feature_views(
project, allow_cache=True
)
requested_on_demand_feature_views: List[OnDemandFeatureView] = []
for odfv in all_on_demand_feature_views:
for feature in odfv.features:
if f"{odfv.name}:{feature.name}" in feature_refs:
requested_on_demand_feature_views.append(odfv)
break
return requested_on_demand_feature_views
# TODO(felixwang9817): Force this decorator to accept kwargs and switch from
# `features` to `schema`.
def on_demand_feature_view(
*args,
features: Optional[List[Feature]] = None,
sources: Optional[Dict[str, Union[FeatureView, RequestSource]]] = None,
inputs: Optional[Dict[str, Union[FeatureView, RequestSource]]] = None,
schema: Optional[List[Field]] = None,
description: str = "",
tags: Optional[Dict[str, str]] = None,
owner: str = "",
):
"""
Creates an OnDemandFeatureView object with the given user function as udf.
Args:
features (deprecated): The list of features in the output of the on demand
feature view, after the transformation has been applied.
sources (optional): A map from input source names to the actual input sources,
which may be feature views, feature view projections, or request data sources.
These sources serve as inputs to the udf, which will refer to them by name.
inputs (optional): A map from input source names to the actual input sources,
which may be feature views, feature view projections, or request data sources.
These sources serve as inputs to the udf, which will refer to them by name.
schema (optional): The list of features in the output of the on demand feature
view, after the transformation has been applied.
description (optional): A human-readable description.
tags (optional): A dictionary of key-value pairs to store arbitrary metadata.
owner (optional): The owner of the on demand feature view, typically the email
of the primary maintainer.
"""
positional_attributes = ["features", "inputs"]
_schema = schema or []
if len(_schema) == 0 and features is not None:
_schema = [Field.from_feature(feature) for feature in features]
if features is not None:
warnings.warn(
(
"The `features` parameter is being deprecated in favor of the `schema` parameter. "
"Please switch from using `features` to `schema`. This will also requiring switching "
"feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not "
"support the `features` parameter."
),
DeprecationWarning,
)
_sources = sources or inputs
if inputs and sources:
raise ValueError("At most one of `sources` or `inputs` can be specified.")
elif inputs:
warnings.warn(
(
"The `inputs` parameter is being deprecated. Please use `sources` instead. "
"Feast 0.21 and onwards will not support the `inputs` parameter."
),
DeprecationWarning,
)
if args:
warnings.warn(
(
"On demand feature view parameters should be specified as keyword arguments "
"instead of positional arguments. Feast 0.23 and onwards will not support "
"positional arguments in on demand feature view definitions."
),
DeprecationWarning,
)
if len(args) > len(positional_attributes):
raise ValueError(
f"Only {', '.join(positional_attributes)} are allowed as positional args "
f"when defining feature views, for backwards compatibility."
)
if len(args) >= 1:
_schema = args[0]
# Convert Features to Fields.
if len(_schema) > 0 and isinstance(_schema[0], Feature):
_schema = [Field.from_feature(feature) for feature in _schema]
warnings.warn(
(
"The `features` parameter is being deprecated in favor of the `schema` parameter. "
"Please switch from using `features` to `schema`. This will also requiring switching "
"feature definitions from using `Feature` to `Field`. Feast 0.21 and onwards will not "
"support the `features` parameter."
),
DeprecationWarning,
)
if len(args) >= 2:
_sources = args[1]
warnings.warn(
(
"The `inputs` parameter is being deprecated. Please use `sources` instead. "
"Feast 0.21 and onwards will not support the `inputs` parameter."
),
DeprecationWarning,
)
if not _sources:
raise ValueError("The `sources` parameter must be specified.")
def decorator(user_function):
on_demand_feature_view_obj = OnDemandFeatureView(
name=user_function.__name__,
sources=_sources,
schema=_schema,
udf=user_function,
description=description,
tags=tags,
owner=owner,
)
functools.update_wrapper(
wrapper=on_demand_feature_view_obj, wrapped=user_function
)
return on_demand_feature_view_obj
return decorator
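# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of how the decorator above is typically applied; the
# request source, feature names, and dtype below are hypothetical and only
# meant to show the shape of a definition, not a definitive API reference.
#
#   @on_demand_feature_view(
#       sources={"val_request": val_request_source},
#       schema=[Field(name="val_doubled", dtype=Float64)],
#   )
#   def val_doubled(features_df: pd.DataFrame) -> pd.DataFrame:
#       out = pd.DataFrame()
#       out["val_doubled"] = features_df["val"] * 2
#       return out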
| 42.14188 | 111 | 0.612461 | [
"Apache-2.0"
] | aurobindoc/feast | sdk/python/feast/on_demand_feature_view.py | 24,653 | Python |
#!/usr/bin/env python
# coding=utf8
from copy import deepcopy
class Deque:
def __init__(self):
self.data = []
def addFront(self, item):
self.data.insert(0, item)
def addTail(self, item):
self.data.append(item)
def removeFront(self):
if self.size() == 0:
return None
else:
value = deepcopy(self.data[0])
del self.data[0]
return value
def removeTail(self):
if self.size() == 0:
return None
else:
value = deepcopy(self.data[-1])
del self.data[-1]
return value
def size(self):
return len(self.data)
def check_palindrome(check_value):
deque = Deque()
# Reading data into deque
for c in check_value:
deque.addTail(c)
# Comparing each symbol on both sides, if not equal - not palindrome
while deque.size() > 1:
if deque.removeTail() != deque.removeFront():
return False
# If all check was succeeded, string is a palindrome
return True
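# A minimal usage sketch (not part of the original module); the sample words
# below are illustrative only.
if __name__ == '__main__':
    for word in ('racecar', 'level', 'python'):
        print(word, check_palindrome(word))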
| 21.6 | 72 | 0.564815 | [
"MIT"
] | igelfiend/Python.Structures.Deque | palindrome_check.py | 1,080 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from PyQt5 import QtWidgets, QtGui, QtCore
import sys, os.path as op
path1 = op.join( op.abspath(op.dirname(__file__)), '..', 'Structure')
path2 = op.join( op.abspath(op.dirname(__file__)), '..')
sys.path.append(path1)
sys.path.append(path2)
from Structure import *
from VisObject import *
class SubVision( QtWidgets.QWidget ):
""" Базовый класс-окно для показа подчиненных объектов """
def __init__( self, main_object, is_change=True, parent=None ):
super().__init__( parent=parent )
        # Set the main object
self.__obj = main_object
        # Set whether the elements can be modified (default - Yes)
self.is_change = is_change
self.initUI()
def initUI( self ):
        ''' Initialize the window contents '''
        # Add the data list widget and fill it with the subordinate objects
self.sub_objs = QtWidgets.QListWidget( )
for obj in self.__obj.sub_objects:
            # Create a list item
a = QtWidgets.QListWidgetItem()
            # Attach the subordinate object to the item
a.sub_obj = obj
            # Set the item text to the subordinate object's name
a.setText( obj.name )
            # Add the item to the list
self.sub_objs.addItem( a )
        # Create the form layout and add the list of subordinate objects to it
self.form = QtWidgets.QFormLayout()
self.form.addRow(self.sub_objs)
self.setLayout(self.form)
        # Connect the double-click signal to the handler method
self.sub_objs.itemDoubleClicked.connect( self.isDoubleClicked )
def isDoubleClicked( self, obj ):
        # If editing is allowed, open the edit window; otherwise open the read-only view window
if self.is_change:
sub_window = ChangeVisObject( obj.sub_obj, parent=self )
else:
sub_window = SimpleVisObject( obj.sub_obj, parent=self )
sub_window.setWindowTitle( "Редактирование объекта: " + obj.sub_obj.name )
#Делаем это или родительское окно неактивным
if self.parent() is None:
self.setEnabled( False )
else:
self.parent().setEnabled( False )
        # Enable the child window and show it
sub_window.setEnabled( True )
sub_window.show()
| 38.885246 | 85 | 0.643339 | [
"MIT"
] | bochkovoi/AHP | src/gui/SubVision.py | 2,955 | Python |
#!/usr/bin/env python
#version 2.1
from PyQt4 import QtGui
from PyQt4 import QtCore
from PyQt4 import Qt
import PyQt4.Qwt5 as Qwt
from PyQt4.QtCore import pyqtSignal
class control_button_frame(QtGui.QFrame):
def __init__(self, parent=None, az_el = None):
super(control_button_frame, self).__init__()
self.parent = parent
self.az_el = az_el
self.initUI()
def initUI(self):
self.setFrameShape(QtGui.QFrame.StyledPanel)
self.init_widgets()
self.connect_signals()
def init_widgets(self):
self.MinusTenButton = QtGui.QPushButton(self)
self.MinusTenButton.setText("-10.0")
self.MinusTenButton.setMinimumWidth(45)
self.MinusOneButton = QtGui.QPushButton(self)
self.MinusOneButton.setText("-1.0")
self.MinusOneButton.setMinimumWidth(45)
self.MinusPtOneButton = QtGui.QPushButton(self)
self.MinusPtOneButton.setText("-0.1")
self.MinusPtOneButton.setMinimumWidth(45)
self.PlusPtOneButton = QtGui.QPushButton(self)
self.PlusPtOneButton.setText("+0.1")
self.PlusPtOneButton.setMinimumWidth(45)
self.PlusOneButton = QtGui.QPushButton(self)
self.PlusOneButton.setText("+1.0")
self.PlusOneButton.setMinimumWidth(45)
self.PlusTenButton = QtGui.QPushButton(self)
self.PlusTenButton.setText("+10.0")
self.PlusTenButton.setMinimumWidth(45)
hbox1 = QtGui.QHBoxLayout()
hbox1.addWidget(self.MinusTenButton)
hbox1.addWidget(self.MinusOneButton)
hbox1.addWidget(self.MinusPtOneButton)
hbox1.addWidget(self.PlusPtOneButton)
hbox1.addWidget(self.PlusOneButton)
hbox1.addWidget(self.PlusTenButton)
self.setLayout(hbox1)
def connect_signals(self):
self.PlusPtOneButton.clicked.connect(self.button_clicked)
self.PlusOneButton.clicked.connect(self.button_clicked)
self.PlusTenButton.clicked.connect(self.button_clicked)
self.MinusPtOneButton.clicked.connect(self.button_clicked)
self.MinusOneButton.clicked.connect(self.button_clicked)
self.MinusTenButton.clicked.connect(self.button_clicked)
def button_clicked(self):
sender = self.sender()
self.parent.increment_target_angle(self.az_el,float(sender.text()))
| 34.217391 | 83 | 0.694197 | [
"MIT"
] | vt-gs/tracking | gui/v2.1/control_button_frame.py | 2,361 | Python |
'''base config for emanet'''
# config for dataset
DATASET_CFG = {
'train': {
'type': '',
'set': 'train',
'rootdir': '',
'aug_opts': [('Resize', {'output_size': (2048, 512), 'keep_ratio': True, 'scale_range': (0.5, 2.0)}),
('RandomCrop', {'crop_size': (512, 512), 'one_category_max_ratio': 0.75}),
('RandomFlip', {'flip_prob': 0.5}),
('PhotoMetricDistortion', {}),
('Normalize', {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375]}),
('ToTensor', {}),
('Padding', {'output_size': (512, 512), 'data_type': 'tensor'}),]
},
'test': {
'type': '',
'set': 'val',
'rootdir': '',
'aug_opts': [('Resize', {'output_size': (2048, 512), 'keep_ratio': True, 'scale_range': None}),
('Normalize', {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375]}),
('ToTensor', {}),]
}
}
# config for dataloader
DATALOADER_CFG = {
'train': {
'type': ['nondistributed', 'distributed'][1],
'batch_size': 16,
'num_workers': 16,
'shuffle': True,
'pin_memory': True,
'drop_last': True,
},
'test': {
'type': ['nondistributed', 'distributed'][1],
'batch_size': 1,
'num_workers': 16,
'shuffle': False,
'pin_memory': True,
'drop_last': False,
}
}
# config for optimizer
OPTIMIZER_CFG = {
'type': 'sgd',
'sgd': {
'learning_rate': 0.01,
'momentum': 0.9,
'weight_decay': 5e-4,
},
'max_epochs': 0,
'params_rules': {},
'filter_params': True,
'policy': {
'type': 'poly',
'opts': {'power': 0.9, 'max_iters': None, 'num_iters': None, 'num_epochs': None}
},
'adjust_period': ['iteration', 'epoch'][0],
}
# config for losses
LOSSES_CFG = {
'loss_aux': {
'celoss': {'scale_factor': 0.4, 'opts': {'ignore_index': 255, 'reduction': 'mean'}}
},
'loss_cls': {
'celoss': {'scale_factor': 1.0, 'opts': {'ignore_index': 255, 'reduction': 'mean'}}
},
}
# config for model
MODEL_CFG = {
'type': 'emanet',
'num_classes': -1,
'benchmark': True,
'is_multi_gpus': True,
'align_corners': False,
'distributed': {'is_on': True, 'backend': 'nccl'},
'norm_cfg': {'type': 'syncbatchnorm', 'opts': {}},
'act_cfg': {'type': 'relu', 'opts': {'inplace': True}},
'backbone': {
'type': 'resnet101',
'series': 'resnet',
'pretrained': True,
'outstride': 8,
'use_stem': True,
'selected_indices': (2, 3),
},
'ema': {
'in_channels': 2048,
'ema_channels': 512,
'momentum': 0.1,
'num_stages': 3,
'num_bases': 64,
},
'decoder': {
'in_channels': 2560,
'out_channels': 512,
'dropout': 0.1,
},
'auxiliary': {
'in_channels': 1024,
'out_channels': 512,
'dropout': 0.1,
}
}
# config for inference
INFERENCE_CFG = {
'mode': 'whole',
'opts': {},
'tricks': {
'multiscale': [1],
'flip': False,
'use_probs_before_resize': False
}
}
# config for common
COMMON_CFG = {
'train': {
'backupdir': '',
'logfilepath': '',
'loginterval': 50,
'saveinterval': 1
},
'test': {
'backupdir': '',
'logfilepath': '',
'resultsavepath': ''
}
} | 27.457364 | 109 | 0.474873 | [
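# A minimal sketch of how these base dictionaries are typically specialised
# (the import path, dataset name, and class count below are illustrative
# assumptions, not part of this file): a derived config imports them, fills
# the placeholder fields such as 'num_classes' and 'rootdir', and hands the
# result to the training script.
#
#   from .base_cfg import DATASET_CFG, MODEL_CFG, OPTIMIZER_CFG
#   MODEL_CFG['num_classes'] = 19                 # e.g. a 19-class benchmark
#   DATASET_CFG['train']['type'] = 'cityscapes'
#   DATASET_CFG['train']['rootdir'] = '/path/to/data'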
"MIT"
] | skydengyao/sssegmentation | ssseg/cfgs/emanet/base_cfg.py | 3,542 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-05-29 06:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djeddit', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='thread',
name='locked',
field=models.BooleanField(default=False),
),
]
| 21 | 53 | 0.60771 | [
"Apache-2.0"
] | EatEmAll/django-djedd | djeddit/migrations/0002_thread_locked.py | 441 | Python |
from collections import defaultdict
class Graph:
def __init__(self, numberOfNodes):
self.numberOfNodes = numberOfNodes+1
self.graph = [[0 for x in range(numberOfNodes+1)]
for y in range(numberOfNodes+1)]
def withInBounds(self, v1, v2):
        # Valid vertex indices run from 0 to self.numberOfNodes - 1 (the matrix dimension).
        return (0 <= v1 < self.numberOfNodes) and (0 <= v2 < self.numberOfNodes)
def insertEdge(self, v1, v2):
if(self.withInBounds(v1, v2)):
self.graph[v1][v2] = 1
def printGraph(self):
for i in range(self.numberOfNodes):
for j in range(len(self.graph[i])):
if(self.graph[i][j]):
print(i, "->", j)
g = Graph(5)
g.insertEdge(1, 2)
g.insertEdge(2, 3)
g.insertEdge(4, 5)
g.printGraph()
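# Expected output of the calls above (shown here for reference):
#   1 -> 2
#   2 -> 3
#   4 -> 5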
| 25 | 96 | 0.575484 | [
"MIT"
] | PawanRamaMali/LeetCode | Graphs/graphs creation/directed graph/adjacency matrix/index.py | 775 | Python |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import logging
import re
import os
import requests
from builtins import str
from typing import Text, List, Dict, Any
logger = logging.getLogger(__name__)
INTENT_MESSAGE_PREFIX = "/"
class NaturalLanguageInterpreter(object):
def parse(self, text):
raise NotImplementedError(
"Interpreter needs to be able to parse "
"messages into structured output.")
@staticmethod
def create(obj):
if isinstance(obj, NaturalLanguageInterpreter):
return obj
if isinstance(obj, str):
return RasaNLUInterpreter(model_directory=obj)
else:
return RegexInterpreter() # default interpreter
class RegexInterpreter(NaturalLanguageInterpreter):
@staticmethod
def allowed_prefixes():
return INTENT_MESSAGE_PREFIX + "_" # _ is deprecated but supported
@staticmethod
def _create_entities(parsed_entities, sidx, eidx):
entities = []
for k, vs in parsed_entities.items():
if not isinstance(vs, list):
vs = [vs]
for value in vs:
entities.append({
"entity": k,
"start": sidx,
"end": eidx, # can't be more specific
"value": value
})
return entities
@staticmethod
    def _parse_parameters(entity_str, sidx, eidx, user_input):
        # type: (Text, int, int, Text) -> List[Dict[Text, Any]]
        if entity_str is None or not entity_str.strip():
            # if there is nothing to parse we will directly exit
            return []
        try:
            parsed_entities = json.loads(entity_str)
if isinstance(parsed_entities, dict):
return RegexInterpreter._create_entities(parsed_entities,
sidx, eidx)
else:
raise Exception("Parsed value isn't a json object "
"(instead parser found '{}')"
".".format(type(parsed_entities)))
except Exception as e:
logger.warning("Invalid to parse arguments in line "
"'{}'. Failed to decode parameters"
"as a json object. Make sure the intent"
"followed by a proper json object. "
"Error: {}".format(user_input, e))
return []
@staticmethod
def extract_intent_and_entities(user_input):
# type: (Text) -> object
"""Parse the user input using regexes to extract intent & entities."""
prefixes = re.escape(RegexInterpreter.allowed_prefixes())
# the regex matches "slot{"a": 1}"
m = re.search('^['+prefixes+']?([^{]+)([{].+)?', user_input)
if m is not None:
event_name = m.group(1).strip()
entities = RegexInterpreter._parse_parameters(m.group(2),
m.start(2),
m.end(2),
user_input)
return event_name, entities
else:
logger.warning("Failed to parse intent end entities from "
"'{}'. ".format(user_input))
return None, []
@staticmethod
def deprecated_extraction(user_input):
"""DEPRECATED parse of user input message."""
        value_assign_rx = r'\s*(.+)\s*=\s*(.+)\s*'
prefixes = re.escape(RegexInterpreter.allowed_prefixes())
        structured_message_rx = r'^['+prefixes+r']?([^\[]+)(\[(.+)\])?'
m = re.search(structured_message_rx, user_input)
if m is not None:
intent = m.group(1).lower()
offset = m.start(3)
entities_str = m.group(3)
entities = []
if entities_str is not None:
for entity_str in entities_str.split(','):
for match in re.finditer(value_assign_rx, entity_str):
start = match.start(2) + offset
end = match.end(0) + offset
entity = {
"entity": match.group(1),
"start": start,
"end": end,
"value": match.group(2)}
entities.append(entity)
return intent, entities
else:
return None, []
@staticmethod
def is_using_deprecated_format(text):
"""Indicates if the text string is using the deprecated intent format.
        In the deprecated format entities were annotated using `[name=Rasa]`
which has been replaced with `{"name": "Rasa"}`."""
return (text.find("[") != -1
and (text.find("{") == -1 or
text.find("[") < text.find("{")))
def parse(self, text):
"""Parse a text message."""
if self.is_using_deprecated_format(text):
intent, entities = self.deprecated_extraction(text)
else:
intent, entities = self.extract_intent_and_entities(text)
return {
'text': text,
'intent': {
'name': intent,
'confidence': 1.0,
},
'intent_ranking': [{
'name': intent,
'confidence': 1.0,
}],
'entities': entities,
}
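# A minimal usage sketch (illustrative, not part of the original module):
# RegexInterpreter turns a structured message such as '/greet{"name": "Rasa"}'
# into the same dict shape a trained NLU model would return, e.g.
#
#   parsed = RegexInterpreter().parse('/greet{"name": "Rasa"}')
#   # parsed["intent"]["name"]       -> "greet"
#   # parsed["entities"][0]["value"] -> "Rasa"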
class RasaNLUHttpInterpreter(NaturalLanguageInterpreter):
def __init__(self, model_name=None, token=None, server='http://localhost:5000', project_name='default'):
self.model_name = model_name
self.token = token
self.server = server
self.project_name = project_name
def parse(self, text):
"""Parse a text message.
Return a default value if the parsing of the text failed."""
default_return = {"intent": {"name": "", "confidence": 0.0},
"entities": [], "text": ""}
result = self._rasa_http_parse(text)
return result if result is not None else default_return
def _rasa_http_parse(self, text):
"""Send a text message to a running rasa NLU http server.
Return `None` on failure."""
if not self.server:
logger.error(
"Failed to parse text '{}' using rasa NLU over http. "
"No rasa NLU server specified!".format(text))
return None
params = {
"token": self.token,
"model": self.model_name,
"project": self.project_name,
"q": text
}
url = "{}/parse".format(self.server)
try:
result = requests.get(url, params=params)
if result.status_code == 200:
return result.json()
else:
logger.error(
"Failed to parse text '{}' using rasa NLU over http. "
"Error: {}".format(text, result.text))
return None
except Exception as e:
logger.error(
"Failed to parse text '{}' using rasa NLU over http. "
"Error: {}".format(text, e))
return None
class RasaNLUInterpreter(NaturalLanguageInterpreter):
def __init__(self, model_directory, config_file=None, lazy_init=False):
self.model_directory = model_directory
self.lazy_init = lazy_init
self.config_file = config_file
if not lazy_init:
self._load_interpreter()
else:
self.interpreter = None
def parse(self, text):
"""Parse a text message.
Return a default value if the parsing of the text failed."""
if self.lazy_init and self.interpreter is None:
self._load_interpreter()
return self.interpreter.parse(text)
def _load_interpreter(self):
from rasa_nlu.model import Interpreter
self.interpreter = Interpreter.load(self.model_directory)
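# A minimal sketch of the NaturalLanguageInterpreter.create() dispatch defined
# above (the model path is a hypothetical example, not a required layout):
#
#   nlu = NaturalLanguageInterpreter.create("models/nlu/default/current")
#   # -> RasaNLUInterpreter loading that model directory
#   nlu = NaturalLanguageInterpreter.create(None)
#   # -> RegexInterpreter, the fallback when no model string is given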
| 34.890756 | 108 | 0.53408 | [
"Apache-2.0"
] | RocketChat/rasa_core | rasa_core/interpreter.py | 8,304 | Python |
# Copyright 2022 The SeqIO Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for seqio.dataset_providers."""
import copy
import functools
import os
import shutil
from typing import Any, Callable, Mapping, Optional, Sequence
from absl.testing import absltest
from absl.testing import parameterized
from seqio import dataset_providers
from seqio import feature_converters
from seqio import metrics as metrics_lib
from seqio import preprocessors
from seqio import test_utils
from seqio import utils
from seqio import vocabularies
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
tf.compat.v1.enable_eager_execution()
TaskRegistry = dataset_providers.TaskRegistry
MixtureRegistry = dataset_providers.MixtureRegistry
mock = absltest.mock
assert_dataset = test_utils.assert_dataset
create_default_dataset = test_utils.create_default_dataset
class TasksTest(test_utils.FakeTaskTest):
def test_invalid_name(self):
with self.assertRaisesRegex(
ValueError,
"Task name 'invalid/name' contains invalid characters. "
"Must match regex: .*"):
self.add_task("invalid/name", self.function_source)
def test_repeat_name(self):
with self.assertRaisesWithLiteralMatch(
ValueError,
"Attempting to register duplicate provider: text_line_task"):
self.add_task("text_line_task", self.text_line_source)
def test_function_source_signature(self):
# Good signatures.
def good_fn(split, shuffle_files):
del split
del shuffle_files
dataset_providers.FunctionDataSource(good_fn, splits=("train",))
def default_good_fn(split, shuffle_files=False):
del split
del shuffle_files
dataset_providers.FunctionDataSource(default_good_fn, splits=("train",))
def seed_fn(split, shuffle_files=True, seed=0):
del split
del shuffle_files
del seed
dataset_providers.FunctionDataSource(seed_fn, splits=("train",))
def extra_kwarg_good_fn(split, shuffle_files, unused_kwarg=True):
del split
del shuffle_files
dataset_providers.FunctionDataSource(extra_kwarg_good_fn, splits=("train",))
# Bad signatures.
with self.assertRaisesWithLiteralMatch(
ValueError,
"'missing_shuff' must have positional args ('split', 'shuffle_files'), "
"got: ('split',)"):
def missing_shuff(split):
del split
dataset_providers.FunctionDataSource(missing_shuff, splits=("train",))
with self.assertRaisesWithLiteralMatch(
ValueError,
"'missing_split' must have positional args ('split', 'shuffle_files'), "
"got: ('shuffle_files',)"):
def missing_split(shuffle_files):
del shuffle_files
dataset_providers.FunctionDataSource(missing_split, splits=("train",))
with self.assertRaisesWithLiteralMatch(
ValueError,
"'extra_pos_arg' may only have positional args ('split', "
"'shuffle_files'), got: ('split', 'shuffle_files', 'unused_arg')"):
def extra_pos_arg(split, shuffle_files, unused_arg):
del split
del shuffle_files
dataset_providers.FunctionDataSource(extra_pos_arg, splits=("train",))
def test_metric_fn_signature(self):
# pylint:disable=unused-argument
add_task = functools.partial(self.add_task, source=self.function_source)
def score_metric_fn(targets, scores):
return {}
def predict_metric_fn(targets, predictions):
return {}
valid_task = add_task(
"valid_metrics", metric_fns=[score_metric_fn, predict_metric_fn])
self.assertSameElements(
[score_metric_fn, predict_metric_fn], valid_task.metric_fns)
self.assertSameElements(
[score_metric_fn], valid_task.score_metric_fns)
self.assertSameElements(
[predict_metric_fn], valid_task.predict_metric_fns)
def extra_arg_metric_fn(targets, predictions, extra_param):
return {}
expected_error_message_prefix = (
"Metric functions must have positional arguments matching either "
"('targets', 'predictions') or ('targets', 'scores'). Got: ")
with self.assertRaisesWithLiteralMatch(
ValueError,
expected_error_message_prefix +
"('targets', 'predictions', 'extra_param')"):
valid_task = add_task(
"extra_arg_metric", metric_fns=[extra_arg_metric_fn])
def bad_order_metric_fn(predictions, targets):
return {}
with self.assertRaisesWithLiteralMatch(
ValueError,
expected_error_message_prefix + "('predictions', 'targets')"):
valid_task = add_task(
"bad_order_metric", metric_fns=[bad_order_metric_fn])
def bad_default_metric_fn(targets, predictions=(0)):
return {}
with self.assertRaisesWithLiteralMatch(
ValueError,
expected_error_message_prefix + "('targets',)"):
valid_task = add_task(
"bad_default_metric", metric_fns=[bad_default_metric_fn])
def ok_default_metric_fn(targets, predictions, extra_param=3):
return {}
valid_task_2 = add_task(
"valid_metrics_2", metric_fns=[ok_default_metric_fn])
self.assertSameElements([ok_default_metric_fn], valid_task_2.metric_fns)
self.assertEmpty(valid_task_2.score_metric_fns)
self.assertSameElements(
[ok_default_metric_fn], valid_task_2.predict_metric_fns)
def predict_metric_fn_with_types(
targets: Sequence[Mapping[str,
Any]], predictions: Sequence[Mapping[str,
Any]]
) -> Mapping[str, metrics_lib.MetricValue]:
return {}
valid_task_with_types = TaskRegistry.add(
"valid_metrics_with_types",
source=self.function_source,
output_features={
"inputs":
dataset_providers.Feature(test_utils.sentencepiece_vocab()),
"targets":
dataset_providers.Feature(test_utils.sentencepiece_vocab())
},
metric_fns=[predict_metric_fn_with_types])
self.assertSameElements([predict_metric_fn_with_types],
valid_task_with_types.metric_fns)
# pylint:enable=unused-argument
def test_no_tfds_version(self):
with self.assertRaisesWithLiteralMatch(
ValueError, "TFDS name must contain a version number, got: fake"):
dataset_providers.TfdsDataSource(tfds_name="fake")
def test_tfds_splits(self):
self.assertSameElements(
["train", "validation"],
dataset_providers.TfdsDataSource(tfds_name="fake:0.0.0").splits)
self.assertSameElements(
["validation"],
dataset_providers.TfdsDataSource(
tfds_name="fake:0.0.0", splits=["validation"]).splits)
self.assertSameElements(
["validation"],
dataset_providers.TfdsDataSource(
tfds_name="fake:0.0.0", splits={"validation": "train"}).splits)
def test_tfds_task(self):
self.verify_task_matches_fake_datasets(
"tfds_task", use_cached=False)
def test_function_task(self):
self.verify_task_matches_fake_datasets(
"function_task", use_cached=False)
def test_text_line_task(self):
self.verify_task_matches_fake_datasets(
"text_line_task", use_cached=False, splits=["train"])
def test_tf_example_task(self):
self.verify_task_matches_fake_datasets(
"tf_example_task", use_cached=False, splits=["train"])
@mock.patch.object(tf.io.gfile, "glob")
def test_file_data_source_shuffle_buffer_low(self, mock_glob):
mock_glob.return_value = [f"{i}" for i in range(20)]
fds = dataset_providers.FileDataSource(
read_file_fn=lambda x: tf.data.Dataset.from_tensor_slices([x]),
split_to_filepattern={"train": "filepattern"},
file_shuffle_buffer_size=2)
for _ in range(10):
ds = [
d.decode() for d in tfds.as_numpy(
fds.get_dataset("train", shuffle=True, seed=23))
]
self.assertListEqual(
ds,
[ # Not a great shuffle.
"0", "2", "1", "4", "5", "3", "7", "6", "9", "10", "11", "8",
"13", "14", "12", "16", "15", "18", "17", "19"
])
@mock.patch.object(tf.io.gfile, "glob")
def test_file_data_source_shuffle_buffer_full(self, mock_glob):
mock_glob.return_value = [f"{i}" for i in range(20)]
fds = dataset_providers.FileDataSource(
read_file_fn=lambda x: tf.data.Dataset.from_tensor_slices([x]),
split_to_filepattern={"train": "filepattern"},
file_shuffle_buffer_size=None)
for _ in range(10):
ds = [
d.decode() for d in tfds.as_numpy(
fds.get_dataset("train", shuffle=True, seed=23))
]
self.assertListEqual(
ds,
[ # Good shuffle.
"2", "13", "12", "19", "15", "5", "9", "1", "6", "8", "3", "0",
"10", "4", "14", "7", "16", "17", "18", "11"
])
def _get_preps_with_cache_placeholder_buffer_size(self, buffer_size):
preps = list(self.DEFAULT_PREPROCESSORS)
for i, p in enumerate(preps):
if isinstance(p, dataset_providers.CacheDatasetPlaceholder):
preps[i] = dataset_providers.CacheDatasetPlaceholder(
file_shuffle_buffer_size=buffer_size)
return preps
def _mock_and_assert_cached_source(self, task_name, buffer_size):
cached_task = dataset_providers.get_mixture_or_task(task_name)
cached_task._get_cached_source = mock.MagicMock(
side_effect=cached_task._get_cached_source)
_ = cached_task.get_dataset(None, "train", use_cached=True)
cached_task._get_cached_source.assert_called_once_with(
"train", buffer_size)
def test_cached_data_source_shuffle_buffer_default(self):
self._mock_and_assert_cached_source("cached_task", None)
def test_cached_data_source_shuffle_buffer_set(self):
self.add_task("cached_task_buf_2", self.tfds_source,
self._get_preps_with_cache_placeholder_buffer_size(2))
shutil.copytree(self.cached_task_dir,
os.path.join(self.test_data_dir, "cached_task_buf_2"))
self._mock_and_assert_cached_source("cached_task_buf_2", 2)
def test_cached_data_source_shuffle_buffer_None(self):
self.add_task("cached_task_buf_None", self.tfds_source,
self._get_preps_with_cache_placeholder_buffer_size(None))
shutil.copytree(self.cached_task_dir,
os.path.join(self.test_data_dir, "cached_task_buf_None"))
self._mock_and_assert_cached_source("cached_task_buf_None", None)
def test_proto_task(self):
self.verify_task_matches_fake_datasets(
"proto_task", use_cached=False, splits=["train"])
def test_num_input_examples(self):
self.assertEqual(30, self.cached_task.num_input_examples("train"))
self.assertEqual(10, self.cached_task.num_input_examples("validation"))
def test_disallow_shuffle(self):
task = dataset_providers.Task(
"no_shuffle",
source=self.function_source,
output_features=self.DEFAULT_OUTPUT_FEATURES,
preprocessors=self.DEFAULT_PREPROCESSORS,
shuffle_buffer_size=None)
with self.assertRaisesWithLiteralMatch(
ValueError, "Shuffling is disallowed for Task 'no_shuffle' since its "
"`shuffle_buffer_size` was set to `None` on construction."):
task.get_dataset(None, shuffle=True)
with self.assertRaisesWithLiteralMatch(
ValueError, "Shuffling is disallowed for Task 'no_shuffle' since its "
"`shuffle_buffer_size` was set to `None` on construction."):
task.get_dataset(None, shuffle=True, shuffle_buffer_size=100)
task.get_dataset(None, shuffle=False)
def test_supports_caching(self):
self.assertFalse(
dataset_providers.Task(
"nosupports_cache",
source=self.function_source,
output_features=self.DEFAULT_OUTPUT_FEATURES,
preprocessors=[]).supports_caching)
self.assertFalse(
dataset_providers.Task(
"nosupports_cache",
source=self.function_source,
output_features=self.DEFAULT_OUTPUT_FEATURES,
preprocessors=[preprocessors.tokenize]).supports_caching)
self.assertTrue(
dataset_providers.Task(
"supports_cache",
source=self.function_source,
output_features=self.DEFAULT_OUTPUT_FEATURES,
preprocessors=[
preprocessors.tokenize,
dataset_providers.CacheDatasetPlaceholder()
]).supports_caching)
self.assertTrue(
dataset_providers.Task(
"supports_cache",
source=self.function_source,
output_features=self.DEFAULT_OUTPUT_FEATURES,
preprocessors=[
dataset_providers.CacheDatasetPlaceholder(required=True),
preprocessors.tokenize,
]).supports_caching)
self.assertTrue(
dataset_providers.Task(
"supports_cache",
source=self.function_source,
output_features=self.DEFAULT_OUTPUT_FEATURES,
preprocessors=[
dataset_providers.CacheDatasetPlaceholder(),
]).supports_caching)
def test_requires_caching(self):
self.assertFalse(
dataset_providers.Task(
"nosupports_cache",
output_features=self.DEFAULT_OUTPUT_FEATURES,
source=self.function_source,
preprocessors=[preprocessors.tokenize]).requires_caching)
self.assertFalse(
dataset_providers.Task(
"supports_cache",
output_features=self.DEFAULT_OUTPUT_FEATURES,
source=self.function_source,
preprocessors=[
preprocessors.tokenize,
dataset_providers.CacheDatasetPlaceholder()
]).requires_caching)
task = dataset_providers.Task(
"requires_cache",
output_features=self.DEFAULT_OUTPUT_FEATURES,
source=self.function_source,
preprocessors=[
dataset_providers.CacheDatasetPlaceholder(required=True),
preprocessors.tokenize,
])
self.assertTrue(task.requires_caching)
with self.assertRaisesWithLiteralMatch(
ValueError,
"Task 'requires_cache' requires caching, but was called with "
"`use_cached=False`."):
task.get_dataset({"inputs": 512, "targets": 512}, use_cached=False)
# We haven't actually cached the task, so it still fails but with a
# different error.
with self.assertRaisesWithLiteralMatch(
AssertionError,
"'requires_cache' does not exist in any of the task cache "
"directories."):
task.get_dataset({"inputs": 512, "targets": 512}, use_cached=True)
def test_datasource_prohibits_caching(self):
function_source_no_cache = dataset_providers.FunctionDataSource(
dataset_fn=test_utils.get_fake_dataset,
splits=["train", "validation"],
caching_permitted=False)
with self.assertRaisesWithLiteralMatch(
ValueError,
"Caching was requested for 'prohibits_cache', but the underlying data "
"source prohibits caching. Please remove `CacheDatasetPlaceholder` and "
"try again."
):
dataset_providers.Task(
"prohibits_cache",
output_features=self.DEFAULT_OUTPUT_FEATURES,
source=function_source_no_cache,
preprocessors=[
dataset_providers.CacheDatasetPlaceholder(required=True),
preprocessors.tokenize,
])
def test_cache_exists(self):
self.assertTrue(self.cached_task.cache_dir)
self.cached_task.assert_cached()
self.assertEqual(
os.path.join(self.test_data_dir, "cached_task"),
self.cached_task.cache_dir)
self.assertFalse(self.uncached_task.cache_dir)
with self.assertRaisesWithLiteralMatch(
AssertionError,
"'tfds_task' does not exist in any of the task cache directories."):
TaskRegistry.get("tfds_task").assert_cached()
def test_get_cached_stats(self):
expected_train_stats = {
"examples": 3,
"inputs_tokens": 36, "inputs_max_tokens": 13,
"targets_tokens": 18, "targets_max_tokens": 6}
self.assertEqual(
expected_train_stats,
self.cached_task.get_cached_stats("train"))
# Check repeated call.
self.assertEqual(
expected_train_stats,
self.cached_task.get_cached_stats("train"))
expected_validation_stats = {
"examples": 2,
"inputs_tokens": 23, "inputs_max_tokens": 12,
"targets_tokens": 36, "targets_max_tokens": 21}
self.assertEqual(
expected_validation_stats,
self.cached_task.get_cached_stats("validation"))
with self.assertRaisesWithLiteralMatch(
ValueError, "Stats do not exist for 'cached_task' split: fake"):
self.cached_task.get_cached_stats("fake")
with self.assertRaisesWithLiteralMatch(
AssertionError,
"'uncached_task' does not exist in any of the task cache directories."):
self.uncached_task.get_cached_stats("train")
def test_set_global_cache_dirs(self):
utils.set_global_cache_dirs([])
self.assertFalse(self.cached_task.cache_dir)
utils.set_global_cache_dirs([self.test_data_dir])
self.assertTrue(self.cached_task.cache_dir)
def test_get_dataset_cached(self):
self.verify_task_matches_fake_datasets(
"cached_task", use_cached=True, token_preprocessed=False)
# Test with token preprocessor.
self.cached_task._preprocessors = self.DEFAULT_PREPROCESSORS + (
test_utils.test_token_preprocessor,)
self.verify_task_matches_fake_datasets(
"cached_task", use_cached=True, token_preprocessed=True)
def test_get_dataset_onthefly(self):
self.verify_task_matches_fake_datasets(
"uncached_task", use_cached=False)
# Test with token preprocessor.
self.cached_task._preprocessors = self.DEFAULT_PREPROCESSORS + (
test_utils.test_token_preprocessor,)
self.verify_task_matches_fake_datasets(
"cached_task", use_cached=False, token_preprocessed=True)
def test_get_dataset_no_truncation(self):
self.verify_task_matches_fake_datasets(
"uncached_task", use_cached=False, sequence_length=None)
def test_sharding(self):
for i in range(3):
self.verify_task_matches_fake_datasets(
"cached_task", use_cached=False, num_shards=i,
token_preprocessed=False)
self.verify_task_matches_fake_datasets(
"cached_task", use_cached=True, num_shards=i,
token_preprocessed=False)
def test_feature_validation(self):
default_vocab = test_utils.sentencepiece_vocab()
features = {
"inputs":
dataset_providers.Feature(vocabulary=default_vocab, required=False),
"targets":
dataset_providers.Feature(vocabulary=default_vocab, required=True),
"inputs_rank2":
dataset_providers.Feature(
vocabulary=vocabularies.PassThroughVocabulary(5),
required=False,
rank=2),
"continuous_features":
dataset_providers.ContinuousFeature(
required=False,
rank=2)
}
def _materialize(output):
task = dataset_providers.Task(
"feature_validation_task",
self.function_source,
output_features=features,
preprocessors=(lambda _: tf.data.Dataset.from_tensors(output),),
metric_fns=[],
)
list(
task.get_dataset(
{"inputs": 13, "targets": 13, "inputs_rank2": 13}, "train",
use_cached=False
).as_numpy_iterator()
)
# Missing optional feature: OK
_materialize({"targets": [0]})
# Missing required feature.
with self.assertRaisesWithLiteralMatch(
ValueError,
"Task dataset is missing expected output feature after preprocessing: "
"targets"):
_materialize({"inputs": [0]})
# Wrong type.
with self.assertRaisesWithLiteralMatch(
ValueError,
"Task dataset has incorrect type for feature 'targets' after "
"preprocessing: Got string, expected int32"):
_materialize({"targets": ["wrong type"]})
# Wrong rank.
with self.assertRaisesWithLiteralMatch(
ValueError,
"Task dataset has incorrect rank for feature 'targets' after "
"preprocessing: Got 0, expected 1"):
_materialize({"targets": 0})
# Verify rank > 1 works.
_materialize({"targets": [0], "inputs_rank2": [[0, 0, 0], [0, 0, 0]]})
# Wrong rank (1 when 2 is expected).
with self.assertRaisesWithLiteralMatch(
ValueError,
"Task dataset has incorrect rank for feature 'inputs_rank2' after "
"preprocessing: Got 1, expected 2"):
_materialize({"targets": [0], "inputs_rank2": [0]})
# Test ContinuousFeature
_materialize({
"targets": [0],
"continuous_features": [[1, 1], [0, 1]]
})
def test_value_errors(self):
dataset_fn = (
lambda split, shuffle_files: tf.data.Dataset.from_tensors(["test"]))
output_features = {
"inputs": dataset_providers.Feature(test_utils.sentencepiece_vocab())
}
with self.assertRaisesWithLiteralMatch(
ValueError, "`CacheDatasetPlaceholder` can appear at most once in the "
"preprocessing pipeline. Found 2 in 'multiple_cache_placeholders'."):
dataset_providers.Task(
"multiple_cache_placeholders",
source=dataset_providers.FunctionDataSource(
dataset_fn=dataset_fn,
splits=["train", "validation"]
),
preprocessors=[
test_utils.test_text_preprocessor,
preprocessors.tokenize,
dataset_providers.CacheDatasetPlaceholder(),
test_utils.test_token_preprocessor,
dataset_providers.CacheDatasetPlaceholder()
],
output_features=output_features,
metric_fns=[])
with self.assertRaisesWithLiteralMatch(
ValueError,
"'test_token_preprocessor' has a `sequence_length` argument but occurs "
"before `CacheDatasetPlaceholder` in 'sequence_length_pre_cache'. This "
"is not allowed since the sequence length is specified at run time."):
dataset_providers.Task(
"sequence_length_pre_cache",
dataset_providers.FunctionDataSource(
dataset_fn=dataset_fn,
splits=["train"],
),
preprocessors=[
test_utils.test_text_preprocessor,
preprocessors.tokenize,
test_utils.test_token_preprocessor,
dataset_providers.CacheDatasetPlaceholder()
],
output_features=output_features,
metric_fns=[])
def test_tfds_source_splits(self):
default_splits_src = dataset_providers.TfdsDataSource("fake:0.0.0")
self.assertSameElements(["train", "validation"], default_splits_src.splits)
validation_split_src = dataset_providers.TfdsDataSource(
"fake:0.0.0", splits=["validation"])
self.assertSameElements(["validation"], validation_split_src.splits)
sliced_split_src = dataset_providers.TfdsDataSource(
"fake:0.0.0", splits={"validation": "train[0:1%]"})
self.assertSameElements(["validation"], sliced_split_src.splits)
def test_no_eos(self):
default_vocab = test_utils.sentencepiece_vocab()
features = {
"inputs":
dataset_providers.Feature(add_eos=True, vocabulary=default_vocab),
"targets":
dataset_providers.Feature(add_eos=False, vocabulary=default_vocab),
}
self.add_task("task_no_eos", self.function_source, output_features=features)
self.verify_task_matches_fake_datasets("task_no_eos", use_cached=False)
def test_dtype(self):
default_vocab = test_utils.sentencepiece_vocab()
features = {
"inputs":
# defaults to int32
dataset_providers.Feature(vocabulary=default_vocab),
"targets":
dataset_providers.Feature(dtype=tf.int64, vocabulary=default_vocab),
}
self.add_task(
"task_dtypes",
self.function_source,
preprocessors=self.DEFAULT_PREPROCESSORS + (
utils.map_over_dataset(
lambda x: {k: tf.cast(v, tf.int64) if k == "targets" else v # pylint:disable=g-long-lambda
for k, v in x.items()}
),
),
output_features=features
)
self.verify_task_matches_fake_datasets("task_dtypes", use_cached=False)
def test_num_epochs(self):
# Try repeating after preprocessing the dataset to verify the outputs are
# the same.
epoch1_ds = self.random_task.get_dataset(
{"inputs": 13, "targets": 13},
split="train", use_cached=False, shuffle=True, seed=0)
# `random_task` has 3 examples per epoch.
epoch2_ds = self.random_task.get_dataset(
{"inputs": 13, "targets": 13},
split="train", use_cached=False, shuffle=True, seed=0
).repeat(2).skip(3)
test_utils.assert_datasets_eq(epoch1_ds, epoch2_ds)
# Try repeating before preprocessing the dataset to verify the outputs are
# different.
epoch1_ds = self.random_task.get_dataset(
{"inputs": 13, "targets": 13},
split="train", use_cached=False, shuffle=True, seed=0)
# `random_task` has 3 examples per epoch.
epoch2_ds = self.random_task.get_dataset(
{"inputs": 13, "targets": 13},
split="train", use_cached=False, shuffle=True, seed=0, num_epochs=2
).skip(3)
test_utils.assert_datasets_neq(epoch1_ds, epoch2_ds)
def test_same_seeds_cached_match(self):
dataset1 = self.cached_task.get_dataset(
{"inputs": 13, "targets": 13},
split="train", use_cached=True, shuffle=True, seed=0)
dataset2 = self.cached_task.get_dataset(
{"inputs": 13, "targets": 13},
split="train", use_cached=True, shuffle=True, seed=0)
test_utils.assert_datasets_eq(dataset1, dataset2)
def test_different_seeds_cached_mismatch(self):
dataset1 = self.cached_task.get_dataset(
{"inputs": 13, "targets": 13},
split="train", use_cached=True, shuffle=True, seed=0)
dataset2 = self.cached_task.get_dataset(
{"inputs": 13, "targets": 13},
split="train", use_cached=True, shuffle=True, seed=42)
test_utils.assert_datasets_neq(dataset1, dataset2)
def test_same_seeds_uncached_match(self):
dataset1 = self.uncached_task.get_dataset(
{"inputs": 13, "targets": 13},
split="train", use_cached=False, shuffle=True, seed=0)
dataset2 = self.uncached_task.get_dataset(
{"inputs": 13, "targets": 13},
split="train", use_cached=False, shuffle=True, seed=0)
test_utils.assert_datasets_eq(dataset1, dataset2)
def test_different_seeds_uncached_mismatch(self):
dataset1 = self.uncached_task.get_dataset(
{"inputs": 13, "targets": 13},
split="train", use_cached=False, shuffle=True, seed=0)
dataset2 = self.uncached_task.get_dataset(
{"inputs": 13, "targets": 13},
split="train", use_cached=False, shuffle=True, seed=42)
test_utils.assert_datasets_neq(dataset1, dataset2)
def test_same_seeds_random_tp_uncached_match(self):
dataset1 = self.random_task.get_dataset(
{"inputs": 13, "targets": 13},
split="train", use_cached=False, shuffle=True, seed=0).repeat(4)
dataset2 = self.random_task.get_dataset(
{"inputs": 13, "targets": 13},
split="train", use_cached=False, shuffle=True, seed=0).repeat(4)
test_utils.assert_datasets_eq(dataset1, dataset2)
def test_different_seeds_random_tp_uncached_mismatch(self):
dataset1 = self.random_task.get_dataset(
{"inputs": 13, "targets": 13},
split="train", use_cached=False, shuffle=True, seed=0)
dataset2 = self.random_task.get_dataset(
{"inputs": 13, "targets": 13},
split="train", use_cached=False, shuffle=True, seed=42)
test_utils.assert_datasets_neq(dataset1, dataset2)
def test_no_shuffle_with_seed_cached_match(self):
dataset1 = self.cached_task.get_dataset(
{"inputs": 13, "targets": 13},
split="train", use_cached=True, shuffle=False, seed=0)
dataset2 = self.cached_task.get_dataset(
{"inputs": 13, "targets": 13},
split="train", use_cached=True, shuffle=False, seed=42)
test_utils.assert_datasets_eq(dataset1, dataset2)
def test_no_shuffle_with_seed_uncached_match(self):
dataset1 = self.uncached_task.get_dataset(
{"inputs": 13, "targets": 13},
split="train", use_cached=False, shuffle=False, seed=0)
dataset2 = self.uncached_task.get_dataset(
{"inputs": 13, "targets": 13},
split="train", use_cached=False, shuffle=False, seed=42)
test_utils.assert_datasets_eq(dataset1, dataset2)
def test_no_shuffle_different_seeds_random_tp_uncached_mismatch(self):
dataset1 = self.random_task.get_dataset(
{"inputs": 13, "targets": 13},
split="train", use_cached=False, shuffle=False, seed=0)
dataset2 = self.random_task.get_dataset(
{"inputs": 13, "targets": 13},
split="train", use_cached=False, shuffle=False, seed=42)
test_utils.assert_datasets_neq(dataset1, dataset2)
def test_plaintext_to_pretokenized_rename(self):
ds = self.cached_plaintext_task.get_dataset(
{"inputs": 13, "targets": 13},
split="train", use_cached=True, shuffle=False)
keys = next(ds.as_numpy_iterator()).keys()
self.assertSetEqual(
set(keys),
set(["inputs", "inputs_pretokenized",
"targets", "targets_pretokenized"]))
def test_list_shards(self):
def _get_formatted_shards_list(task_name, split):
shards = dataset_providers.get_mixture_or_task(
task_name).source.list_shards(split)
shards = [s.split("/")[-1] for s in shards]
return sorted(shards)
self.assertListEqual(
_get_formatted_shards_list("tfds_task", "train"),
["train.tfrecord-00000-of-00002", "train.tfrecord-00001-of-00002"])
self.assertListEqual(
_get_formatted_shards_list("text_line_task", "train"),
["train.tsv-00000-of-00002", "train.tsv-00001-of-00002"])
self.assertListEqual(
_get_formatted_shards_list("tf_example_task", "train"),
["train.tfrecord-00000-of-00002", "train.tfrecord-00001-of-00002"])
self.assertListEqual(
_get_formatted_shards_list("proto_task", "train"),
["train.tfrecord-00000-of-00002", "train.tfrecord-00001-of-00002"])
self.assertListEqual(
_get_formatted_shards_list("function_task", "train"), ["train"])
self.assertListEqual(
_get_formatted_shards_list("fully_processed_precache", "train"),
["train"])
self.assertListEqual(
_get_formatted_shards_list("tokenized_postcache", "train"), ["train"])
self.assertListEqual(
_get_formatted_shards_list("random_task", "train"), ["train"])
self.assertListEqual(
_get_formatted_shards_list("uncached_task", "train"),
["train.tfrecord-00000-of-00002", "train.tfrecord-00001-of-00002"])
self.assertListEqual(
_get_formatted_shards_list("cached_task", "train"),
["train.tfrecord-00000-of-00002", "train.tfrecord-00001-of-00002"])
self.assertListEqual(
_get_formatted_shards_list("cached_plaintext_task", "train"),
["train.tfrecord-00000-of-00002", "train.tfrecord-00001-of-00002"])
class MixturesTest(test_utils.FakeTaskTest):
def test_tasks(self):
self.add_task("task1", self.function_source)
self.add_task("task2", self.function_source)
MixtureRegistry.add("test_mix1", [("task1", 1), ("task2", 1)])
mix = MixtureRegistry.get("test_mix1")
self.assertEqual(len(mix.tasks), 2)
for task in mix.tasks:
self.verify_task_matches_fake_datasets(task.name, use_cached=False)
self.assertEqual(mix.get_rate(task), 1)
def test_num_examples(self):
MixtureRegistry.add("test_mix2", [(self.cached_task.name, 1)])
mix = MixtureRegistry.get("test_mix2")
self.assertEqual(mix.num_input_examples(split="train"), 30)
def test_splits(self):
MixtureRegistry.add(
"test_mix",
[(self.cached_task.name, 1), (self.uncached_task.name, 1)]
)
mix = MixtureRegistry.get("test_mix")
self.assertSameElements(["train", "validation"], mix.splits, 30)
def test_get_dataset(self):
MixtureRegistry.add("test_mix3", [(self.cached_task.name, 1)])
task_ds = TaskRegistry.get_dataset(
self.cached_task.name, {
"inputs": 13,
"targets": 13
},
"validation",
use_cached=False,
shuffle=False)
mix_ds = MixtureRegistry.get("test_mix3").get_dataset(
{
"inputs": 13,
"targets": 13
}, "validation", use_cached=False, shuffle=False)
# mix.get_dataset strips non-output features
task_ds = task_ds.map(lambda x: {k: x[k] for k in ["inputs", "targets"]})
# limit size since get_dataset repeats the dataset
test_utils.assert_datasets_eq(task_ds.repeat(2), mix_ds.take(4))
def test_get_dataset_mix(self):
@utils.map_over_dataset
def _constant_preprocessor(unused_x, val):
return {
"targets": tf.constant([val], tf.int32),
"inputs": tf.constant([val], tf.int32),
}
self.add_task(
"two_task",
self.function_source,
preprocessors=(functools.partial(_constant_preprocessor, val=2),)
)
self.add_task(
"three_task",
self.function_source,
preprocessors=(functools.partial(_constant_preprocessor, val=3),)
)
MixtureRegistry.add("test_mix", [("two_task", 1), ("three_task", 1)])
sequence_length = {"inputs": 2, "targets": 2}
mix_ds = MixtureRegistry.get("test_mix").get_dataset(
sequence_length, "train", seed=13).take(1000)
res = sum(int(item["inputs"][0]) for item in mix_ds.as_numpy_iterator())
self.assertEqual(res, 2481)
def test_get_dataset_passthrough_features(self):
@utils.map_over_dataset
def _constant_feature_preprocessor(unused_x, val):
return {
"targets": tf.constant([val], tf.int32),
"inputs": tf.constant([val], tf.int32),
"feature": tf.constant([val], tf.int32),
}
self.add_task(
"two_task",
self.function_source,
preprocessors=(functools.partial(_constant_feature_preprocessor,
val=2),))
self.add_task(
"three_task",
self.function_source,
preprocessors=(functools.partial(_constant_feature_preprocessor,
val=3),))
MixtureRegistry.add("test_mix", [("two_task", 1), ("three_task", 1)])
sequence_length = {"inputs": 2, "targets": 2}
passthrough_features = ["feature"]
mix_ds = MixtureRegistry.get("test_mix").get_dataset(
sequence_length,
"train",
seed=13,
passthrough_features=passthrough_features).take(1000)
# output features are defined as "inputs" and "targets" by default.
res = sum(int(item["feature"][0]) for item in mix_ds.as_numpy_iterator())
self.assertEqual(res, 2481)
def test_copy_pretokenized(self):
@utils.map_over_dataset
def _constant_preprocessor(unused_x, val):
return {
"targets": tf.constant([val], tf.int32),
"targets_pretokenized": tf.constant(f"targets_{val}"),
"inputs": tf.constant([val], tf.int32),
"inputs_pretokenized": tf.constant(f"inputs_{val}")
}
self.add_task(
"two_task",
self.function_source,
preprocessors=(functools.partial(_constant_preprocessor, val=2),)
)
self.add_task(
"three_task",
self.function_source,
preprocessors=(functools.partial(_constant_preprocessor, val=3),)
)
MixtureRegistry.add("test_mix", [("two_task", 1), ("three_task", 1)])
sequence_length = {"inputs": 2, "targets": 2}
mix_ds = MixtureRegistry.get("test_mix").get_dataset(
sequence_length, "train", seed=13, copy_pretokenized=True).take(1000)
inputs_pretokenized = set(
ex["inputs_pretokenized"] for ex in mix_ds.as_numpy_iterator())
targets_pretokenized = set(
ex["targets_pretokenized"] for ex in mix_ds.as_numpy_iterator())
self.assertCountEqual([b"inputs_2", b"inputs_3"], inputs_pretokenized)
self.assertCountEqual([b"targets_2", b"targets_3"], targets_pretokenized)
mix_ds = MixtureRegistry.get("test_mix").get_dataset(
sequence_length, "train", seed=13, copy_pretokenized=False).take(1000)
for ex in mix_ds.as_numpy_iterator():
self.assertNoCommonElements(
["inputs_pretokenized", "targets_pretokenized"], ex.keys())
def test_get_rate_with_callable(self):
def fn(t):
self.assertEqual(t.name, "task4")
return 42
self.add_task("task4", self.function_source)
task = TaskRegistry.get("task4")
MixtureRegistry.add("test_mix5", [("task4", fn)])
mix = MixtureRegistry.get("test_mix5")
self.assertEqual(mix.get_rate(task), 42)
def test_mixture_of_mixtures(self):
self.add_task("task_a", self.function_source)
self.add_task("task_b", self.function_source)
self.add_task("task_c", self.function_source)
MixtureRegistry.add("another_mix", [("task_a", 1), ("task_b", 1)])
MixtureRegistry.add("supermix", [("another_mix", 1), ("task_c", 1)])
supermix = MixtureRegistry.get("supermix")
names = [task.name for task in supermix.tasks]
self.assertEqual(names, ["task_a", "task_b", "task_c"])
self.assertEqual([supermix.get_rate(t) for t in supermix.tasks],
[0.5, 0.5, 1])
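  # Rate bookkeeping behind the assertion above: the sub-mixture "another_mix"
  # contributes its rate of 1 split across its two equally weighted tasks, so
  # task_a and task_b each end up at 0.5, while task_c keeps its full rate of 1.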
def test_mixture_of_mixtures_dupe(self):
self.add_task("task2_a", self.function_source)
self.add_task("task2_b", self.function_source)
self.add_task("task2_c", self.function_source)
MixtureRegistry.add("yet_another_mix", [("task2_a", 1), ("task2_b", 1)])
MixtureRegistry.add("supermix_with_dupe", [("yet_another_mix", 1),
("task2_a", 1), ("task2_c", 1)])
supermix = MixtureRegistry.get("supermix_with_dupe")
names = [task.name for task in supermix.tasks]
self.assertEqual(names, ["task2_a", "task2_b", "task2_c"])
self.assertEqual([supermix.get_rate(t) for t in supermix.tasks],
[1.5, 0.5, 1])
def test_mixture_with_sample_fn(self):
    def sequential_interleave(datasets: Sequence[tf.data.Dataset],
rates: Sequence[float],
sample_seed: Optional[int]) -> tf.data.Dataset:
"""Sample function that simply concatenates two datasets."""
del rates, sample_seed
return datasets[0].concatenate(datasets[1])
def gen_dataset(split,
shuffle_files=False,
seed=None,
val: str = "") -> tf.data.Dataset:
del split, shuffle_files, seed # Need this to pass arg validation.
return tf.data.Dataset.from_tensor_slices({
"inputs": [[val]] * 3,
})
# Register two very simple tasks, each with 3 repeated string values.
vocab = vocabularies.PassThroughVocabulary(0)
tasks = []
for task_name in ["first", "second"]:
tasks.append(self.add_task(
task_name,
dataset_providers.FunctionDataSource(
dataset_fn=functools.partial(gen_dataset, val=task_name),
splits=["train"]),
preprocessors=[],
output_features={
"inputs": dataset_providers.Feature(vocab, dtype=tf.string)
}))
# Verify that by default, interleaving of datasets is random.
MixtureRegistry.add("default_mix", [("first", 1), ("second", 1)])
default_ds = MixtureRegistry.get("default_mix").get_dataset(
None, "train", shuffle=False, seed=2, num_epochs=1)
expected = [b"second", b"first", b"second", b"first", b"second", b"first"]
actual = [x["inputs"] for x in default_ds.as_numpy_iterator()]
self.assertEqual(expected, actual)
# Verify that we can modify sampling function correctly.
MixtureRegistry.add(
"sequential_mix", [("first", 1), ("second", 1)],
        sample_fn=sequential_interleave)
sequential_ds = MixtureRegistry.get("sequential_mix").get_dataset(
None, "train", shuffle=False, seed=2, num_epochs=1)
expected = [b"first"] * 3 + [b"second"] * 3
actual = [x["inputs"] for x in sequential_ds.as_numpy_iterator()]
self.assertEqual(expected, actual)
class GetDatasetTest(parameterized.TestCase, tf.test.TestCase):
def test_get_dataset_enc_dec_unpacked(self):
mixture_or_task_name = "enc_dec_unpacked"
x = [{"inputs": [7, 8, 5, 6, 9, 4, 3], "targets": [3, 9]},
{"inputs": [8, 4], "targets": [4]},
{"inputs": [5, 6, 7], "targets": [6, 5]}]
ds = create_default_dataset(x)
dataset_fn = lambda split, shuffle_files: ds
register_dummy_task(mixture_or_task_name, dataset_fn=dataset_fn)
task_feature_lengths = {"inputs": 7, "targets": 5}
converter = feature_converters.EncDecFeatureConverter(pack=False)
output_ds = dataset_providers.get_dataset(
mixture_or_task_name=mixture_or_task_name,
task_feature_lengths=task_feature_lengths,
dataset_split="train",
shuffle=False,
feature_converter=converter)
expected = [{
"encoder_input_tokens": [7, 8, 5, 6, 9, 4, 1],
"decoder_target_tokens": [3, 9, 1, 0, 0],
"decoder_input_tokens": [0, 3, 9, 1, 0],
"decoder_loss_weights": [1, 1, 1, 0, 0],
}, {
"encoder_input_tokens": [8, 4, 1, 0, 0, 0, 0],
"decoder_target_tokens": [4, 1, 0, 0, 0],
"decoder_input_tokens": [0, 4, 1, 0, 0],
"decoder_loss_weights": [1, 1, 0, 0, 0],
}, {
"encoder_input_tokens": [5, 6, 7, 1, 0, 0, 0],
"decoder_target_tokens": [6, 5, 1, 0, 0],
"decoder_input_tokens": [0, 6, 5, 1, 0],
"decoder_loss_weights": [1, 1, 1, 0, 0],
}]
expected_dtypes = {feat: tf.int32 for feat in expected[0].keys()}
assert_dataset(output_ds, expected, expected_dtypes=expected_dtypes)
@parameterized.parameters(
dict(
task_name="enc_dec_partial_trim_both",
task_feature_lengths={
"inputs": 7,
"targets": 2
},
expect_trim_inputs=True,
expect_trim_targets=True),
dict(
task_name="enc_dec_partial_trim_targets",
task_feature_lengths={
"inputs": None,
"targets": 2
},
expect_trim_inputs=False,
expect_trim_targets=True),
dict(
task_name="enc_dec_partial_trim_inputs",
task_feature_lengths={
"inputs": 7,
"targets": None
},
expect_trim_inputs=True,
expect_trim_targets=False),
dict(
task_name="enc_dec_partial_trim_neither",
task_feature_lengths={
"inputs": None,
"targets": None
},
expect_trim_inputs=False,
expect_trim_targets=False),
dict(
task_name="enc_dec_partial_trim_nothing",
task_feature_lengths=None,
expect_trim_inputs=False,
expect_trim_targets=False))
def test_partial_sequence_length(self, task_name, task_feature_lengths,
expect_trim_inputs, expect_trim_targets):
x = [{"inputs": [7, 8, 5, 6, 9, 4, 3], "targets": [3, 9]},
{"inputs": [8, 4], "targets": [4]},
{"inputs": [5, 6, 7], "targets": [6, 5]}]
ds = create_default_dataset(x)
dataset_fn = lambda split, shuffle_files: ds
register_dummy_task(task_name, dataset_fn=dataset_fn)
# Unlike the other tests, don't use a feature converter. Instead, test the
# task.get_dataset method directly, which is similar to how evaluation.py
# infers feature lengths w/trimming.
task = dataset_providers.get_mixture_or_task(task_name)
output_ds = task.get_dataset(
sequence_length=task_feature_lengths,
shuffle=False)
expected = [{
"inputs": [7, 8, 5, 6, 9, 4, 3, 1],
"targets": [3, 9, 1],
}, {
"inputs": [8, 4, 1],
"targets": [4, 1],
}, {
"inputs": [5, 6, 7, 1],
"targets": [6, 5, 1],
}]
if expect_trim_inputs:
expected[0]["inputs"] = [7, 8, 5, 6, 9, 4, 1]
if expect_trim_targets:
expected[0]["targets"] = [3, 1]
expected[2]["targets"] = [6, 1]
expected_dtypes = {feat: tf.int32 for feat in expected[0].keys()}
assert_dataset(output_ds, expected, expected_dtypes=expected_dtypes)
@parameterized.parameters(
dict(
task_name="enc_dec_multidim_trim_both",
task_feature_lengths={
"inputs": (2, 5),
"targets": 2
},
expect_trim_inputs=True,
expect_trim_targets=True,
),
dict(
task_name="enc_dec_multidim_trim_inputs",
task_feature_lengths={
"inputs": (2, 5),
"targets": None
},
expect_trim_inputs=True,
expect_trim_targets=False,
),
dict(
task_name="enc_dec_multidim_trim_targets",
task_feature_lengths={
"inputs": None,
"targets": 2
},
expect_trim_inputs=False,
expect_trim_targets=True,
),
dict(
task_name="enc_dec_no_multidim_trim",
task_feature_lengths={
"inputs": None,
"targets": None
},
expect_trim_inputs=False,
expect_trim_targets=False
)
)
def test_multidimension_sequence_length(self,
task_name,
task_feature_lengths,
expect_trim_inputs,
expect_trim_targets):
x = [{"inputs": [[7, 8, 5, 6, 9, 4, 3],
[2, 3, 4, 5, 0, 0, 0],
[6, 7, 1, 0, 0, 0, 0]],
"targets": [3, 9]},
{"inputs": [[8, 4],
[1, 0],
[2, 3]],
"targets": [4]},
{"inputs": [[5, 6, 7]],
"targets": [6, 5, 1]},
{"inputs": [[7, 8, 9, 1, 2, 3, 4, 5, 6]],
"targets": [10, 11, 1]}]
ds = tf.data.Dataset.from_generator(
lambda: x,
output_types={"inputs": tf.int32, "targets": tf.int32},
output_shapes={"inputs": (None, None), "targets": (None,)})
dataset_fn = lambda split, shuffle_files: ds
dataset_providers.TaskRegistry.add(
task_name,
source=dataset_providers.FunctionDataSource(
dataset_fn=dataset_fn, splits=["train", "validation"]),
preprocessors=[
dataset_providers.CacheDatasetPlaceholder(),
],
output_features={
"inputs": dataset_providers.Feature(
test_utils.sentencepiece_vocab(), rank=2),
"targets": dataset_providers.Feature(
test_utils.sentencepiece_vocab())
},
metric_fns=[])
# Unlike the other tests, don't use a feature converter. Instead, test the
# task.get_dataset method directly, which is similar to how evaluation.py
# infers feature lengths w/trimming.
task = dataset_providers.get_mixture_or_task(task_name)
output_ds = task.get_dataset(
sequence_length=task_feature_lengths,
shuffle=False)
expected = copy.deepcopy(x)
if expect_trim_inputs:
expected[0]["inputs"] = [[7, 8, 5, 6, 9],
[2, 3, 4, 5, 0]]
expected[1]["inputs"] = [[8, 4],
[1, 0]]
expected[3]["inputs"] = [[7, 8, 9, 1, 2]]
if expect_trim_targets:
expected[2]["targets"] = [6, 5]
expected[3]["targets"] = [10, 11]
expected_dtypes = {feat: tf.int32 for feat in expected[0].keys()}
assert_dataset(output_ds, expected, expected_dtypes=expected_dtypes)
def test_get_dataset_enc_dec_packed(self):
mixture_or_task_name = "enc_dec_packed"
x = [{"inputs": [7, 8, 5, 6, 9, 4, 3], "targets": [3, 9]},
{"inputs": [8, 4], "targets": [4]},
{"inputs": [5, 6, 7], "targets": [6, 5]}]
ds = create_default_dataset(x)
dataset_fn = lambda split, shuffle_files: ds
register_dummy_task(mixture_or_task_name, dataset_fn=dataset_fn)
task_feature_lengths = {"inputs": 7, "targets": 5}
converter = feature_converters.EncDecFeatureConverter(pack=True)
output_ds = dataset_providers.get_dataset(
mixture_or_task_name=mixture_or_task_name,
task_feature_lengths=task_feature_lengths,
dataset_split="train",
shuffle=False,
feature_converter=converter)
expected = [{
# Example 1 is trimmed
"encoder_input_tokens": [7, 8, 5, 6, 9, 4, 1],
"encoder_segment_ids": [1, 1, 1, 1, 1, 1, 1],
"encoder_positions": [0, 1, 2, 3, 4, 5, 6],
"decoder_target_tokens": [3, 9, 1, 0, 0],
"decoder_input_tokens": [0, 3, 9, 0, 0],
"decoder_loss_weights": [1, 1, 1, 0, 0],
"decoder_segment_ids": [1, 1, 1, 0, 0],
"decoder_positions": [0, 1, 2, 0, 0],
}, {
# Example 2 and 3 are packed together
"encoder_input_tokens": [8, 4, 1, 5, 6, 7, 1],
"encoder_segment_ids": [1, 1, 1, 2, 2, 2, 2],
"encoder_positions": [0, 1, 2, 0, 1, 2, 3],
"decoder_target_tokens": [4, 1, 6, 5, 1],
"decoder_input_tokens": [0, 4, 0, 6, 5],
"decoder_loss_weights": [1, 1, 1, 1, 1],
"decoder_segment_ids": [1, 1, 2, 2, 2],
"decoder_positions": [0, 1, 0, 1, 2],
}]
expected_dtypes = {feat: tf.int32 for feat in expected[0].keys()}
assert_dataset(output_ds, expected, expected_dtypes=expected_dtypes)
def test_get_dataset_both_train_and_validation_splits(self):
mixture_or_task_name = "both_train_and_validation_splits"
x_train = [{"inputs": [7, 8, 5, 6, 9, 4, 3], "targets": [3, 9]}]
x_val = [{"inputs": [8, 4], "targets": [4]}]
datasets = {
"train": create_default_dataset(x_train),
"validation": create_default_dataset(x_val)
}
dataset_fn = lambda split, shuffle_files: datasets[split]
register_dummy_task(mixture_or_task_name, dataset_fn=dataset_fn)
task_feature_lengths = {"inputs": 7, "targets": 5}
output_ds = {}
for split in ["train", "validation"]:
converter = feature_converters.EncDecFeatureConverter(pack=False)
output_ds[split] = dataset_providers.get_dataset(
mixture_or_task_name=mixture_or_task_name,
task_feature_lengths=task_feature_lengths,
dataset_split=split,
shuffle=False,
feature_converter=converter)
expected_train = {
"encoder_input_tokens": [7, 8, 5, 6, 9, 4, 1],
"decoder_target_tokens": [3, 9, 1, 0, 0],
"decoder_input_tokens": [0, 3, 9, 1, 0],
"decoder_loss_weights": [1, 1, 1, 0, 0],
}
expected_val = {
"encoder_input_tokens": [8, 4, 1, 0, 0, 0, 0],
"decoder_target_tokens": [4, 1, 0, 0, 0],
"decoder_input_tokens": [0, 4, 1, 0, 0],
"decoder_loss_weights": [1, 1, 0, 0, 0],
}
expected_dtypes = {feat: tf.int32 for feat in expected_train.keys()}
assert_dataset(
output_ds["train"], expected_train, expected_dtypes=expected_dtypes)
assert_dataset(
output_ds["validation"], expected_val, expected_dtypes=expected_dtypes)
def test_get_dataset_enc_dec_sharded(self):
mixture_or_task_name = "enc_dec_sharded"
x = [{"inputs": [7, 8, 5, 6, 9, 4, 3], "targets": [3, 9]},
{"inputs": [8, 4], "targets": [4]},
{"inputs": [5, 6, 7], "targets": [6, 5]}]
ds = create_default_dataset(x)
dataset_fn = lambda split, shuffle_files: ds
register_dummy_task(mixture_or_task_name, dataset_fn=dataset_fn)
task_feature_lengths = {"inputs": 7, "targets": 5}
converter = feature_converters.EncDecFeatureConverter(pack=False)
shard_info = dataset_providers.ShardInfo(index=0, num_shards=2)
output_ds = dataset_providers.get_dataset(
mixture_or_task_name=mixture_or_task_name,
task_feature_lengths=task_feature_lengths,
dataset_split="train",
shuffle=False,
feature_converter=converter,
shard_info=shard_info)
# Example index 1 should not be present in the sharded dataset.
expected = [{
"encoder_input_tokens": [7, 8, 5, 6, 9, 4, 1],
"decoder_target_tokens": [3, 9, 1, 0, 0],
"decoder_input_tokens": [0, 3, 9, 1, 0],
"decoder_loss_weights": [1, 1, 1, 0, 0],
}, {
"encoder_input_tokens": [5, 6, 7, 1, 0, 0, 0],
"decoder_target_tokens": [6, 5, 1, 0, 0],
"decoder_input_tokens": [0, 6, 5, 1, 0],
"decoder_loss_weights": [1, 1, 1, 0, 0],
}]
expected_dtypes = {feat: tf.int32 for feat in expected[0].keys()}
assert_dataset(output_ds, expected, expected_dtypes=expected_dtypes)
def test_get_dataset_enc_dec_sharded_and_packed(self):
mixture_or_task_name = "enc_dec_sharded_and_packed"
x = [{"inputs": [7, 8], "targets": [3, 9]},
{"inputs": [8, 4], "targets": [4]},
{"inputs": [5, 6, 7], "targets": [6]}]
ds = create_default_dataset(x)
dataset_fn = lambda split, shuffle_files: ds
register_dummy_task(mixture_or_task_name, dataset_fn=dataset_fn)
task_feature_lengths = {"inputs": 7, "targets": 5}
converter = feature_converters.EncDecFeatureConverter(pack=True)
shard_info = dataset_providers.ShardInfo(index=0, num_shards=2)
output_ds = dataset_providers.get_dataset(
mixture_or_task_name=mixture_or_task_name,
task_feature_lengths=task_feature_lengths,
dataset_split="train",
shuffle=False,
feature_converter=converter,
shard_info=shard_info)
# Packing should be done after the sharding.
expected = {
"encoder_input_tokens": [7, 8, 1, 5, 6, 7, 1],
"encoder_segment_ids": [1, 1, 1, 2, 2, 2, 2],
"encoder_positions": [0, 1, 2, 0, 1, 2, 3],
"decoder_target_tokens": [3, 9, 1, 6, 1],
"decoder_input_tokens": [0, 3, 9, 0, 6],
"decoder_loss_weights": [1, 1, 1, 1, 1],
"decoder_segment_ids": [1, 1, 1, 2, 2],
"decoder_positions": [0, 1, 2, 0, 1],
}
expected_dtypes = {feat: tf.int32 for feat in expected.keys()}
assert_dataset(output_ds, expected, expected_dtypes=expected_dtypes)
def register_dummy_task(
task_name: str,
dataset_fn: Callable[[str, str], tf.data.Dataset],
output_feature_names: Sequence[str] = ("inputs", "targets")) -> None:
"""Register a dummy task for GetDatasetTest."""
dataset_providers.TaskRegistry.add(
task_name,
source=dataset_providers.FunctionDataSource(
dataset_fn=dataset_fn, splits=["train", "validation"]),
preprocessors=[
dataset_providers.CacheDatasetPlaceholder(),
preprocessors.append_eos_after_trim,
],
output_features={
feat: dataset_providers.Feature(test_utils.sentencepiece_vocab())
for feat in output_feature_names
},
metric_fns=[])
if __name__ == "__main__":
absltest.main()
| 38.483539 | 107 | 0.647989 | [
"Apache-2.0"
] | 00mjk/seqio | seqio/dataset_providers_test.py | 56,109 | Python |
from copy import copy
from typing import Optional
import torch
import pytorch_lightning as pl
from transformers import (
EncoderDecoderModel,
RobertaModel,
RobertaConfig,
GPT2LMHeadModel,
GPT2Config,
RobertaTokenizer,
GPT2Tokenizer,
AdamW,
get_linear_schedule_with_warmup,
)
import nltk
nltk.download("wordnet")
class EncoderDecoderModule(pl.LightningModule):
def __init__(
self,
learning_rate: float,
src_tokenizer: RobertaTokenizer,
trg_tokenizer: GPT2Tokenizer,
num_epochs: int,
num_batches: int,
num_gpus: int,
num_layers_encoder: Optional[int] = None,
num_layers_decoder: Optional[int] = None,
encoder_name_or_path: Optional[str] = None,
decoder_name_or_path: Optional[str] = None,
**kwargs,
):
super().__init__()
self._src_tokenizer = src_tokenizer
self._trg_tokenizer = trg_tokenizer
self._num_epochs = num_epochs
self._num_batches = num_batches
self._num_gpus = num_gpus
self.learning_rate = learning_rate
self.save_hyperparameters()
if encoder_name_or_path is not None and decoder_name_or_path is not None:
# use pretrained RoBERTa as encoder
encoder = RobertaModel.from_pretrained(encoder_name_or_path)
# resize embeddings to match vocabulary size
encoder.resize_token_embeddings(len(self._src_tokenizer))
# remove layers if necessary
if num_layers_encoder is not None and num_layers_encoder < encoder.config.num_hidden_layers:
encoder = EncoderDecoderModule.remove_layers_from_model(encoder, num_layers_encoder, is_gpt=False)
# use pretrained GPT-2 as decoder
config = GPT2Config.from_pretrained(decoder_name_or_path)
config.is_decoder = True
config.add_cross_attention = True
decoder = GPT2LMHeadModel.from_pretrained(decoder_name_or_path, config=config)
# remove layers if necessary
if num_layers_decoder is not None and num_layers_decoder < decoder.config.n_layer:
decoder = EncoderDecoderModule.remove_layers_from_model(decoder, num_layers_decoder, is_gpt=True)
elif num_layers_decoder is not None and num_layers_encoder is not None:
# use randomly initialized RoBERTa as encoder
encoder_config = RobertaConfig()
encoder_config.num_hidden_layers = num_layers_encoder
encoder = RobertaModel(config=encoder_config)
# resize embeddings to match vocabulary size
encoder.resize_token_embeddings(len(self._src_tokenizer))
# use randomly initialized GPT-2 as decoder
decoder_config = GPT2Config()
decoder_config.n_layer = num_layers_decoder
decoder_config.is_decoder = True
decoder_config.add_cross_attention = True
decoder = GPT2LMHeadModel(config=decoder_config)
else:
raise ValueError(
"You have to specify either num_layers for training from scratch \
or paths for loading pretrained models"
)
self.model = EncoderDecoderModel(encoder=encoder, decoder=decoder)
# cache is currently not supported by EncoderDecoder framework
self.model.decoder.config.use_cache = False
# do not tie output embeddings to input embeddings
self.model.config.tie_word_embeddings = False
# to make logs for different batch sizes prettier
self.examples_count = 0
def forward(self, batch):
return self.model(
input_ids=batch["diff_input_ids"],
attention_mask=batch["diff_attention_mask"],
decoder_input_ids=batch["msg_input_ids"],
decoder_attention_mask=batch["msg_attention_mask"],
labels=batch["msg_labels"],
)
def training_step(self, batch, batch_idx):
self.examples_count += len(batch["diff_input_ids"])
loss, logits = self(batch)[:2]
self.logger.experiment.log({"train_loss_step": loss}, step=self.examples_count)
return {"loss": loss}
def training_epoch_end(self, outputs):
train_loss_mean = torch.stack([x["loss"] for x in outputs]).mean()
self.logger.experiment.log({"train_loss_epoch": train_loss_mean}, step=self.examples_count)
def next_token_metrics_step(self, batch):
loss, scores = self(batch)[:2]
return {"loss": loss}
def next_token_metrics_epoch_end(self, outputs, stage):
"""
Logic for validation & testing epoch end:
1) Calculate accuracy@1, accuracy@5, MRR@5
2) (in val stage only) Aggregate loss and log metric(s) for ModelCheckpoint
3) Log everything to wandb
"""
loss = torch.stack([x["loss"] for x in outputs]).mean()
metrics = {f"{stage}_loss_epoch": loss}
if stage == "val":
self.log("val_loss_epoch", metrics["val_loss_epoch"], on_step=False, on_epoch=True, prog_bar=True, logger=False)
self.logger.experiment.log(metrics, step=self.examples_count)
def validation_step(self, batch, batch_idx, dataloader_idx=0):
return self.next_token_metrics_step(batch)
def validation_epoch_end(self, outputs):
self.next_token_metrics_epoch_end(outputs, stage="val")
def test_step(self, batch, batch_idx):
return self.next_token_metrics_step(batch)
def test_epoch_end(self, outputs):
self.next_token_metrics_epoch_end(outputs, stage="test")
def configure_optimizers(self):
optimizer = AdamW(self.parameters(), lr=self.learning_rate)
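        # linear warmup for 4000 optimizer steps (divided across GPUs), then linear decay
        # over num_epochs * num_batches steps; the scheduler is stepped once per batch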
scheduler = {
"scheduler": get_linear_schedule_with_warmup(
optimizer, 4000 // self._num_gpus, self._num_epochs * self._num_batches
),
"interval": "step",
"frequency": 1,
}
return [optimizer], [scheduler]
@staticmethod
def remove_layers_from_model(teacher, num_layers, is_gpt):
if not is_gpt:
teacher_config = teacher.config
student_config = copy(teacher.config)
student_config.num_hidden_layers = num_layers
student = RobertaModel(config=student_config)
# copy all embeddings
student.embeddings.word_embeddings = teacher.embeddings.word_embeddings
student.embeddings.position_embeddings = teacher.embeddings.position_embeddings
student.embeddings.token_type_embeddings = teacher.embeddings.token_type_embeddings
student.embeddings.LayerNorm = teacher.embeddings.LayerNorm
student.embeddings.dropout = teacher.embeddings.dropout
            # uniformly pick layers from the teacher (first and last included);
            # the indices are essentially np.linspace(0, teacher_config.num_hidden_layers - 1,
            #                                         num=student_config.num_hidden_layers, endpoint=True)
step = (teacher_config.num_hidden_layers - 1) / (student_config.num_hidden_layers - 1)
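            # for example (hypothetical sizes): a 12-layer teacher distilled to a
            # 3-layer student gives step = 5.5 and copies teacher layers 0, 5 and 11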
for student_layer, teacher_layer in enumerate(
int(i * step) for i in range(student_config.num_hidden_layers)
):
student.encoder.layer[student_layer] = teacher.encoder.layer[teacher_layer]
else:
teacher_config = teacher.config
student_config = copy(teacher.config)
student_config.n_layer = num_layers
student = GPT2LMHeadModel(config=student_config)
# Copying all embeddings
student.transformer.wte = teacher.transformer.wte
student.transformer.wpe = teacher.transformer.wpe
student.transformer.drop = teacher.transformer.drop
            # Maybe there is something else that needs to be copied!
# Specific thing for GPT2LMHead. Not necessary for BERT
student.tie_weights()
            # Uniformly pick layers from the teacher (first and last included);
            # essentially np.linspace(0, teacher_config.n_layer - 1, num=student_config.n_layer, endpoint=True)
step = (teacher_config.n_layer - 1) / (student_config.n_layer - 1)
for student_layer, teacher_layer in enumerate(int(i * step) for i in range(student_config.n_layer)):
student.transformer.h[student_layer] = teacher.transformer.h[teacher_layer]
return student
| 42.313433 | 124 | 0.663139 | [
"MIT"
] | saridormi/commit_message_generation | src/model/encoder_decoder_module.py | 8,505 | Python |
from soup import soup_collector
def name_collector(spl_id, spl_type):
soup = soup_collector(spl_id, spl_type)
sample_info_type = soup.findAll('a')
    # sample names may have one or two underscore-separated parts;
    # fall back to a single part if the second one is missing
try:
sample_info_name1 = sample_info_type[0].get('name').split('_')[1].strip()
sample_info_name2 = sample_info_type[0].get('name').split('_')[2].strip()
sample_info_name = sample_info_name1 + "_" + sample_info_name2
except:
sample_info_name = sample_info_type[0].get('name').split('_')[1].strip()
return sample_info_name
| 33.588235 | 81 | 0.677758 | [
"MIT"
] | 0x0is1/drhelp | src/name_collect.py | 571 | Python |
import os
import numpy as np
from glob import glob
from scipy import optimize, spatial, ndimage
from tifffile import imread, imsave
from skimage.segmentation import find_boundaries
from skimage.morphology import remove_small_objects
from skimage.draw import line
from utils import random_colormap
import pdb
# define binarization function
def prepare_binary(fn):
    # generate binary segmentation result
seg = np.squeeze(imread(fn)) > bw_th
seg = remove_small_objects(seg>0, min_size=min_obj_size)
return seg
# params
max_matching_dist = 45
approx_inf = 65535
track_display_length = 20
min_obj_size = 20
bw_th = -0.5
parent_path = "/mnt/data/"
all_movies = glob(parent_path + "timelapse/*.tiff")
for M_idx, movies in enumerate(all_movies):
movie_basename = os.path.basename(movies)
well_name = movie_basename[:-5]
seg_path = f"{parent_path}timelapse_seg/{well_name}/"
# vis_path = f"{parent_path}timelapse_track/{well_name}"
# os.makedirs(vis_path, exist_ok=True)
raw_path = f"{parent_path}timelapse/{well_name}"
track_result = f"{parent_path}timelapse_track/{well_name}_result.npy"
total_time = len(glob(raw_path + "/*.tiff"))
traj = dict()
lineage = dict()
for tt in range(total_time):
seg_fn = seg_path + f"img_{tt}_segmentation.tiff"
seg = prepare_binary(seg_fn)
# get label image
seg_label, num_cells = ndimage.label(seg)
# calculate center of mass
centroid = ndimage.center_of_mass(seg, labels=seg_label, index=np.arange(1, num_cells + 1))
# generate cell information of this frame
traj.update({
tt : {"centroid": centroid, "parent": [], "child": [], "ID": []}
})
# initialize trajectory ID, parent node, track pts for the first frame
max_cell_id = len(traj[0].get("centroid"))
traj[0].update(
{"ID": np.arange(0, max_cell_id, 1)}
)
traj[0].update(
{"parent": -1 * np.ones(max_cell_id, dtype=int)}
)
centers = traj[0].get("centroid")
pts = []
for ii in range(max_cell_id):
pts.append([centers[ii]])
lineage.update({ii: [centers[ii]]})
traj[0].update({"track_pts": pts})
for tt in np.arange(1, total_time):
p_prev = traj[tt-1].get("centroid")
p_next = traj[tt].get("centroid")
###########################################################
# simple LAP tracking
###########################################################
num_cell_prev = len(p_prev)
num_cell_next = len(p_next)
# calculate distance between each pair of cells
cost_mat = spatial.distance.cdist(p_prev, p_next)
# if the distance is too far, change to approx. Inf.
cost_mat[cost_mat > max_matching_dist] = approx_inf
        # add edges from cells in previous frame to auxiliary vertices
        # in order to accommodate segmentation errors and cells leaving the frame
cost_mat_aug = max_matching_dist * 1.2 * np.ones(
(num_cell_prev, num_cell_next + num_cell_prev), dtype=float
)
cost_mat_aug[:num_cell_prev, :num_cell_next] = cost_mat[:, :]
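        # the extra columns act as "no match" sinks priced just above the matching
        # threshold, so cells may disappear between frames instead of being force-matched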
# solve the optimization problem
row_ind, col_ind = optimize.linear_sum_assignment(cost_mat_aug)
#########################################################
# parse the matching result
#########################################################
prev_child = np.ones(num_cell_prev, dtype=int)
next_parent = np.ones(num_cell_next, dtype=int)
next_ID = np.zeros(num_cell_next, dtype=int)
next_track_pts = []
# assign child for cells in previous frame
for ii in range(num_cell_prev):
if col_ind[ii] >= num_cell_next:
prev_child[ii] = -1
else:
prev_child[ii] = col_ind[ii]
# assign parent for cells in next frame, update ID and track pts
prev_pt = traj[tt-1].get("track_pts")
prev_id = traj[tt-1].get("ID")
for ii in range(num_cell_next):
if ii in col_ind:
# a matched cell is found
next_parent[ii] = np.where(col_ind == ii)[0][0]
next_ID[ii] = prev_id[next_parent[ii]]
current_pts = prev_pt[next_parent[ii]].copy()
current_pts.append(p_next[ii])
                if len(current_pts) > track_display_length:
current_pts.pop(0)
next_track_pts.append(current_pts)
# attach this point to the lineage
single_lineage = lineage.get(next_ID[ii])
try:
single_lineage.append(p_next[ii])
except Exception:
pdb.set_trace()
lineage.update({next_ID[ii]: single_lineage})
else:
# a new cell
next_parent[ii] = -1
next_ID[ii] = max_cell_id
next_track_pts.append([p_next[ii]])
lineage.update({max_cell_id: [p_next[ii]]})
max_cell_id += 1
# update record
traj[tt-1].update({"child": prev_child})
traj[tt].update({"parent": next_parent})
traj[tt].update({"ID": next_ID})
traj[tt].update({"track_pts": next_track_pts})
np.save(track_result, [traj, lineage])
"""
######################################################
# generate track visualization
######################################################
cmap = random_colormap()
for tt in range(total_time):
# print(traj[tt].get("ID"))
# load segmentation and extract contours
seg_fn = seg_path + f"img_{tt}_segmentation.tiff"
seg = prepare_binary(seg_fn)
seg_label, num_cells = ndimage.label(seg)
cell_contours = find_boundaries(seg, mode='inner').astype(np.uint16)
cell_contours[cell_contours > 0] = 1
cell_contours = cell_contours * seg_label.astype(np.uint16)
cell_contours = cell_contours - 1 # to make the first object has label 0, to match index
    # load raw image and create visualization in RGB
# TODO: use real raw images
# raw = seg.astype(np.uint8)
raw = np.squeeze(imread(raw_path + f"img_{tt}.tiff")).astype(np.float32)
raw = (raw - raw.min())/ (raw.max() - raw.min())
raw = raw * 255
raw = raw.astype(np.uint8)
vis = np.zeros((raw.shape[0], raw.shape[1], 3), dtype=np.uint8)
for cc in range(3):
vis[:, :, cc] = raw
# loop through all cells, for each cell, we do the following
# 1- find ID, 2- load the color, 3- draw contour 4- draw track
cell_id = traj[tt].get("ID")
pts = traj[tt].get("track_pts")
for cid in range(num_cells):
# find ID
this_id = cell_id[cid]
# load the color
this_color = 255 * cmap.colors[this_id]
this_color = this_color.astype(np.uint8)
# draw contour
for cc in range(3):
vis_c = vis[:, :, cc]
vis_c[cell_contours == cid] = this_color[cc]
vis[:, :, cc] = vis_c # TODO: check if we need this line
# draw track
this_track = pts[cid]
if len(this_track) < 2:
continue
else:
for pid in range(len(this_track) - 1):
p1 = this_track[pid]
p2 = this_track[pid + 1]
rr, cc = line(int(round(p1[0])), int(round(p1[1])), int(round(p2[0])), int(round(p2[1])))
for ch in range(3):
vis[rr, cc ,ch] = this_color[ch]
imsave(vis_path + f"img_{tt+1}.tiff", vis)
"""
| 35.741784 | 105 | 0.574544 | [
"BSD-2-Clause"
] | MMV-Lab/cell_movie_analysis | run_tracking.py | 7,613 | Python |
from __future__ import print_function
import sys
import datetime
import random
def main(n):
start = -86400 * 365 * 20
end = 86400 * 365
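    # random timestamps between roughly 20 years in the past and 1 year in the future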
filename = 'testdata-' + str(n) + '.txt'
with open(filename, 'w') as fp:
now = datetime.datetime.now()
for i in range(n):
d = datetime.timedelta(seconds=random.randint(start, end))
nd = now + d
fp.write(nd.strftime("%d/%m/%Y %H:%M:%S") + '\n')
print('generate finish {}\n'.format(filename))
if __name__ == '__main__':
if not(len(sys.argv) == 2 and sys.argv[1].isdigit()):
print('bad input, argument must be number\n')
exit()
n = int(sys.argv[1])
main(n)
| 26.692308 | 70 | 0.573487 | [
"MIT"
] | Paradise02/Interviews | Beijin-Tuiwen/Python/gen_data.py | 694 | Python |
import ast
import csv
import logging
import math
import os
from nose_parameterized import parameterized
import numpy
import SimpleITK as sitk
import six
from radiomics import getTestCase, imageoperations
# Get the logger. This is done outside the class, as it is needed by both the class and the custom_name_func
logger = logging.getLogger('radiomics.testing')
TEST_CASES = ('brain1', 'brain2', 'breast1', 'lung1', 'lung2')
def custom_name_func(testcase_func, param_num, param):
"""
  A custom test name function that ensures tests are batched so that all tests for a given
  data set are run together, avoiding re-reading the data more than necessary. Tests are run in alphabetical
order, so put the test case first. An alternate option is to right justify the test number (param_num) with zeroes
so that the numerical and alphabetical orders are the same. Not providing this method when there are more than 10
tests results in tests running in an order similar to:
test_*.test_scenario_0_*
test_*.test_scenario_10_*
test_*.test_scenario_11_*
...
test_*.test_scenario_19_*
test_*.test_scenario_1_*
test_*.test_scenario_20_*
"""
global logger
logger.debug('custom_name_func: function name = %s, param_num = {0:0>3}, param.args = %s'.format(param_num),
testcase_func.__name__, param.args)
return str("%s_%s" % (
testcase_func.__name__,
parameterized.to_safe_name("_".join(str(x) for x in param.args)),
))
class RadiomicsTestUtils:
"""
This utility class reads in and stores the baseline files stored in 'data\baseline' (one per feature class)
It provides utility methods to get the baseline feature value for a feature class and compare it to the result generated
by the test.
"""
def __init__(self):
self._logger = logging.getLogger('radiomics.testing.utils')
self._logger.debug('RadiomicsTestUtils')
# the image and mask volumes
self._image = None
self._mask = None
self._current_image = None
self._current_mask = None
self._bb = None
self._imageType = None
# set up file paths
self._dataDir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "data")
self._baselineDir = os.path.join(self._dataDir, 'baseline')
self._tests = set()
self._test = None # Test, specifies an image and mask and some configuration (settings)
self._testCase = None # Test image and mask to use in configured test
self._testedSet = set()
self._baseline = {}
self.readBaselineFiles()
self._current_config = {}
self._featureClassName = None
self._results = {}
self._diffs = {}
for test in self.getTests():
self._results[test] = {}
self._diffs[test] = {}
def readBaselineFiles(self):
"""
Reads the 'baseline' folder contained in dataDir. All files starting with 'baseline_' are read as baseline files.
These files should therefore be named as follows: 'baseline_<className>.csv'.
"""
baselineFiles = [fileName for fileName in os.listdir(self._baselineDir)
if os.path.isfile(os.path.join(self._baselineDir, fileName)) and fileName.startswith('baseline_')]
assert len(baselineFiles) > 0
for baselineFile in baselineFiles:
newBaseline = PyRadiomicsBaseline.readBaselineFile(os.path.join(self._baselineDir, baselineFile))
cls = newBaseline.cls
self._logger.debug('Read baseline for class %s', cls)
self._baseline[cls] = newBaseline
self._tests |= newBaseline.tests
def getTests(self):
"""
Return all the tests for which there are baseline information.
"""
return self._tests
def getFeatureNames(self, className, test):
"""
Gets all features for which a baseline value is available for the current class and test case. Returns a list
containing the feature names (without image type and feature class specifiers, i.e. just the feature name).
"""
if className not in self._baseline:
return None # No baseline available for specified class
return self._baseline[className].getTestFeatures(test)
def setFeatureClassAndTestCase(self, className, test):
"""
Set testing suite to specified testCase and feature class. Throws an assertion error if either class or test case
are not recognized. These have to be set here together, as the settings with which the test case has to be loaded
are defined per feature class in the baseline (extracted from provenance information).
Only (re)loads an image/mask if the test case has changed, or the change of feature class causes a change in test
settings.
If feature class and test case are unchanged, nothing is reloaded and function returns False. If either feature
class or test case is changed, function returns True.
"""
global TEST_CASES
if self._featureClassName == className and self._test == test:
return False
self._test = test
self._testedSet.add(self._test)
    # First set featureClass if necessary, because if settings have changed, testCase needs to be reloaded
if self._featureClassName != className:
self._logger.debug('Setting feature class name to %s', className)
assert className in self._baseline.keys() # Check if a baseline has been read for this class
self._featureClassName = className
# Check if test settings have changed
if self._current_config != self._baseline[className].getTestConfig(test):
self._current_config = self._baseline[className].getTestConfig(test)
self._testCase = None # forces image to be reloaded (as settings have changed)
# Next, set testCase if necessary
if self._testCase != self._current_config['TestCase']:
self._testCase = self._current_config['TestCase']
self._logger.info("Reading the image and mask for test case %s", self._testCase)
assert self._current_config['TestCase'] in TEST_CASES
imageName, maskName = getTestCase(self._testCase)
assert imageName is not None
assert maskName is not None
self._image = sitk.ReadImage(imageName)
self._mask = sitk.ReadImage(maskName)
if 'ImageHash' in self._current_config:
assert sitk.Hash(self._image) == self._current_config['ImageHash']
if 'MaskHash' in self._current_config:
assert sitk.Hash(self._mask) == self._current_config['MaskHash']
settings = self._current_config.get('Settings', {})
interpolator = settings.get('interpolator', sitk.sitkBSpline)
resampledPixelSpacing = settings.get('resampledPixelSpacing', None)
if interpolator is not None and resampledPixelSpacing is not None:
self._image, self._mask = imageoperations.resampleImage(self._image,
self._mask,
resampledPixelSpacing,
interpolator,
settings.get('label', 1),
settings.get('padDistance', 5))
self._bb, correctedMask = imageoperations.checkMask(self._image, self._mask, **settings)
if correctedMask is not None:
self._mask = correctedMask
self._imageType = None
return True
def getImage(self, imageType):
if self._imageType != imageType:
self._applyFilter(imageType)
return self._current_image
def getMask(self, imageType):
if self._imageType != imageType:
self._applyFilter(imageType)
return self._current_mask
def _applyFilter(self, imageType):
if imageType == 'original':
self._current_image, self._current_mask = imageoperations.cropToTumorMask(self._image, self._mask, self._bb)
else:
raise NotImplementedError()
self._imageType = imageType
def getSettings(self):
return self._current_config.get('Settings', {})
def checkResult(self, featureName, value):
"""
Use utility methods to get and test the results against the expected baseline value for this key.
"""
longName = '_'.join(featureName)
if value is None:
self._diffs[self._test][longName] = None
self._results[self._test][longName] = None
assert (value is not None)
if math.isnan(value):
self._diffs[self._test][longName] = numpy.nan
self._results[self._test][longName] = numpy.nan
assert (not math.isnan(value))
# save the result using the baseline class and feature names
self._logger.debug('checkResults: featureName = %s', featureName)
self._results[self._test][longName] = value
baselineValue = self._baseline[self._featureClassName].getBaselineValue(self._test, longName)
assert baselineValue is not None
baselineValue = float(baselineValue)
self._logger.debug('checkResults: for featureName %s, got baseline value = %f', featureName, baselineValue)
if baselineValue == 0.0:
# avoid divide by zero, the difference is either 0% if the value is also zero, or 100%
if value - baselineValue == 0.0:
percentDiff = 0.0
else:
percentDiff = 1.0
else:
percentDiff = abs(1.0 - (value / baselineValue))
# save the difference
self._diffs[self._test][longName] = percentDiff
# check for a less than three percent difference
if (percentDiff >= 0.03):
self._logger.error('checkResult %s, baseline value = %f, calculated = %f, diff = %f%%', featureName,
float(baselineValue), value, percentDiff * 100)
assert (percentDiff < 0.03)
def getResults(self):
return self._results
def getDiffs(self):
return self._diffs
def getDataDir(self):
return self._dataDir
def writeCSV(self, data, fileName):
"""
Write out data in a csv file.
Assumes a data structure with:
{'id1' : {'f1':n1, 'f2':n2}, 'id2' : {'f1':n3, 'f2':n4}}
"""
# Get the headers from the first testCase in _testedSet
# If no tests were run, the length of _testedSet will be 0, and no files should be written
if len(self._testedSet) > 0:
with open(fileName, 'w') as csvFile:
csvFileWriter = csv.writer(csvFile, lineterminator='\n')
testedCases = sorted(self._testedSet)
header = sorted(data[testedCases[0]].keys())
header = ['testCase'] + header
csvFileWriter.writerow(header)
for testCase in testedCases:
thisCase = data[testCase]
thisCase['testCase'] = testCase
row = []
for h in header:
row = row + [thisCase.get(h, "N/A")]
csvFileWriter.writerow(row)
self._logger.info('Wrote to file %s', fileName)
else:
self._logger.info('No test cases run, aborting file write to %s', fileName)
class PyRadiomicsBaseline:
def __init__(self, featureClassName):
self.logger = logging.getLogger('radiomics.testing.baseline')
self.cls = featureClassName
self.configuration = {}
self.baseline = {}
self.tests = set()
@classmethod
def readBaselineFile(cls, baselineFile):
featureClassName = os.path.basename(baselineFile)[9:-4]
new_baseline = cls(featureClassName)
new_baseline.logger.debug('Reading baseline for class %s', new_baseline.cls)
with open(baselineFile, 'r' if six.PY3 else 'rb') as baselineReader:
csvReader = csv.reader(baselineReader)
tests = six.next(csvReader)[1:]
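      # the first row lists the test case names; rows whose first column contains
      # 'general_info' hold configuration, all other rows are feature baselines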
for case in tests:
new_baseline.configuration[case] = {}
new_baseline.baseline[case] = {}
for testRow in csvReader:
for case_idx, case in enumerate(tests, start=1):
if 'general_info' in testRow[0]:
new_baseline.configuration[case][testRow[0]] = testRow[case_idx]
else:
new_baseline.baseline[case][testRow[0]] = testRow[case_idx]
new_baseline.tests = set(tests)
return new_baseline
def getTestConfig(self, test):
if test not in self.configuration:
return {} # This test is not present in the baseline for this class
config = {
'TestCase': self.configuration[test].get('general_info_TestCase', None),
'Settings': ast.literal_eval(self.configuration[test].get('general_info_GeneralSettings', '{}')),
}
if 'general_info_ImageHash' in self.configuration[test]:
config['ImageHash'] = self.configuration[test]['general_info_ImageHash']
if 'general_info_MaskHash' in self.configuration[test]:
config['MaskHash'] = self.configuration[test]['general_info_MaskHash']
if config['TestCase'] is None:
self.logger.error('Missing key "general_info_TestCase". Cannot configure!')
return None
return config
def getTestFeatures(self, test):
"""
Gets all features for which a baseline value is available for the current class and test case. Returns a list
containing the feature names.
"""
if test not in self.baseline:
return None # This test is not present in the baseline for this class
return list(self.baseline[test].keys())
def getBaselineValue(self, test, featureName):
if test not in self.baseline:
return None
return self.baseline[test].get(featureName, None)
def writeBaselineFile(self, baselineDir):
baselineFile = os.path.join(baselineDir, 'baseline_%s.csv' % self.cls)
testCases = list(self.baseline.keys())
with open(baselineFile, 'wb') as baseline:
csvWriter = csv.writer(baseline)
header = ['featureName'] + testCases
csvWriter.writerow(header)
config = self.configuration[testCases[0]].keys()
for c in config:
row = [c]
for testCase in testCases:
row.append(str(self.configuration[testCase].get(c, '')))
csvWriter.writerow(row)
features = self.baseline[testCases[0]].keys()
for f in features:
row = [f]
for testCase in testCases:
row.append(str(self.baseline[testCase].get(f, '')))
csvWriter.writerow(row)
| 36.228792 | 122 | 0.67679 | [
"BSD-3-Clause"
] | NPCC-Joe/Radiomics-pyradiomics | tests/testUtils.py | 14,093 | Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# IntegrityError Exception for checking duplicate entry,
# connection import to establish connection to database
from django.db import IntegrityError, connection
# Used for serializing object data to json string
from django.core.serializers.json import DjangoJSONEncoder
from django.core.serializers import serialize
# Django HTTP Request
from django.shortcuts import render, redirect
from django.http import HttpResponse, Http404, HttpResponseForbidden, HttpResponseRedirect, JsonResponse
# Generic views as Class
from django.views.generic import TemplateView
from django.views.generic.list import ListView
from django.views import View
# system imports
import sys, os, csv, json, datetime, calendar, re
# Django utils
from django.utils import timezone, safestring
from django.utils.decorators import method_decorator
# Django authentication
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.hashers import make_password
# Django Messaging Framework
from django.contrib import messages
# Conditional operators and exception for models
from django.db.models import Q, Count, Sum, Prefetch
from django.core.exceptions import ObjectDoesNotExist
# Paginator class import
from django.core.paginator import Paginator, InvalidPage, EmptyPage, PageNotAnInteger
# Helpers
import app.user_helper as user_helper
import app.records_helper as records_helper
# Forms
from app.forms import *
#=========================================================================================
# GET SUB CATEGORY ON BASIS OF CATEGORY
#=========================================================================================
def get_sub_category(request):
sub_cat_list = request.GET.getlist("cat_id[]")
if len(sub_cat_list) > 0:
sub_cats = records_helper.SubCategoryList(sub_cat_list)
html = []
for sub in sub_cats:
html.append('<option value="'+str(sub.id)+'">'+str(sub)+'</option>');
return HttpResponse(''.join(html))
    return HttpResponse('')
| 33.545455 | 104 | 0.714995 | [
"MIT"
] | lawrence-gandhar/data_security_project | app/views/combiners_views.py | 2,214 | Python |
# -*- coding: utf-8 -*-
# This script was written by Takashi SUGA on April-August 2017
# You may use and/or modify this file according to the license described in the MIT LICENSE.txt file https://raw.githubusercontent.com/suchowan/watson-api-client/master
"""『重要文抽出によるWebページ要約のためのHTMLテキスト分割』
http://harp.lib.hiroshima-u.ac.jp/hiroshima-cu/metadata/5532
を参考にした HTML テキスト化処理
"""
import codecs
import re
class Article:
    # character encodings are tried in this order
encodings = [
"utf-8",
"cp932",
"euc-jp",
"iso-2022-jp",
"latin_1"
]
    # regular expression for extracting block-level elements
block_level_tags = re.compile("(?i)</?(" + "|".join([
"address", "blockquote", "center", "dir", "div", "dl",
"fieldset", "form", "h[1-6]", "hr", "isindex", "menu",
"noframes", "noscript", "ol", "pre", "p", "table", "ul",
"dd", "dt", "frameset", "li", "tbody", "td", "tfoot",
"th", "thead", "tr"
]) + ")(>|[^a-z].*?>)")
def __init__(self, path):
print(path)
self.path = path
self.contents = self.get_contents()
# self.contents = self.get_title()
def get_contents(self):
for encoding in self.encodings:
try:
lines = codecs.open(self.path, 'r', encoding)
html = ' '.join(line.rstrip('\r\n') for line in lines)
return self.__get_contents_in_html(html)
except UnicodeDecodeError:
continue
print('Cannot detect encoding of ' + self.path)
return None
def __get_contents_in_html(self, html):
parts = re.split("(?i)<(?:body|frame).*?>", html, 1)
if len(parts) == 2:
head, body = parts
else:
print('Cannot split ' + self.path)
body = html
body = re.sub(r"(?i)<(script|style|select).*?>.*?</\1\s*>", " ", body)
body = re.sub(self.block_level_tags, ' _BLOCK_LEVEL_TAG_ ', body)
body = re.sub(r"(?i)<a\s.+?>", ' _ANCHOR_LEFT_TAG_ ', body)
body = re.sub("(?i)</a>", ' _ANCHOR_RIGHT_TAG_ ', body)
body = re.sub("(?i)<[/a-z].*?>", " ", body)
return re.sub(" +", " ", "".join(self.__get_contents_in_body(body)))
def __get_contents_in_body(self, body):
for block in body.split("_BLOCK_LEVEL_TAG_"):
yield from self.__get_contents_in_block(block)
def __get_contents_in_block(self, block):
self.in_sentence = False
for unit in block.split("。"):
yield from self.__get_contents_in_unit(unit)
if self.in_sentence:
yield '。\n'
def __get_contents_in_unit(self, unit):
image_link = "_ANCHOR_LEFT_TAG_ +_ANCHOR_RIGHT_TAG_"
unit = re.sub(image_link, " ", unit)
if re.match(r"^ *$", unit):
return
fragment_tag = "((?:_ANCHOR_LEFT_TAG_ .+?_ANCHOR_LEFT_TAG_ ){2,})"
for fragment in re.split(fragment_tag, unit):
yield from self.__get_contents_in_fragment(fragment)
def __get_contents_in_fragment(self, fragment):
fragment = re.sub("_ANCHOR_(LEFT|RIGHT)_TAG_", ' ', fragment)
if re.match(r"^ *$", fragment):
return
text_unit = TextUnit(fragment)
if text_unit.is_sentence():
            # sentence units are terminated with " 。"
if self.in_sentence:
yield '。\n'
yield text_unit.separated
yield ' 。\n'
self.in_sentence = False
else:
            # non-sentence units are terminated with "―。"
            # (limitation) unlike the paper, non-sentence units are only concatenated, never split
yield text_unit.separated
yield '―'
self.in_sentence = True
def get_title(self):
return self.path.split('/')[-1]
from janome.tokenizer import Tokenizer
from collections import defaultdict
import mojimoji
#import re
class TextUnit:
tokenizer = Tokenizer("user_dic.csv", udic_type="simpledic", udic_enc="utf8")
def __init__(self,fragment):
self.fragment = fragment
self.categories = defaultdict(int)
separated = []
for token in self.tokenizer.tokenize(self.preprocess(self.fragment)):
self.categories[self.categorize(token.part_of_speech)] += 1
separated.append(token.surface)
separated.append('')
self.separated = '/'.join(separated)
def categorize(self,part_of_speech):
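        # category buckets: '自立' = independent word, '助詞' = particle,
        # '助動詞' = auxiliary verb, 'その他' = other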
if re.match("^名詞,(一般|代名詞|固有名詞|サ変接続|[^,]+語幹)", part_of_speech):
return '自立'
if re.match("^動詞", part_of_speech) and not re.match("サ変", part_of_speech):
return '自立'
if re.match("^形容詞,自立", part_of_speech):
return '自立'
if re.match("^助詞", part_of_speech):
return '助詞'
if re.match("^助動詞", part_of_speech):
return '助動詞'
return 'その他'
def is_sentence(self):
if self.categories['自立'] == 0:
return False
match = 0
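        # score five sentence-likeness criteria from the paper; three or more
        # hits classify the fragment as a sentence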
if self.categories['自立'] >= 7:
match += 1
if 100 * self.categories['自立'] / sum(self.categories.values()) <= 64:
match += 1
if 100 * (self.categories['助詞'] + self.categories['助動詞']) / self.categories['自立'] >= 22:
            # as in the paper, "attached words" are taken to be particles ∪ auxiliary verbs (differs from the usual definition)
match += 1
if 100 * self.categories['助詞'] / self.categories['自立'] >= 26:
match += 1
if 100 * self.categories['助動詞'] / self.categories['自立'] >= 6:
match += 1
return match >= 3
def preprocess(self, text):
text = re.sub("&[^;]+;", " ", text)
text = mojimoji.han_to_zen(text, digit=False)
text = re.sub('(\t | )+', " ", text)
return text
if __name__ == '__main__':
import glob
import os
path_pattern = '/home/samba/example/links/bookmarks.crawled/**/*.html'
# The converted plaintext is put as '/home/samba/example/links/bookmarks.plaintext/**/*.txt'
for path in glob.glob(path_pattern, recursive=True):
article = Article(path)
plaintext_path = re.sub("(?i)html?$", "txt", path.replace('.crawled', '.plaintext'))
plaintext_path = plaintext_path.replace('\\', '/')
plaintext_dir = re.sub("/[^/]+$", "", plaintext_path)
if not os.path.exists(plaintext_dir):
os.makedirs(plaintext_dir)
with codecs.open(plaintext_path, 'w', 'utf-8') as f:
f.write(article.contents)
| 36.413793 | 168 | 0.566288 | [
"CC0-1.0"
] | suchowan/bookmarks | scripts/python/html2plaintext.py | 6,788 | Python |
from django.contrib import admin
from graphite.events.models import Event
class EventsAdmin(admin.ModelAdmin):
fieldsets = (
(None, {
'fields': ('when', 'what', 'data', 'tags',)
}),
)
list_display = ('when', 'what', 'data',)
list_filter = ('what',)
search_fields = ('tags', )
admin.site.register(Event, EventsAdmin)
| 22.9375 | 55 | 0.594005 | [
"Apache-2.0"
] | drax68/graphite-web | webapp/graphite/events/admin.py | 367 | Python |
# coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class UpdateHttpRedirectDetails(object):
"""
The details of a HTTP Redirect configured to redirect traffic from one hostname to another.
**Warning:** Oracle recommends that you avoid using any confidential information when you supply string values using the API.
"""
def __init__(self, **kwargs):
"""
Initializes a new UpdateHttpRedirectDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param display_name:
The value to assign to the display_name property of this UpdateHttpRedirectDetails.
:type display_name: str
:param target:
The value to assign to the target property of this UpdateHttpRedirectDetails.
:type target: HttpRedirectTarget
:param response_code:
The value to assign to the response_code property of this UpdateHttpRedirectDetails.
:type response_code: int
:param freeform_tags:
The value to assign to the freeform_tags property of this UpdateHttpRedirectDetails.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this UpdateHttpRedirectDetails.
:type defined_tags: dict(str, dict(str, object))
"""
self.swagger_types = {
'display_name': 'str',
'target': 'HttpRedirectTarget',
'response_code': 'int',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))'
}
self.attribute_map = {
'display_name': 'displayName',
'target': 'target',
'response_code': 'responseCode',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags'
}
self._display_name = None
self._target = None
self._response_code = None
self._freeform_tags = None
self._defined_tags = None
@property
def display_name(self):
"""
Gets the display_name of this UpdateHttpRedirectDetails.
The user-friendly name of the HTTP Redirect. The name can be changed and does not need to be unique.
:return: The display_name of this UpdateHttpRedirectDetails.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this UpdateHttpRedirectDetails.
The user-friendly name of the HTTP Redirect. The name can be changed and does not need to be unique.
:param display_name: The display_name of this UpdateHttpRedirectDetails.
:type: str
"""
self._display_name = display_name
@property
def target(self):
"""
Gets the target of this UpdateHttpRedirectDetails.
The redirect target object including all the redirect data.
:return: The target of this UpdateHttpRedirectDetails.
:rtype: HttpRedirectTarget
"""
return self._target
@target.setter
def target(self, target):
"""
Sets the target of this UpdateHttpRedirectDetails.
The redirect target object including all the redirect data.
:param target: The target of this UpdateHttpRedirectDetails.
:type: HttpRedirectTarget
"""
self._target = target
@property
def response_code(self):
"""
Gets the response_code of this UpdateHttpRedirectDetails.
The response code returned for the redirect to the client. For more information, see `RFC 7231`__.
__ https://tools.ietf.org/html/rfc7231#section-6.4
:return: The response_code of this UpdateHttpRedirectDetails.
:rtype: int
"""
return self._response_code
@response_code.setter
def response_code(self, response_code):
"""
Sets the response_code of this UpdateHttpRedirectDetails.
The response code returned for the redirect to the client. For more information, see `RFC 7231`__.
__ https://tools.ietf.org/html/rfc7231#section-6.4
:param response_code: The response_code of this UpdateHttpRedirectDetails.
:type: int
"""
self._response_code = response_code
@property
def freeform_tags(self):
"""
Gets the freeform_tags of this UpdateHttpRedirectDetails.
Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:return: The freeform_tags of this UpdateHttpRedirectDetails.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this UpdateHttpRedirectDetails.
Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:param freeform_tags: The freeform_tags of this UpdateHttpRedirectDetails.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
@property
def defined_tags(self):
"""
Gets the defined_tags of this UpdateHttpRedirectDetails.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see `Resource Tags`__.
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:return: The defined_tags of this UpdateHttpRedirectDetails.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this UpdateHttpRedirectDetails.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
For more information, see `Resource Tags`__.
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:param defined_tags: The defined_tags of this UpdateHttpRedirectDetails.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| 34.312217 | 245 | 0.66148 | [
"Apache-2.0"
] | revnav/sandbox | darling_ansible/python_venv/lib/python3.7/site-packages/oci/waas/models/update_http_redirect_details.py | 7,583 | Python |
#sum = 10
def func1():
#sum = 20
print('Local1:', sum)
def func2():
#sum = 30
print('Local 2:', sum)
func2()
func1()
print("Global:", sum([1, 2, 3]))
| 11.6875 | 32 | 0.459893 | [
"Apache-2.0"
] | zevgenia/Python_shultais | Course/functions/example_12.py | 187 | Python |
from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class ContractResourceAssetAvailiabilityCodesCode(GenericTypeCode):
"""
ContractResourceAssetAvailiabilityCodes
From: http://hl7.org/fhir/asset-availability in valuesets.xml
This value set has asset availability codes.
"""
def __init__(self, value: AutoMapperTextInputType):
super().__init__(value=value)
"""
http://hl7.org/fhir/asset-availability
"""
codeset: FhirUri = "http://hl7.org/fhir/asset-availability"
class ContractResourceAssetAvailiabilityCodesCodeValues:
"""
To be completed
From: http://hl7.org/fhir/asset-availability in valuesets.xml
"""
Lease = ContractResourceAssetAvailiabilityCodesCode("lease")
| 30.941176 | 84 | 0.773764 | [
"Apache-2.0"
] | icanbwell/SparkAutoMapper.FHIR | spark_auto_mapper_fhir/value_sets/contract_resource_asset_availiability_codes.py | 1,052 | Python |
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""This is a class for RandomMirrow_pair."""
import numpy as np
from vega.common import ClassFactory, ClassType
@ClassFactory.register(ClassType.TRANSFORM)
class RandomMirrow_pair(object):
"""Random mirrow two related image."""
def __call__(self, image, label):
"""Call function of RandomMirrow_pair.
:param image: usually the feature image, for example, the LR image for super solution dataset,
the initial image for the segmentation dataset, and etc
:type image: PIL image
:param label: usually the label image, for example, the HR image for super solution dataset,
the mask image for the segmentation dataset, and etc
:type lebel: PIL image
:return: the image after transform
:rtype: list, erery item is a PIL image, the first one is feature image, the second is label image
"""
flip = np.random.choice(2) * 2 - 1
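        # flip is either -1 (reverse the slice) or 1 (leave it unchanged);
        # it is applied as the slice step below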
channels_image = image.shape[-1]
channels_label = label.shape[-1]
if channels_image == 3:
image = image[:, :, ::flip]
else:
image = image[:, ::flip]
if channels_label == 3:
label = label[:, :, ::flip]
else:
label = label[:, ::flip]
return image, label
| 38.863636 | 106 | 0.65731 | [
"MIT"
] | NiuRc/vega | vega/datasets/transforms/RandomMirrow_pair.py | 1,710 | Python |
import pytest
import autofit as af
from autofit.mock import mock as m
@pytest.fixture(
name="target_gaussian"
)
def make_target_gaussian():
return af.PriorModel(
m.Gaussian
)
@pytest.fixture(
name="prior"
)
def make_prior():
return af.UniformPrior()
@pytest.fixture(
name="source_gaussian"
)
def make_source_gaussian(prior):
return af.PriorModel(
m.Gaussian,
centre=prior
)
def test_simple(
source_gaussian,
target_gaussian,
prior
):
target_gaussian.take_attributes(
source_gaussian
)
assert target_gaussian.centre == prior
def test_assertions(
source_gaussian,
target_gaussian
):
target_gaussian.add_assertion(
target_gaussian.centre <= target_gaussian.intensity
)
with pytest.raises(AssertionError):
target_gaussian.take_attributes(
source_gaussian
)
def test_assertions_collection(
source_gaussian,
target_gaussian
):
target_gaussian.add_assertion(
target_gaussian.centre <= target_gaussian.intensity
)
target_collection = af.Collection(
gaussian=target_gaussian
)
source_collection = af.Collection(
gaussian=source_gaussian
)
with pytest.raises(AssertionError):
target_collection.take_attributes(
source_collection
)
def test_in_collection(
source_gaussian,
target_gaussian,
prior
):
target = af.CollectionPriorModel(
gaussian=target_gaussian
)
source = af.CollectionPriorModel(
gaussian=source_gaussian
)
target.take_attributes(
source
)
assert target.gaussian.centre == prior
def test_tuple(
source_gaussian,
target_gaussian,
prior
):
source_gaussian.centre = (prior, 1.0)
target_gaussian.take_attributes(
source_gaussian
)
assert target_gaussian.centre == (prior, 1.0)
def test_tuple_prior(
source_gaussian,
target_gaussian,
prior
):
source_gaussian.centre = (prior, 1.0)
target_gaussian.centre = af.TuplePrior()
target_gaussian.take_attributes(
source_gaussian
)
assert target_gaussian.centre == (prior, 1.0)
def test_tuple_in_instance(
target_gaussian,
prior
):
# noinspection PyTypeChecker
source_gaussian = m.Gaussian(
centre=(prior, 1.0)
)
target_gaussian.take_attributes(
source_gaussian
)
assert target_gaussian.centre == (prior, 1.0)
def test_tuple_in_collection(
source_gaussian,
target_gaussian,
prior
):
source_gaussian.centre = (prior, 1.0)
source = af.CollectionPriorModel(
gaussian=source_gaussian
)
target = af.CollectionPriorModel(
gaussian=target_gaussian
)
target.take_attributes(source)
assert target.gaussian.centre == (prior, 1.0)
def test_tuple_in_instance_in_collection(
target_gaussian,
prior
):
# noinspection PyTypeChecker
source_gaussian = m.Gaussian(
centre=(prior, 1.0)
)
source = af.CollectionPriorModel(
gaussian=source_gaussian
)
target = af.CollectionPriorModel(
gaussian=target_gaussian
)
target.take_attributes(source)
assert target.gaussian.centre == (prior, 1.0)
def test_source_is_dict(
source_gaussian,
target_gaussian,
prior
):
source = dict(
gaussian=source_gaussian
)
target = af.CollectionPriorModel(
gaussian=target_gaussian
)
target.take_attributes(source)
assert target.gaussian.centre == prior
def test_target_is_dict(
source_gaussian,
target_gaussian,
prior
):
source = af.CollectionPriorModel(
collection=af.CollectionPriorModel(
gaussian=source_gaussian
)
)
target = af.CollectionPriorModel(
collection=dict(
gaussian=target_gaussian
)
)
target.take_attributes(source)
assert target.collection.gaussian.centre == prior
def test_missing_from_source(
target_gaussian,
prior
):
target_gaussian.centre = prior
target_gaussian.take_attributes(
af.CollectionPriorModel()
)
assert target_gaussian.centre == prior
def test_unlabelled_in_collection(
source_gaussian,
target_gaussian,
prior
):
target = af.CollectionPriorModel(
[target_gaussian]
)
source = af.CollectionPriorModel(
[source_gaussian]
)
target.take_attributes(
source
)
assert target[0].centre == prior
def test_passing_float(
source_gaussian,
target_gaussian
):
source_gaussian.centre = 2.0
target_gaussian.take_attributes(
source_gaussian
)
assert target_gaussian.centre == 2.0
def test_missing_from_origin(
target_gaussian
):
target_gaussian.take_attributes(
af.CollectionPriorModel()
)
def test_limits(
source_gaussian,
target_gaussian
):
source_gaussian.centre = af.GaussianPrior(
mean=0,
sigma=1,
lower_limit=-1,
upper_limit=1
)
target_gaussian.take_attributes(
source_gaussian
)
assert target_gaussian.centre.lower_limit == -1
assert target_gaussian.centre.upper_limit == 1
def test_tuples():
centre = (0.0, 1.0)
source = af.Model(
m.Gaussian,
centre=centre
)
target = af.Model(
m.Gaussian
)
target.take_attributes(source)
assert target.centre == centre
| 20.335616 | 60 | 0.616201 | ["MIT"] | rhayes777/PyAutoF | test_autofit/mapper/test_take_attributes.py | 5,938 | Python |
#
# Copyright 2017 Alsanium, SAS. or its affiliates. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import handler
class Context:
def get_remaining_time_in_millis(self):
pass
def log(self):
pass
class TestCase(unittest.TestCase):
def test_case(self):
with self.assertRaisesRegexp(AttributeError, "runtime: symbol Handle is not valid"):
handler.Handle({}, Context())
| 28.205882 | 92 | 0.727842 | ["Apache-2.0"] | LIVEauctioneers/aws-lambda-go-shim | tests/sig_param_count/test.py | 959 | Python |
# USAGE
# python hard_negative_mine.py --conf conf/cars.json
# import the necessary packages
from __future__ import print_function
from pyimagesearch.object_detection import ObjectDetector
from pyimagesearch.descriptors import HOG
from pyimagesearch.utils import dataset
from pyimagesearch.utils import Conf
from imutils import paths
import numpy as np
import progressbar
import argparse
import pickle
import random
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--conf", required=True, help="path to the configuration file")
args = vars(ap.parse_args())
# load the configuration file and initialize the data list
conf = Conf(args["conf"])
data = []
# load the classifier, then initialize the Histogram of Oriented Gradients descriptor
# and the object detector
model = pickle.loads(open(conf["classifier_path"], "rb").read())
hog = HOG(orientations=conf["orientations"], pixelsPerCell=tuple(conf["pixels_per_cell"]),
cellsPerBlock=tuple(conf["cells_per_block"]), normalize=conf["normalize"], block_norm="L1")
od = ObjectDetector(model, hog)
# grab the set of distraction paths and randomly sample them
dstPaths = list(paths.list_images(conf["image_distractions"]))
dstPaths = random.sample(dstPaths, conf["hn_num_distraction_images"])
# setup the progress bar
widgets = ["Mining: ", progressbar.Percentage(), " ", progressbar.Bar(), " ", progressbar.ETA()]
pbar = progressbar.ProgressBar(maxval=len(dstPaths), widgets=widgets).start()
# loop over the distraction paths
for (i, imagePath) in enumerate(dstPaths):
# load the image and convert it to grayscale
image = cv2.imread(imagePath)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# detect objects in the image
(boxes, probs) = od.detect(gray, conf["window_dim"], winStep=conf["hn_window_step"],
pyramidScale=conf["hn_pyramid_scale"], minProb=conf["hn_min_probability"])
# loop over the bounding boxes
for (prob, (startX, startY, endX, endY)) in zip(probs, boxes):
# extract the ROI from the image, resize it to a known, canonical size, extract
        # HOG features from the ROI, and finally update the data
roi = cv2.resize(gray[startY:endY, startX:endX], tuple(conf["window_dim"]),
interpolation=cv2.INTER_AREA)
features = hog.describe(roi)
data.append(np.hstack([[prob], features]))
# update the progress bar
pbar.update(i)
# sort the data points by confidence
pbar.finish()
print("[INFO] sorting by probability...")
data = np.array(data)
data = data[data[:, 0].argsort()[::-1]]
# dump the dataset to file
print("[INFO] dumping hard negatives to file...")
dataset.dump_dataset(data[:, 1:], [-1] * len(data), conf["features_path"], "hard_negatives",
    writeMethod="a")
| 37.369863 | 96 | 0.752933 | ["Apache-2.0"] | CactusJackFX/PyImageSearch_Guru | Module_02_Building_Your_Own_Custom_Object_Detector/2.10_Re-Training_and_Running_your_Classifier/hard_negative_mine.py | 2,728 | Python |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2022 Valory AG
# Copyright 2018-2021 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the tests for the code-blocks in the standalone-transaction.md file."""
import logging
import os
from unittest.mock import patch
import pytest
from aea.test_tools.test_cases import BaseAEATestCase
from tests.conftest import CUR_PATH, MAX_FLAKY_RERUNS_INTEGRATION, ROOT_DIR
from tests.test_docs.helper import extract_code_blocks, extract_python_code
from tests.test_docs.test_standalone_transaction.standalone_transaction import (
logger,
run,
)
MD_FILE = "docs/standalone-transaction.md"
PY_FILE = "test_docs/test_standalone_transaction/standalone_transaction.py"
test_logger = logging.getLogger(__name__)
class TestStandaloneTransaction(BaseAEATestCase):
"""This class contains the tests for the code-blocks in the agent-vs-aea.md file."""
@classmethod
def _patch_logger(cls):
cls.patch_logger_info = patch.object(logger, "info")
cls.mocked_logger_info = cls.patch_logger_info.__enter__()
@classmethod
def _unpatch_logger(cls):
cls.mocked_logger_info.__exit__()
@classmethod
def setup_class(cls):
"""Setup the test class."""
super().setup_class()
cls._patch_logger()
doc_path = os.path.join(ROOT_DIR, MD_FILE)
cls.code_blocks = extract_code_blocks(filepath=doc_path, filter_="python")
test_code_path = os.path.join(CUR_PATH, PY_FILE)
cls.python_file = extract_python_code(test_code_path)
def test_read_md_file(self):
"""Test the last code block, that is the full listing of the demo from the Markdown."""
assert (
self.code_blocks[-1] == self.python_file
), "Files must be exactly the same."
@pytest.mark.integration(reruns=MAX_FLAKY_RERUNS_INTEGRATION)
def test_run_end_to_end(self):
"""Run the transaction from the file."""
try:
run()
self.mocked_logger_info.assert_any_call("Transaction complete.")
except RuntimeError:
test_logger.info("RuntimeError: Some transactions have failed")
def test_code_blocks_exist(self):
"""Test that all the code-blocks exist in the python file."""
for blocks in self.code_blocks:
assert (
blocks in self.python_file
), "Code-block doesn't exist in the python file."
| 35.404494 | 95 | 0.671215 | ["Apache-2.0"] | valory-xyz/agents-aea | tests/test_docs/test_standalone_transaction/test_standalone_transaction.py | 3,151 | Python |
"""Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
"""
import collections as _collections
from tensorflow.python.eager import execute as _execute
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.tf_export import tf_export
@tf_export('PeriodicResample')
def periodic_resample(values, shape, name=None):
r"""Periodically resample elements of a tensor to conform to `shape`.
This function implements a slightly more generic version of the subpixel
convolutions found in this [paper](https://arxiv.org/abs/1609.05158).
The formula for computing the elements in the `output` tensor is as follows:
`T` = `values` tensor of rank `R`
`S` = desired `shape` of output tensor (vector of length `R`)
`P` = `output` tensor of rank `R`
\((T_1,\ldots,T_R)\) = shape(`T`)
\([S_1,\ldots,S_q,\ldots,S_R]\) = elements of vector `S`
A single element in `S` is left unspecified (denoted \(S_q=-1\)).
Let \(f_i\) denote the (possibly non-integer) factor that relates the original
dimension to the desired dimensions, \(S_i=f_i T_i\), for \(i\neq q\) where
\(f_i>0\).
Define the following:
\(g_i=\lceil f_i\rceil\)
\(t=\prod_i T_i\)
\(s=\prod_{i\neq q} S_i\)
\(S_q\) can then be defined as by \(S_q=\lfloor t/s\rfloor\).
The elements of the resulting tensor are defined as
\(P_{s_1,\ldots,s_R}=T_{h_1,\ldots,h_q,\ldots,h_R}\).
The \(h_i\) (\(i\neq q\)) are defined by \(h_i=\lfloor s_i/g_i\rfloor\).
\(h_q=S_q\sum_{j\neq q}^{q-1}G_j \mathrm{mod}(s_j,g_j) + s_q\), where
\(G_j=\prod_{i}^{j-1}g_i\) (\(G_0=1\)).
One drawback of this method is that whenever the output dimensions are slightly
less than integer multiples of the input dimensions, many of the tensor elements
are repeated in an inefficient way. This is resolved by specifying that all
desired dimensions are integer multiples of the input tensor.
For example:
```prettyprint
`input` is [[ 0 1 2 3]
[ 4 5 6 7]
[ 8 9 10 11]]
tf.periodic_resample(input, [6, None]) ==> [[ 0 1]
[ 2 3]
[ 4 5]
[ 6 7]
[ 8 9]
[10 11]]
```
Args:
values: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`, `uint32`, `uint64`, `bfloat16`.
The tensor of rank `R` to periodic_resample
shape: A `tf.TensorShape` or list of `ints`.
A 1-D tensor representing the desired shape of the output tensor.
Exactly one element of this tensor must have the value `None` which represents
that this dimension of `values` can be adjusted downward in order to
accommodate increases in other dimensions. The specified sizes of the
non-adjustable dimensions must by at least as large as in the `values` tensor.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `values`.
Periodically resampled tensor that has dimensions specified as in
`shape` except that the dimension specified as `None` will be minimally
decreased as necessary.
"""
shape = _execute.make_shape(shape, "shape")
_ctx = _context.context()
if _ctx.in_graph_mode():
_, _, _op = _op_def_lib._apply_op_helper(
"PeriodicResample", values=values, shape=shape, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = ("T", _op.get_attr("T"), "shape", _op.get_attr("shape"))
else:
_attr_T, (values,) = _execute.args_to_matching_eager([values], _ctx)
_inputs_flat = [values]
_attrs = ("T", _attr_T, "shape", shape)
_result = _execute.execute(b"PeriodicResample", 1, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"PeriodicResample", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
def _InitOpDefLibrary(op_list_proto_bytes):
op_list = _op_def_pb2.OpList()
op_list.ParseFromString(op_list_proto_bytes)
_op_def_registry.register_op_list(op_list)
op_def_lib = _op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
# op {
# name: "PeriodicResample"
# input_arg {
# name: "values"
# type_attr: "T"
# }
# output_arg {
# name: "output"
# type_attr: "T"
# }
# attr {
# name: "T"
# type: "type"
# allowed_values {
# list {
# type: DT_FLOAT
# type: DT_DOUBLE
# type: DT_INT64
# type: DT_INT32
# type: DT_UINT8
# type: DT_UINT16
# type: DT_INT16
# type: DT_INT8
# type: DT_COMPLEX64
# type: DT_COMPLEX128
# type: DT_QINT8
# type: DT_QUINT8
# type: DT_QINT32
# type: DT_HALF
# type: DT_UINT32
# type: DT_UINT64
# type: DT_BFLOAT16
# }
# }
# }
# attr {
# name: "shape"
# type: "shape"
# }
# }
_op_def_lib = _InitOpDefLibrary(b"\n^\n\020PeriodicResample\022\013\n\006values\"\001T\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\t\003\004\021\005\006\010\022\013\014\r\023\026\027\016\"\016\n\005shape\022\005shape")
| 38.891026 | 247 | 0.647602 | ["Apache-2.0"] | gian1312/suchen | tensorflow/contrib/periodic_resample/python/ops/gen_periodic_resample_op.py | 6,067 | Python |
import os
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY')
SQLALCHEMY_TRACK_MODIFICATIONS = False
UPLOADED_PHOTOS_DEST = 'app/static/photos'
# email configurations
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")
# simple mde configurations
SIMPLEMDE_JS_IIFE = True
SIMPLEMDE_USE_CDN = True
@staticmethod
def init_app(app):
pass
class TestConfig(Config):
pass
class ProdConfig(Config):
SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
if SQLALCHEMY_DATABASE_URI and SQLALCHEMY_DATABASE_URI.startswith("postgres://"):
SQLALCHEMY_DATABASE_URI = SQLALCHEMY_DATABASE_URI.replace("postgres://", "postgresql://", 1)
pass
class DevConfig(Config):
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://moringa:Anna123!@localhost/blogapp1'
DEBUG = True
config_options = {
'development': DevConfig,
'production': ProdConfig,
'test': TestConfig
}
| 22.791667 | 100 | 0.707495 | ["Unlicense"] | AnnabelNkir/My_Hello_World | config.py | 1,094 | Python |
import os
import re
import sys
import uuid
import redis
from cryptography.fernet import Fernet
from flask import abort, Flask, render_template, request
from redis.exceptions import ConnectionError
from werkzeug.urls import url_quote_plus
from werkzeug.urls import url_unquote_plus
NO_SSL = os.environ.get('NO_SSL', False)
TOKEN_SEPARATOR = '~'
# Initialize Flask Application
app = Flask(__name__)
if os.environ.get('DEBUG'):
app.debug = True
app.secret_key = os.environ.get('SECRET_KEY', 'Secret Key')
app.config.update(
dict(STATIC_URL=os.environ.get('STATIC_URL', 'static')))
# Initialize Redis
if os.environ.get('MOCK_REDIS'):
from mockredis import mock_strict_redis_client
redis_client = mock_strict_redis_client()
elif os.environ.get('REDIS_URL'):
redis_client = redis.StrictRedis.from_url(os.environ.get('REDIS_URL'))
else:
redis_host = os.environ.get('REDIS_HOST', 'localhost')
redis_port = os.environ.get('REDIS_PORT', 6379)
redis_db = os.environ.get('SNAPPASS_REDIS_DB', 0)
redis_client = redis.StrictRedis(
host=redis_host, port=redis_port, db=redis_db)
REDIS_PREFIX = os.environ.get('REDIS_PREFIX', 'snappass')
TIME_CONVERSION = {'week': 604800, 'day': 86400, 'hour': 3600}
def check_redis_alive(fn):
def inner(*args, **kwargs):
try:
if fn.__name__ == 'main':
redis_client.ping()
return fn(*args, **kwargs)
except ConnectionError as e:
            print('Failed to connect to redis! %s' % e)
if fn.__name__ == 'main':
sys.exit(0)
else:
return abort(500)
return inner
def encrypt(password):
"""
Take a password string, encrypt it with Fernet symmetric encryption,
and return the result (bytes), with the decryption key (bytes)
"""
encryption_key = Fernet.generate_key()
fernet = Fernet(encryption_key)
encrypted_password = fernet.encrypt(password.encode('utf-8'))
return encrypted_password, encryption_key
def decrypt(password, decryption_key):
"""
Decrypt a password (bytes) using the provided key (bytes),
and return the plain-text password (bytes).
"""
fernet = Fernet(decryption_key)
return fernet.decrypt(password)
def parse_token(token):
token_fragments = token.split(TOKEN_SEPARATOR, 1) # Split once, not more.
storage_key = token_fragments[0]
try:
decryption_key = token_fragments[1].encode('utf-8')
except IndexError:
decryption_key = None
return storage_key, decryption_key
@check_redis_alive
def set_password(password, ttl):
"""
Encrypt and store the password for the specified lifetime.
Returns a token comprised of the key where the encrypted password
is stored, and the decryption key.
"""
storage_key = REDIS_PREFIX + uuid.uuid4().hex
encrypted_password, encryption_key = encrypt(password)
redis_client.setex(storage_key, ttl, encrypted_password)
encryption_key = encryption_key.decode('utf-8')
token = TOKEN_SEPARATOR.join([storage_key, encryption_key])
return token
@check_redis_alive
def get_password(token):
"""
From a given token, return the initial password.
If the token is tilde-separated, we decrypt the password fetched from Redis.
If not, the password is simply returned as is.
"""
storage_key, decryption_key = parse_token(token)
password = redis_client.get(storage_key)
redis_client.delete(storage_key)
if password is not None:
if decryption_key is not None:
password = decrypt(password, decryption_key)
return password.decode('utf-8')
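
# Hedged usage note (added for illustration; not part of the original module).
# With a mock backend (MOCK_REDIS=1) the two helpers round-trip a secret like:
#   token = set_password('hunter2', 3600)  # -> 'snappass<uuid hex>~<fernet key>'
#   get_password(token)                    # -> 'hunter2'; the stored key is deleted,
#   get_password(token)                    # so a second read returns None.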
@check_redis_alive
def password_exists(token):
storage_key, decryption_key = parse_token(token)
return redis_client.exists(storage_key)
def empty(value):
if not value:
return True
def clean_input():
"""
Make sure we're not getting bad data from the front end,
format data to be machine readable
"""
if empty(request.form.get('password', '')):
abort(400)
if empty(request.form.get('ttl', '')):
abort(400)
time_period = request.form['ttl'].lower()
if time_period not in TIME_CONVERSION:
abort(400)
return TIME_CONVERSION[time_period], request.form['password']
@app.route('/', methods=['GET'])
def index():
return render_template('set_password.html')
@app.route('/', methods=['POST'])
def handle_password():
ttl, password = clean_input()
token = set_password(password, ttl)
if NO_SSL:
base_url = request.url_root
else:
base_url = request.url_root.replace("http://", "https://")
link = base_url + url_quote_plus(token)
return render_template('confirm.html', password_link=link)
@app.route('/<password_key>', methods=['GET'])
def preview_password(password_key):
password_key = url_unquote_plus(password_key)
if not password_exists(password_key):
abort(404)
return render_template('preview.html')
@app.route('/<password_key>', methods=['POST'])
def show_password(password_key):
password_key = url_unquote_plus(password_key)
password = get_password(password_key)
if not password:
abort(404)
return render_template('password.html', password=password)
@check_redis_alive
def main():
app.run(host='0.0.0.0')
if __name__ == '__main__':
main()
| 27.292929 | 80 | 0.690044 | ["MIT"] | 47Billion/snappass | snappass/main.py | 5,404 | Python |
# Copyright 2021 Hathor Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Callable, Dict, Optional
from structlog import get_logger
from hathor.p2p.messages import ProtocolMessages
if TYPE_CHECKING:
from hathor.p2p.protocol import HathorProtocol # noqa: F401
logger = get_logger()
class BaseState:
protocol: 'HathorProtocol'
cmd_map: Dict[ProtocolMessages, Callable[[str], None]]
def __init__(self, protocol: 'HathorProtocol'):
self.log = logger.new(**protocol.get_logger_context())
self.protocol = protocol
self.cmd_map = {
ProtocolMessages.ERROR: self.handle_error,
ProtocolMessages.THROTTLE: self.handle_throttle,
}
# This variable is set by HathorProtocol after instantiating the state
self.state_name = None
def handle_error(self, payload: str) -> None:
self.protocol.handle_error(payload)
def handle_throttle(self, payload: str) -> None:
self.log.info('throttled', payload=payload)
def send_message(self, cmd: ProtocolMessages, payload: Optional[str] = None) -> None:
self.protocol.send_message(cmd, payload)
def send_throttle(self, key: str) -> None:
limit = self.protocol.ratelimit.get_limit(key)
if limit is None:
return
max_hits, window_seconds = limit
payload = '{} At most {} hits every {} seconds'.format(key, max_hits, window_seconds)
self.protocol.send_message(ProtocolMessages.THROTTLE, payload)
def on_enter(self) -> None:
raise NotImplementedError
def on_exit(self) -> None:
pass
def prepare_to_disconnect(self) -> None:
"""Called when we will disconnect with the peer."""
pass
| 33.397059 | 93 | 0.69749 | ["Apache-2.0"] | HathorNetwork/hathor-core | hathor/p2p/states/base.py | 2,271 | Python |
"""This module contains wunderkafka producer's boilerplate."""
| 31.5 | 62 | 0.777778 | ["Apache-2.0"] | severstal-digital/wunderkafka | wunderkafka/producers/__init__.py | 63 | Python |
from typing import List, Optional
from pydantic import BaseModel
from typing_extensions import Literal
from .request import BaseResponseData, CountOffsetParams, ListRequestParams, ListResponseData
from .tag import Tag
from .user import CommonUserDetails
class Comment(BaseModel):
# The ID of the post
aweme_id: str
# The ID of the comment
cid: str
# The timestamp in seconds when the comment was posted
create_time: int
# The number of times the comment has been liked
digg_count: int
# If this comment is replying to a comment, this array contains the original comment
reply_comment: Optional[List["Comment"]] = None
# If this comment is replying to a comment, the ID of that comment - "0" if not a reply
reply_id: str
# The status of the comment - 1 = published, 4 = published by you?
status: int
# The comment text
text: str
# Details about any tags in the comment
text_extra: List[Tag]
# Details about the author
user: CommonUserDetails
# 1 if the user likes the comment
user_digged: Literal[0, 1]
class ListCommentsRequest(ListRequestParams, CountOffsetParams):
# The ID of the post to list comments for
aweme_id: str
# ??? - default is 2
comment_style: Optional[int] = None
# ???
digged_cid = None
# ???
insert_cids = None
class ListCommentsResponse(ListResponseData, CountOffsetParams):
comments: List[Comment]
class PostCommentRequest(BaseModel):
# The ID of the post to comment on
aweme_id: str
# The comment text
text: str
# The ID of the comment that is being replied to
reply_id: Optional[str] = None
# Details about any tags in the comment
text_extra: List[Tag]
# ???
is_self_see: Literal[0, 1]
class PostCommentResponse(BaseResponseData):
# The comment that was posted
comment: Comment
| 22.630952 | 93 | 0.696476 | ["MIT"] | MikeOwino/tiktok_bot | tiktok_bot/models/comment.py | 1,901 | Python |
import pytest
from datetime import datetime, timedelta
import pytz
from bs4 import BeautifulSoup
from src.events import Events
from src.users import Users
from src.user import USER_ACCESS_MANAGER
from src.stores import MemoryStore
from src.email_generators import EventLocationChangedEmail
def test_event_location_changed_email():
store = MemoryStore()
events = Events(store)
users = Users(store)
start = datetime.now(pytz.timezone("America/New_York"))
dur = timedelta(hours=1)
end = start + dur
u = users.add("[email protected]", 'name', 'alias', 'psw', 8)
e = events.add('test', 'test', 30, start, dur, 'test', 'test',
'[email protected]', 'test', u)
email = EventLocationChangedEmail(e, e, '', root='./src')
html = email.generate(u)
soup = BeautifulSoup(html, 'html.parser')
assert html
assert type(html) == str
assert bool(soup.find())
assert soup.find("div", {"class": "user"}).string.strip() == 'name'
assert soup.find("a", {"class": "event-link"}).string.strip() == 'test'
assert soup.find("td", {"class": "event-location-text"}).string.strip() == 'test'
assert soup.find("div", {"class": "event-description"}).string.strip() == 'test'
| 38.375 | 85 | 0.664495 | ["MIT"] | fjacob21/mididecweb | backend/tests/email_generators/test_event_location_changed.py | 1,228 | Python |
import mmcv
def wider_face_classes():
return ['face']
def voc_classes():
return [
'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
]
def imagenet_det_classes():
return [
'accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo',
'artichoke', 'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam',
'banana', 'band_aid', 'banjo', 'baseball', 'basketball', 'bathing_cap',
'beaker', 'bear', 'bee', 'bell_pepper', 'bench', 'bicycle', 'binder',
'bird', 'bookshelf', 'bow_tie', 'bow', 'bowl', 'brassiere', 'burrito',
'bus', 'butterfly', 'camel', 'can_opener', 'car', 'cart', 'cattle',
'cello', 'centipede', 'chain_saw', 'chair', 'chime', 'cocktail_shaker',
'coffee_maker', 'computer_keyboard', 'computer_mouse', 'corkscrew',
'cream', 'croquet_ball', 'crutch', 'cucumber', 'cup_or_mug', 'diaper',
'digital_clock', 'dishwasher', 'dog', 'domestic_cat', 'dragonfly',
'drum', 'dumbbell', 'electric_fan', 'elephant', 'face_powder', 'fig',
'filing_cabinet', 'flower_pot', 'flute', 'fox', 'french_horn', 'frog',
'frying_pan', 'giant_panda', 'goldfish', 'golf_ball', 'golfcart',
'guacamole', 'guitar', 'hair_dryer', 'hair_spray', 'hamburger',
'hammer', 'hamster', 'harmonica', 'harp', 'hat_with_a_wide_brim',
'head_cabbage', 'helmet', 'hippopotamus', 'horizontal_bar', 'horse',
'hotdog', 'iPod', 'isopod', 'jellyfish', 'koala_bear', 'ladle',
'ladybug', 'lamp', 'laptop', 'lemon', 'lion', 'lipstick', 'lizard',
'lobster', 'maillot', 'maraca', 'microphone', 'microwave', 'milk_can',
'miniskirt', 'monkey', 'motorcycle', 'mushroom', 'nail', 'neck_brace',
'oboe', 'orange', 'otter', 'pencil_box', 'pencil_sharpener', 'perfume',
'person', 'piano', 'pineapple', 'ping-pong_ball', 'pitcher', 'pizza',
'plastic_bag', 'plate_rack', 'pomegranate', 'popsicle', 'porcupine',
'power_drill', 'pretzel', 'printer', 'puck', 'punching_bag', 'purse',
'rabbit', 'racket', 'ray', 'red_panda', 'refrigerator',
'remote_control', 'rubber_eraser', 'rugby_ball', 'ruler',
'salt_or_pepper_shaker', 'saxophone', 'scorpion', 'screwdriver',
'seal', 'sheep', 'ski', 'skunk', 'snail', 'snake', 'snowmobile',
'snowplow', 'soap_dispenser', 'soccer_ball', 'sofa', 'spatula',
'squirrel', 'starfish', 'stethoscope', 'stove', 'strainer',
'strawberry', 'stretcher', 'sunglasses', 'swimming_trunks', 'swine',
'syringe', 'table', 'tape_player', 'tennis_ball', 'tick', 'tie',
'tiger', 'toaster', 'traffic_light', 'train', 'trombone', 'trumpet',
'turtle', 'tv_or_monitor', 'unicycle', 'vacuum', 'violin',
'volleyball', 'waffle_iron', 'washer', 'water_bottle', 'watercraft',
'whale', 'wine_bottle', 'zebra'
]
def imagenet_vid_classes():
return [
'airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus', 'car',
'cattle', 'dog', 'domestic_cat', 'elephant', 'fox', 'giant_panda',
'hamster', 'horse', 'lion', 'lizard', 'monkey', 'motorcycle', 'rabbit',
'red_panda', 'sheep', 'snake', 'squirrel', 'tiger', 'train', 'turtle',
'watercraft', 'whale', 'zebra'
]
def coco_classes():
# return ['瓶盖破损','瓶盖变形','瓶盖坏边','瓶盖打旋','瓶盖断点','标贴歪斜','标贴起皱','标贴气泡','喷码正常','喷码异常']
    return ['瓶盖破损', '瓶盖变形', '瓶盖坏边', '瓶盖打旋', '瓶盖断点', '喷码正常', '喷码异常']  # pg
# return ['标贴歪斜', '标贴起皱', '标贴气泡']
# return [
# 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
# 'truck', 'boat', 'traffic_light', 'fire_hydrant', 'stop_sign',
# 'parking_meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
# 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
# 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
# 'sports_ball', 'kite', 'baseball_bat', 'baseball_glove', 'skateboard',
# 'surfboard', 'tennis_racket', 'bottle', 'wine_glass', 'cup', 'fork',
# 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
# 'broccoli', 'carrot', 'hot_dog', 'pizza', 'donut', 'cake', 'chair',
# 'couch', 'potted_plant', 'bed', 'dining_table', 'toilet', 'tv',
# 'laptop', 'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',
# 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
# 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush'
# ]
def cityscapes_classes():
return [
'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
'bicycle'
]
dataset_aliases = {
'voc': ['voc', 'pascal_voc', 'voc07', 'voc12'],
'imagenet_det': ['det', 'imagenet_det', 'ilsvrc_det'],
'imagenet_vid': ['vid', 'imagenet_vid', 'ilsvrc_vid'],
'coco': ['coco', 'mscoco', 'ms_coco'],
'wider_face': ['WIDERFaceDataset', 'wider_face', 'WDIERFace'],
'cityscapes': ['cityscapes']
}
def get_classes(dataset):
"""Get class names of a dataset."""
alias2name = {}
for name, aliases in dataset_aliases.items():
for alias in aliases:
alias2name[alias] = name
if mmcv.is_str(dataset):
if dataset in alias2name:
labels = eval(alias2name[dataset] + '_classes()')
else:
raise ValueError('Unrecognized dataset: {}'.format(dataset))
else:
        raise TypeError('dataset must be a str, but got {}'.format(type(dataset)))
return labels
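
# Minimal usage sketch (added for illustration; assumes mmcv is importable, as the
# module already requires it). Any alias in dataset_aliases resolves to the
# corresponding *_classes() list defined above.
if __name__ == '__main__':
    print(len(get_classes('coco')))  # 7 bottle-cap/spray-code classes above
    print(get_classes('voc')[:3])    # ['aeroplane', 'bicycle', 'bird']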
| 46.834711 | 84 | 0.575437 | ["Apache-2.0"] | UESTC-Liuxin/TianChi | my_configs/new/mmdet/core/evaluation/class_names.py | 5,827 | Python |
# /usr/bin/env python3
"""Benchmark of handling PDB files comparing multiple libraries."""
import argparse
import glob
import os
import re
import subprocess
import sys
from pathlib import Path
def gather_libs(selected_libs):
libs = []
for path in sorted(glob.iglob("bench/*")):
lib = os.path.basename(path)
if not os.path.isdir(path) or (selected_libs and lib not in selected_libs):
continue
libs.append(lib)
return libs
def gather_tests(libs, selected_tests):
tests = []
for lib in libs:
for filepath in sorted(glob.iglob(os.path.join("bench", lib, "*"))):
test, _ = os.path.splitext(os.path.basename(filepath))
if test in tests or (selected_tests and test not in selected_tests):
continue
tests.append(test)
return tests
def parse_args(argv):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-t", "--tests", help="Test names to run.")
parser.add_argument("-l", "--libraries", help="Library names to test.")
opts = parser.parse_args()
if opts.tests:
opts.tests = opts.tests.split(",")
if opts.libraries:
opts.libraries = opts.libraries.split(",")
return vars(opts)
def run_test(filepath, pdbfile, repeats=10):
*_, dirname, filename = Path(filepath).parts
basename, _ = os.path.splitext(filename)
pdbid, _ = os.path.splitext(os.path.basename(pdbfile))
print(format(f"{dirname}/{basename}/{pdbid}", "<40"), end="", flush=True)
if "schrodinger" in filepath:
cmd = [
os.path.join(os.environ["SCHRODINGER"], "run"),
filepath,
pdbfile,
str(repeats),
]
elif filepath.endswith(".py"):
cmd = ["python3", filepath, pdbfile, str(repeats)]
elif filepath.endswith(".cr"):
cmd = ["crystal", "run", "--release", filepath, "--", pdbfile, str(repeats)]
elif filepath.endswith(".tcl"):
cmd = [
"vmd",
"-dispdev",
"none",
"-e",
filepath,
"-args",
pdbfile,
str(repeats),
]
try:
output = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
output = output.decode(sys.stdout.encoding).strip()
try:
elapsed = float(output)
except ValueError:
elapsed = float(re.findall(r"elapsed *= *([\d\.e\-]+)", output)[0])
print(format(elapsed, ".6f"))
except subprocess.CalledProcessError:
print("failed")
opts = parse_args(sys.argv[1:])
libs = gather_libs(opts["libraries"])
tests = gather_tests(libs, opts["tests"])
pdbs = list(map(os.path.abspath, glob.glob("data/*.pdb")))
for test in tests:
for pdbfile in pdbs if test.startswith("parse") else ["data/1ake.pdb"]:
for lib in libs:
paths = glob.glob(f"bench/{lib}/{test}.*")
if not paths:
continue
run_test(paths[0], pdbfile, repeats=10 if "1htq" not in pdbfile else 3)
print("")
| 29.409524 | 84 | 0.589702 | ["MIT"] | franciscoadasme/pdb-bench | run.py | 3,088 | Python |
#!/usr/bin/env python3
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Various small unit tests
import io
import itertools
import json
import xml.etree.ElementTree
from yt_dlp.compat import (
compat_chr,
compat_etree_fromstring,
compat_getenv,
compat_HTMLParseError,
compat_os_name,
compat_setenv,
)
from yt_dlp.utils import (
Config,
DateRange,
ExtractorError,
InAdvancePagedList,
LazyList,
OnDemandPagedList,
age_restricted,
args_to_str,
base_url,
caesar,
clean_html,
clean_podcast_url,
cli_bool_option,
cli_option,
cli_valueless_option,
date_from_str,
datetime_from_str,
detect_exe_version,
determine_ext,
dfxp2srt,
dict_get,
encode_base_n,
encode_compat_str,
encodeFilename,
escape_rfc3986,
escape_url,
expand_path,
extract_attributes,
find_xpath_attr,
fix_xml_ampersands,
float_or_none,
format_bytes,
get_element_by_attribute,
get_element_by_class,
get_element_html_by_attribute,
get_element_html_by_class,
get_element_text_and_html_by_tag,
get_elements_by_attribute,
get_elements_by_class,
get_elements_html_by_attribute,
get_elements_html_by_class,
get_elements_text_and_html_by_attribute,
int_or_none,
intlist_to_bytes,
iri_to_uri,
is_html,
js_to_json,
limit_length,
locked_file,
lowercase_escape,
match_str,
merge_dicts,
mimetype2ext,
month_by_name,
multipart_encode,
ohdave_rsa_encrypt,
orderedSet,
parse_age_limit,
parse_bitrate,
parse_codecs,
parse_count,
parse_dfxp_time_expr,
parse_duration,
parse_filesize,
parse_iso8601,
parse_qs,
parse_resolution,
pkcs1pad,
prepend_extension,
read_batch_urls,
remove_end,
remove_quotes,
remove_start,
render_table,
replace_extension,
rot47,
sanitize_filename,
sanitize_path,
sanitize_url,
sanitized_Request,
shell_quote,
smuggle_url,
str_to_int,
strip_jsonp,
strip_or_none,
subtitles_filename,
timeconvert,
unescapeHTML,
unified_strdate,
unified_timestamp,
unsmuggle_url,
update_url_query,
uppercase_escape,
url_basename,
url_or_none,
urlencode_postdata,
urljoin,
urshift,
version_tuple,
xpath_attr,
xpath_element,
xpath_text,
xpath_with_ns,
)
class TestUtil(unittest.TestCase):
def test_timeconvert(self):
self.assertTrue(timeconvert('') is None)
self.assertTrue(timeconvert('bougrg') is None)
def test_sanitize_filename(self):
self.assertEqual(sanitize_filename(''), '')
self.assertEqual(sanitize_filename('abc'), 'abc')
self.assertEqual(sanitize_filename('abc_d-e'), 'abc_d-e')
self.assertEqual(sanitize_filename('123'), '123')
self.assertEqual('abc_de', sanitize_filename('abc/de'))
self.assertFalse('/' in sanitize_filename('abc/de///'))
self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de'))
self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|'))
self.assertEqual('yes no', sanitize_filename('yes? no'))
self.assertEqual('this - that', sanitize_filename('this: that'))
self.assertEqual(sanitize_filename('AT&T'), 'AT&T')
aumlaut = 'ä'
self.assertEqual(sanitize_filename(aumlaut), aumlaut)
tests = '\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430'
self.assertEqual(sanitize_filename(tests), tests)
self.assertEqual(
sanitize_filename('New World record at 0:12:34'),
'New World record at 0_12_34')
self.assertEqual(sanitize_filename('--gasdgf'), '--gasdgf')
self.assertEqual(sanitize_filename('--gasdgf', is_id=True), '--gasdgf')
self.assertEqual(sanitize_filename('--gasdgf', is_id=False), '_-gasdgf')
self.assertEqual(sanitize_filename('.gasdgf'), '.gasdgf')
self.assertEqual(sanitize_filename('.gasdgf', is_id=True), '.gasdgf')
self.assertEqual(sanitize_filename('.gasdgf', is_id=False), 'gasdgf')
forbidden = '"\0\\/'
for fc in forbidden:
for fbc in forbidden:
self.assertTrue(fbc not in sanitize_filename(fc))
def test_sanitize_filename_restricted(self):
self.assertEqual(sanitize_filename('abc', restricted=True), 'abc')
self.assertEqual(sanitize_filename('abc_d-e', restricted=True), 'abc_d-e')
self.assertEqual(sanitize_filename('123', restricted=True), '123')
self.assertEqual('abc_de', sanitize_filename('abc/de', restricted=True))
self.assertFalse('/' in sanitize_filename('abc/de///', restricted=True))
self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de', restricted=True))
self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|', restricted=True))
self.assertEqual('yes_no', sanitize_filename('yes? no', restricted=True))
self.assertEqual('this_-_that', sanitize_filename('this: that', restricted=True))
tests = 'aäb\u4e2d\u56fd\u7684c'
self.assertEqual(sanitize_filename(tests, restricted=True), 'aab_c')
self.assertTrue(sanitize_filename('\xf6', restricted=True) != '') # No empty filename
forbidden = '"\0\\/&!: \'\t\n()[]{}$;`^,#'
for fc in forbidden:
for fbc in forbidden:
self.assertTrue(fbc not in sanitize_filename(fc, restricted=True))
# Handle a common case more neatly
self.assertEqual(sanitize_filename('\u5927\u58f0\u5e26 - Song', restricted=True), 'Song')
self.assertEqual(sanitize_filename('\u603b\u7edf: Speech', restricted=True), 'Speech')
# .. but make sure the file name is never empty
self.assertTrue(sanitize_filename('-', restricted=True) != '')
self.assertTrue(sanitize_filename(':', restricted=True) != '')
self.assertEqual(sanitize_filename(
'ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ', restricted=True),
'AAAAAAAECEEEEIIIIDNOOOOOOOOEUUUUUYTHssaaaaaaaeceeeeiiiionooooooooeuuuuuythy')
def test_sanitize_ids(self):
self.assertEqual(sanitize_filename('_n_cd26wFpw', is_id=True), '_n_cd26wFpw')
self.assertEqual(sanitize_filename('_BD_eEpuzXw', is_id=True), '_BD_eEpuzXw')
self.assertEqual(sanitize_filename('N0Y__7-UOdI', is_id=True), 'N0Y__7-UOdI')
def test_sanitize_path(self):
if sys.platform != 'win32':
return
self.assertEqual(sanitize_path('abc'), 'abc')
self.assertEqual(sanitize_path('abc/def'), 'abc\\def')
self.assertEqual(sanitize_path('abc\\def'), 'abc\\def')
self.assertEqual(sanitize_path('abc|def'), 'abc#def')
self.assertEqual(sanitize_path('<>:"|?*'), '#######')
self.assertEqual(sanitize_path('C:/abc/def'), 'C:\\abc\\def')
self.assertEqual(sanitize_path('C?:/abc/def'), 'C##\\abc\\def')
self.assertEqual(sanitize_path('\\\\?\\UNC\\ComputerName\\abc'), '\\\\?\\UNC\\ComputerName\\abc')
self.assertEqual(sanitize_path('\\\\?\\UNC/ComputerName/abc'), '\\\\?\\UNC\\ComputerName\\abc')
self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc')
self.assertEqual(sanitize_path('\\\\?\\C:/abc'), '\\\\?\\C:\\abc')
self.assertEqual(sanitize_path('\\\\?\\C:\\ab?c\\de:f'), '\\\\?\\C:\\ab#c\\de#f')
self.assertEqual(sanitize_path('\\\\?\\C:\\abc'), '\\\\?\\C:\\abc')
self.assertEqual(
sanitize_path('youtube/%(uploader)s/%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s'),
'youtube\\%(uploader)s\\%(autonumber)s-%(title)s-%(upload_date)s.%(ext)s')
self.assertEqual(
sanitize_path('youtube/TheWreckingYard ./00001-Not bad, Especially for Free! (1987 Yamaha 700)-20141116.mp4.part'),
'youtube\\TheWreckingYard #\\00001-Not bad, Especially for Free! (1987 Yamaha 700)-20141116.mp4.part')
self.assertEqual(sanitize_path('abc/def...'), 'abc\\def..#')
self.assertEqual(sanitize_path('abc.../def'), 'abc..#\\def')
self.assertEqual(sanitize_path('abc.../def...'), 'abc..#\\def..#')
self.assertEqual(sanitize_path('../abc'), '..\\abc')
self.assertEqual(sanitize_path('../../abc'), '..\\..\\abc')
self.assertEqual(sanitize_path('./abc'), 'abc')
self.assertEqual(sanitize_path('./../abc'), '..\\abc')
def test_sanitize_url(self):
self.assertEqual(sanitize_url('//foo.bar'), 'http://foo.bar')
self.assertEqual(sanitize_url('httpss://foo.bar'), 'https://foo.bar')
self.assertEqual(sanitize_url('rmtps://foo.bar'), 'rtmps://foo.bar')
self.assertEqual(sanitize_url('https://foo.bar'), 'https://foo.bar')
self.assertEqual(sanitize_url('foo bar'), 'foo bar')
def test_extract_basic_auth(self):
auth_header = lambda url: sanitized_Request(url).get_header('Authorization')
self.assertFalse(auth_header('http://foo.bar'))
self.assertFalse(auth_header('http://:foo.bar'))
self.assertEqual(auth_header('http://@foo.bar'), 'Basic Og==')
self.assertEqual(auth_header('http://:[email protected]'), 'Basic OnBhc3M=')
self.assertEqual(auth_header('http://user:@foo.bar'), 'Basic dXNlcjo=')
self.assertEqual(auth_header('http://user:[email protected]'), 'Basic dXNlcjpwYXNz')
def test_expand_path(self):
def env(var):
return f'%{var}%' if sys.platform == 'win32' else f'${var}'
compat_setenv('yt_dlp_EXPATH_PATH', 'expanded')
self.assertEqual(expand_path(env('yt_dlp_EXPATH_PATH')), 'expanded')
self.assertEqual(expand_path(env('HOME')), compat_getenv('HOME'))
self.assertEqual(expand_path('~'), compat_getenv('HOME'))
self.assertEqual(
expand_path('~/%s' % env('yt_dlp_EXPATH_PATH')),
'%s/expanded' % compat_getenv('HOME'))
def test_prepend_extension(self):
self.assertEqual(prepend_extension('abc.ext', 'temp'), 'abc.temp.ext')
self.assertEqual(prepend_extension('abc.ext', 'temp', 'ext'), 'abc.temp.ext')
self.assertEqual(prepend_extension('abc.unexpected_ext', 'temp', 'ext'), 'abc.unexpected_ext.temp')
self.assertEqual(prepend_extension('abc', 'temp'), 'abc.temp')
self.assertEqual(prepend_extension('.abc', 'temp'), '.abc.temp')
self.assertEqual(prepend_extension('.abc.ext', 'temp'), '.abc.temp.ext')
def test_replace_extension(self):
self.assertEqual(replace_extension('abc.ext', 'temp'), 'abc.temp')
self.assertEqual(replace_extension('abc.ext', 'temp', 'ext'), 'abc.temp')
self.assertEqual(replace_extension('abc.unexpected_ext', 'temp', 'ext'), 'abc.unexpected_ext.temp')
self.assertEqual(replace_extension('abc', 'temp'), 'abc.temp')
self.assertEqual(replace_extension('.abc', 'temp'), '.abc.temp')
self.assertEqual(replace_extension('.abc.ext', 'temp'), '.abc.temp')
def test_subtitles_filename(self):
self.assertEqual(subtitles_filename('abc.ext', 'en', 'vtt'), 'abc.en.vtt')
self.assertEqual(subtitles_filename('abc.ext', 'en', 'vtt', 'ext'), 'abc.en.vtt')
self.assertEqual(subtitles_filename('abc.unexpected_ext', 'en', 'vtt', 'ext'), 'abc.unexpected_ext.en.vtt')
def test_remove_start(self):
self.assertEqual(remove_start(None, 'A - '), None)
self.assertEqual(remove_start('A - B', 'A - '), 'B')
self.assertEqual(remove_start('B - A', 'A - '), 'B - A')
def test_remove_end(self):
self.assertEqual(remove_end(None, ' - B'), None)
self.assertEqual(remove_end('A - B', ' - B'), 'A')
self.assertEqual(remove_end('B - A', ' - B'), 'B - A')
def test_remove_quotes(self):
self.assertEqual(remove_quotes(None), None)
self.assertEqual(remove_quotes('"'), '"')
self.assertEqual(remove_quotes("'"), "'")
self.assertEqual(remove_quotes(';'), ';')
self.assertEqual(remove_quotes('";'), '";')
self.assertEqual(remove_quotes('""'), '')
self.assertEqual(remove_quotes('";"'), ';')
def test_ordered_set(self):
self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7])
self.assertEqual(orderedSet([]), [])
self.assertEqual(orderedSet([1]), [1])
# keep the list ordered
self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1])
def test_unescape_html(self):
self.assertEqual(unescapeHTML('%20;'), '%20;')
        self.assertEqual(unescapeHTML('&#x2F;'), '/')
        self.assertEqual(unescapeHTML('&#47;'), '/')
        self.assertEqual(unescapeHTML('&eacute;'), 'é')
        self.assertEqual(unescapeHTML('&#2013266066;'), '&#2013266066;')
        self.assertEqual(unescapeHTML('&a&quot;'), '&a"')
        # HTML5 entities
        self.assertEqual(unescapeHTML('.&apos;'), '.\'')
def test_date_from_str(self):
self.assertEqual(date_from_str('yesterday'), date_from_str('now-1day'))
self.assertEqual(date_from_str('now+7day'), date_from_str('now+1week'))
self.assertEqual(date_from_str('now+14day'), date_from_str('now+2week'))
self.assertEqual(date_from_str('20200229+365day'), date_from_str('20200229+1year'))
self.assertEqual(date_from_str('20210131+28day'), date_from_str('20210131+1month'))
def test_datetime_from_str(self):
self.assertEqual(datetime_from_str('yesterday', precision='day'), datetime_from_str('now-1day', precision='auto'))
self.assertEqual(datetime_from_str('now+7day', precision='day'), datetime_from_str('now+1week', precision='auto'))
self.assertEqual(datetime_from_str('now+14day', precision='day'), datetime_from_str('now+2week', precision='auto'))
self.assertEqual(datetime_from_str('20200229+365day', precision='day'), datetime_from_str('20200229+1year', precision='auto'))
self.assertEqual(datetime_from_str('20210131+28day', precision='day'), datetime_from_str('20210131+1month', precision='auto'))
self.assertEqual(datetime_from_str('20210131+59day', precision='day'), datetime_from_str('20210131+2month', precision='auto'))
self.assertEqual(datetime_from_str('now+1day', precision='hour'), datetime_from_str('now+24hours', precision='auto'))
self.assertEqual(datetime_from_str('now+23hours', precision='hour'), datetime_from_str('now+23hours', precision='auto'))
def test_daterange(self):
_20century = DateRange("19000101", "20000101")
self.assertFalse("17890714" in _20century)
_ac = DateRange("00010101")
self.assertTrue("19690721" in _ac)
_firstmilenium = DateRange(end="10000101")
self.assertTrue("07110427" in _firstmilenium)
def test_unified_dates(self):
self.assertEqual(unified_strdate('December 21, 2010'), '20101221')
self.assertEqual(unified_strdate('8/7/2009'), '20090708')
self.assertEqual(unified_strdate('Dec 14, 2012'), '20121214')
self.assertEqual(unified_strdate('2012/10/11 01:56:38 +0000'), '20121011')
self.assertEqual(unified_strdate('1968 12 10'), '19681210')
self.assertEqual(unified_strdate('1968-12-10'), '19681210')
self.assertEqual(unified_strdate('28/01/2014 21:00:00 +0100'), '20140128')
self.assertEqual(
unified_strdate('11/26/2014 11:30:00 AM PST', day_first=False),
'20141126')
self.assertEqual(
unified_strdate('2/2/2015 6:47:40 PM', day_first=False),
'20150202')
self.assertEqual(unified_strdate('Feb 14th 2016 5:45PM'), '20160214')
self.assertEqual(unified_strdate('25-09-2014'), '20140925')
self.assertEqual(unified_strdate('27.02.2016 17:30'), '20160227')
self.assertEqual(unified_strdate('UNKNOWN DATE FORMAT'), None)
self.assertEqual(unified_strdate('Feb 7, 2016 at 6:35 pm'), '20160207')
self.assertEqual(unified_strdate('July 15th, 2013'), '20130715')
self.assertEqual(unified_strdate('September 1st, 2013'), '20130901')
self.assertEqual(unified_strdate('Sep 2nd, 2013'), '20130902')
self.assertEqual(unified_strdate('November 3rd, 2019'), '20191103')
self.assertEqual(unified_strdate('October 23rd, 2005'), '20051023')
def test_unified_timestamps(self):
self.assertEqual(unified_timestamp('December 21, 2010'), 1292889600)
self.assertEqual(unified_timestamp('8/7/2009'), 1247011200)
self.assertEqual(unified_timestamp('Dec 14, 2012'), 1355443200)
self.assertEqual(unified_timestamp('2012/10/11 01:56:38 +0000'), 1349920598)
self.assertEqual(unified_timestamp('1968 12 10'), -33436800)
self.assertEqual(unified_timestamp('1968-12-10'), -33436800)
self.assertEqual(unified_timestamp('28/01/2014 21:00:00 +0100'), 1390939200)
self.assertEqual(
unified_timestamp('11/26/2014 11:30:00 AM PST', day_first=False),
1417001400)
self.assertEqual(
unified_timestamp('2/2/2015 6:47:40 PM', day_first=False),
1422902860)
self.assertEqual(unified_timestamp('Feb 14th 2016 5:45PM'), 1455471900)
self.assertEqual(unified_timestamp('25-09-2014'), 1411603200)
self.assertEqual(unified_timestamp('27.02.2016 17:30'), 1456594200)
self.assertEqual(unified_timestamp('UNKNOWN DATE FORMAT'), None)
self.assertEqual(unified_timestamp('May 16, 2016 11:15 PM'), 1463440500)
self.assertEqual(unified_timestamp('Feb 7, 2016 at 6:35 pm'), 1454870100)
self.assertEqual(unified_timestamp('2017-03-30T17:52:41Q'), 1490896361)
self.assertEqual(unified_timestamp('Sep 11, 2013 | 5:49 AM'), 1378878540)
self.assertEqual(unified_timestamp('December 15, 2017 at 7:49 am'), 1513324140)
self.assertEqual(unified_timestamp('2018-03-14T08:32:43.1493874+00:00'), 1521016363)
def test_determine_ext(self):
self.assertEqual(determine_ext('http://example.com/foo/bar.mp4/?download'), 'mp4')
self.assertEqual(determine_ext('http://example.com/foo/bar/?download', None), None)
self.assertEqual(determine_ext('http://example.com/foo/bar.nonext/?download', None), None)
self.assertEqual(determine_ext('http://example.com/foo/bar/mp4?download', None), None)
self.assertEqual(determine_ext('http://example.com/foo/bar.m3u8//?download'), 'm3u8')
self.assertEqual(determine_ext('foobar', None), None)
def test_find_xpath_attr(self):
testxml = '''<root>
<node/>
<node x="a"/>
<node x="a" y="c" />
<node x="b" y="d" />
<node x="" />
</root>'''
doc = compat_etree_fromstring(testxml)
self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n'), None)
self.assertEqual(find_xpath_attr(doc, './/fourohfour', 'n', 'v'), None)
self.assertEqual(find_xpath_attr(doc, './/node', 'n'), None)
self.assertEqual(find_xpath_attr(doc, './/node', 'n', 'v'), None)
self.assertEqual(find_xpath_attr(doc, './/node', 'x'), doc[1])
self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'a'), doc[1])
self.assertEqual(find_xpath_attr(doc, './/node', 'x', 'b'), doc[3])
self.assertEqual(find_xpath_attr(doc, './/node', 'y'), doc[2])
self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'c'), doc[2])
self.assertEqual(find_xpath_attr(doc, './/node', 'y', 'd'), doc[3])
self.assertEqual(find_xpath_attr(doc, './/node', 'x', ''), doc[4])
def test_xpath_with_ns(self):
testxml = '''<root xmlns:media="http://example.com/">
<media:song>
<media:author>The Author</media:author>
<url>http://server.com/download.mp3</url>
</media:song>
</root>'''
doc = compat_etree_fromstring(testxml)
find = lambda p: doc.find(xpath_with_ns(p, {'media': 'http://example.com/'}))
self.assertTrue(find('media:song') is not None)
self.assertEqual(find('media:song/media:author').text, 'The Author')
self.assertEqual(find('media:song/url').text, 'http://server.com/download.mp3')
def test_xpath_element(self):
doc = xml.etree.ElementTree.Element('root')
div = xml.etree.ElementTree.SubElement(doc, 'div')
p = xml.etree.ElementTree.SubElement(div, 'p')
p.text = 'Foo'
self.assertEqual(xpath_element(doc, 'div/p'), p)
self.assertEqual(xpath_element(doc, ['div/p']), p)
self.assertEqual(xpath_element(doc, ['div/bar', 'div/p']), p)
self.assertEqual(xpath_element(doc, 'div/bar', default='default'), 'default')
self.assertEqual(xpath_element(doc, ['div/bar'], default='default'), 'default')
self.assertTrue(xpath_element(doc, 'div/bar') is None)
self.assertTrue(xpath_element(doc, ['div/bar']) is None)
self.assertTrue(xpath_element(doc, ['div/bar'], 'div/baz') is None)
self.assertRaises(ExtractorError, xpath_element, doc, 'div/bar', fatal=True)
self.assertRaises(ExtractorError, xpath_element, doc, ['div/bar'], fatal=True)
self.assertRaises(ExtractorError, xpath_element, doc, ['div/bar', 'div/baz'], fatal=True)
def test_xpath_text(self):
testxml = '''<root>
<div>
<p>Foo</p>
</div>
</root>'''
doc = compat_etree_fromstring(testxml)
self.assertEqual(xpath_text(doc, 'div/p'), 'Foo')
self.assertEqual(xpath_text(doc, 'div/bar', default='default'), 'default')
self.assertTrue(xpath_text(doc, 'div/bar') is None)
self.assertRaises(ExtractorError, xpath_text, doc, 'div/bar', fatal=True)
def test_xpath_attr(self):
testxml = '''<root>
<div>
<p x="a">Foo</p>
</div>
</root>'''
doc = compat_etree_fromstring(testxml)
self.assertEqual(xpath_attr(doc, 'div/p', 'x'), 'a')
self.assertEqual(xpath_attr(doc, 'div/bar', 'x'), None)
self.assertEqual(xpath_attr(doc, 'div/p', 'y'), None)
self.assertEqual(xpath_attr(doc, 'div/bar', 'x', default='default'), 'default')
self.assertEqual(xpath_attr(doc, 'div/p', 'y', default='default'), 'default')
self.assertRaises(ExtractorError, xpath_attr, doc, 'div/bar', 'x', fatal=True)
self.assertRaises(ExtractorError, xpath_attr, doc, 'div/p', 'y', fatal=True)
def test_smuggle_url(self):
data = {"ö": "ö", "abc": [3]}
url = 'https://foo.bar/baz?x=y#a'
smug_url = smuggle_url(url, data)
unsmug_url, unsmug_data = unsmuggle_url(smug_url)
self.assertEqual(url, unsmug_url)
self.assertEqual(data, unsmug_data)
res_url, res_data = unsmuggle_url(url)
self.assertEqual(res_url, url)
self.assertEqual(res_data, None)
smug_url = smuggle_url(url, {'a': 'b'})
smug_smug_url = smuggle_url(smug_url, {'c': 'd'})
res_url, res_data = unsmuggle_url(smug_smug_url)
self.assertEqual(res_url, url)
self.assertEqual(res_data, {'a': 'b', 'c': 'd'})
def test_shell_quote(self):
args = ['ffmpeg', '-i', encodeFilename('ñ€ß\'.mp4')]
self.assertEqual(
shell_quote(args),
"""ffmpeg -i 'ñ€ß'"'"'.mp4'""" if compat_os_name != 'nt' else '''ffmpeg -i "ñ€ß'.mp4"''')
def test_float_or_none(self):
self.assertEqual(float_or_none('42.42'), 42.42)
self.assertEqual(float_or_none('42'), 42.0)
self.assertEqual(float_or_none(''), None)
self.assertEqual(float_or_none(None), None)
self.assertEqual(float_or_none([]), None)
self.assertEqual(float_or_none(set()), None)
def test_int_or_none(self):
self.assertEqual(int_or_none('42'), 42)
self.assertEqual(int_or_none(''), None)
self.assertEqual(int_or_none(None), None)
self.assertEqual(int_or_none([]), None)
self.assertEqual(int_or_none(set()), None)
def test_str_to_int(self):
self.assertEqual(str_to_int('123,456'), 123456)
self.assertEqual(str_to_int('123.456'), 123456)
self.assertEqual(str_to_int(523), 523)
self.assertEqual(str_to_int('noninteger'), None)
self.assertEqual(str_to_int([]), None)
def test_url_basename(self):
self.assertEqual(url_basename('http://foo.de/'), '')
self.assertEqual(url_basename('http://foo.de/bar/baz'), 'baz')
self.assertEqual(url_basename('http://foo.de/bar/baz?x=y'), 'baz')
self.assertEqual(url_basename('http://foo.de/bar/baz#x=y'), 'baz')
self.assertEqual(url_basename('http://foo.de/bar/baz/'), 'baz')
self.assertEqual(
url_basename('http://media.w3.org/2010/05/sintel/trailer.mp4'),
'trailer.mp4')
def test_base_url(self):
self.assertEqual(base_url('http://foo.de/'), 'http://foo.de/')
self.assertEqual(base_url('http://foo.de/bar'), 'http://foo.de/')
self.assertEqual(base_url('http://foo.de/bar/'), 'http://foo.de/bar/')
self.assertEqual(base_url('http://foo.de/bar/baz'), 'http://foo.de/bar/')
self.assertEqual(base_url('http://foo.de/bar/baz?x=z/x/c'), 'http://foo.de/bar/')
def test_urljoin(self):
self.assertEqual(urljoin('http://foo.de/', '/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin(b'http://foo.de/', '/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin('http://foo.de/', b'/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin(b'http://foo.de/', b'/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin('//foo.de/', '/a/b/c.txt'), '//foo.de/a/b/c.txt')
self.assertEqual(urljoin('http://foo.de/', 'a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin('http://foo.de', '/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin('http://foo.de', 'a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin('http://foo.de/', 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin('http://foo.de/', '//foo.de/a/b/c.txt'), '//foo.de/a/b/c.txt')
self.assertEqual(urljoin(None, 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin(None, '//foo.de/a/b/c.txt'), '//foo.de/a/b/c.txt')
self.assertEqual(urljoin('', 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin(['foobar'], 'http://foo.de/a/b/c.txt'), 'http://foo.de/a/b/c.txt')
self.assertEqual(urljoin('http://foo.de/', None), None)
self.assertEqual(urljoin('http://foo.de/', ''), None)
self.assertEqual(urljoin('http://foo.de/', ['foobar']), None)
self.assertEqual(urljoin('http://foo.de/a/b/c.txt', '.././../d.txt'), 'http://foo.de/d.txt')
self.assertEqual(urljoin('http://foo.de/a/b/c.txt', 'rtmp://foo.de'), 'rtmp://foo.de')
self.assertEqual(urljoin(None, 'rtmp://foo.de'), 'rtmp://foo.de')
def test_url_or_none(self):
self.assertEqual(url_or_none(None), None)
self.assertEqual(url_or_none(''), None)
self.assertEqual(url_or_none('foo'), None)
self.assertEqual(url_or_none('http://foo.de'), 'http://foo.de')
self.assertEqual(url_or_none('https://foo.de'), 'https://foo.de')
self.assertEqual(url_or_none('http$://foo.de'), None)
self.assertEqual(url_or_none('http://foo.de'), 'http://foo.de')
self.assertEqual(url_or_none('//foo.de'), '//foo.de')
self.assertEqual(url_or_none('s3://foo.de'), None)
self.assertEqual(url_or_none('rtmpte://foo.de'), 'rtmpte://foo.de')
self.assertEqual(url_or_none('mms://foo.de'), 'mms://foo.de')
self.assertEqual(url_or_none('rtspu://foo.de'), 'rtspu://foo.de')
self.assertEqual(url_or_none('ftps://foo.de'), 'ftps://foo.de')
def test_parse_age_limit(self):
self.assertEqual(parse_age_limit(None), None)
self.assertEqual(parse_age_limit(False), None)
self.assertEqual(parse_age_limit('invalid'), None)
self.assertEqual(parse_age_limit(0), 0)
self.assertEqual(parse_age_limit(18), 18)
self.assertEqual(parse_age_limit(21), 21)
self.assertEqual(parse_age_limit(22), None)
self.assertEqual(parse_age_limit('18'), 18)
self.assertEqual(parse_age_limit('18+'), 18)
self.assertEqual(parse_age_limit('PG-13'), 13)
self.assertEqual(parse_age_limit('TV-14'), 14)
self.assertEqual(parse_age_limit('TV-MA'), 17)
self.assertEqual(parse_age_limit('TV14'), 14)
self.assertEqual(parse_age_limit('TV_G'), 0)
def test_parse_duration(self):
self.assertEqual(parse_duration(None), None)
self.assertEqual(parse_duration(False), None)
self.assertEqual(parse_duration('invalid'), None)
self.assertEqual(parse_duration('1'), 1)
self.assertEqual(parse_duration('1337:12'), 80232)
self.assertEqual(parse_duration('9:12:43'), 33163)
self.assertEqual(parse_duration('12:00'), 720)
self.assertEqual(parse_duration('00:01:01'), 61)
self.assertEqual(parse_duration('x:y'), None)
self.assertEqual(parse_duration('3h11m53s'), 11513)
self.assertEqual(parse_duration('3h 11m 53s'), 11513)
self.assertEqual(parse_duration('3 hours 11 minutes 53 seconds'), 11513)
self.assertEqual(parse_duration('3 hours 11 mins 53 secs'), 11513)
self.assertEqual(parse_duration('3 hours, 11 minutes, 53 seconds'), 11513)
self.assertEqual(parse_duration('3 hours, 11 mins, 53 secs'), 11513)
self.assertEqual(parse_duration('62m45s'), 3765)
self.assertEqual(parse_duration('6m59s'), 419)
self.assertEqual(parse_duration('49s'), 49)
self.assertEqual(parse_duration('0h0m0s'), 0)
self.assertEqual(parse_duration('0m0s'), 0)
self.assertEqual(parse_duration('0s'), 0)
self.assertEqual(parse_duration('01:02:03.05'), 3723.05)
self.assertEqual(parse_duration('T30M38S'), 1838)
self.assertEqual(parse_duration('5 s'), 5)
self.assertEqual(parse_duration('3 min'), 180)
self.assertEqual(parse_duration('2.5 hours'), 9000)
self.assertEqual(parse_duration('02:03:04'), 7384)
self.assertEqual(parse_duration('01:02:03:04'), 93784)
self.assertEqual(parse_duration('1 hour 3 minutes'), 3780)
self.assertEqual(parse_duration('87 Min.'), 5220)
self.assertEqual(parse_duration('PT1H0.040S'), 3600.04)
self.assertEqual(parse_duration('PT00H03M30SZ'), 210)
self.assertEqual(parse_duration('P0Y0M0DT0H4M20.880S'), 260.88)
self.assertEqual(parse_duration('01:02:03:050'), 3723.05)
self.assertEqual(parse_duration('103:050'), 103.05)
def test_fix_xml_ampersands(self):
self.assertEqual(
fix_xml_ampersands('"&x=y&z=a'), '"&x=y&z=a')
self.assertEqual(
fix_xml_ampersands('"&x=y&wrong;&z=a'),
'"&x=y&wrong;&z=a')
self.assertEqual(
fix_xml_ampersands('&'><"'),
'&'><"')
self.assertEqual(
fix_xml_ampersands('Ӓ᪼'), 'Ӓ᪼')
self.assertEqual(fix_xml_ampersands('&#&#'), '&#&#')
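    # testPL exercises both pager implementations (OnDemandPagedList, which fetches
    # pages as slices are requested, and InAdvancePagedList, which is told the page
    # count up front) against the slicing behaviour of a plain list.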
def test_paged_list(self):
def testPL(size, pagesize, sliceargs, expected):
def get_page(pagenum):
firstid = pagenum * pagesize
upto = min(size, pagenum * pagesize + pagesize)
yield from range(firstid, upto)
pl = OnDemandPagedList(get_page, pagesize)
got = pl.getslice(*sliceargs)
self.assertEqual(got, expected)
iapl = InAdvancePagedList(get_page, size // pagesize + 1, pagesize)
got = iapl.getslice(*sliceargs)
self.assertEqual(got, expected)
testPL(5, 2, (), [0, 1, 2, 3, 4])
testPL(5, 2, (1,), [1, 2, 3, 4])
testPL(5, 2, (2,), [2, 3, 4])
testPL(5, 2, (4,), [4])
testPL(5, 2, (0, 3), [0, 1, 2])
testPL(5, 2, (1, 4), [1, 2, 3])
testPL(5, 2, (2, 99), [2, 3, 4])
testPL(5, 2, (20, 99), [])
def test_read_batch_urls(self):
f = io.StringIO('''\xef\xbb\xbf foo
bar\r
baz
# More after this line\r
; or after this
bam''')
self.assertEqual(read_batch_urls(f), ['foo', 'bar', 'baz', 'bam'])
def test_urlencode_postdata(self):
data = urlencode_postdata({'username': '[email protected]', 'password': '1234'})
self.assertTrue(isinstance(data, bytes))
def test_update_url_query(self):
self.assertEqual(parse_qs(update_url_query(
'http://example.com/path', {'quality': ['HD'], 'format': ['mp4']})),
parse_qs('http://example.com/path?quality=HD&format=mp4'))
self.assertEqual(parse_qs(update_url_query(
'http://example.com/path', {'system': ['LINUX', 'WINDOWS']})),
parse_qs('http://example.com/path?system=LINUX&system=WINDOWS'))
self.assertEqual(parse_qs(update_url_query(
'http://example.com/path', {'fields': 'id,formats,subtitles'})),
parse_qs('http://example.com/path?fields=id,formats,subtitles'))
self.assertEqual(parse_qs(update_url_query(
'http://example.com/path', {'fields': ('id,formats,subtitles', 'thumbnails')})),
parse_qs('http://example.com/path?fields=id,formats,subtitles&fields=thumbnails'))
self.assertEqual(parse_qs(update_url_query(
'http://example.com/path?manifest=f4m', {'manifest': []})),
parse_qs('http://example.com/path'))
self.assertEqual(parse_qs(update_url_query(
'http://example.com/path?system=LINUX&system=WINDOWS', {'system': 'LINUX'})),
parse_qs('http://example.com/path?system=LINUX'))
self.assertEqual(parse_qs(update_url_query(
'http://example.com/path', {'fields': b'id,formats,subtitles'})),
parse_qs('http://example.com/path?fields=id,formats,subtitles'))
self.assertEqual(parse_qs(update_url_query(
'http://example.com/path', {'width': 1080, 'height': 720})),
parse_qs('http://example.com/path?width=1080&height=720'))
self.assertEqual(parse_qs(update_url_query(
'http://example.com/path', {'bitrate': 5020.43})),
parse_qs('http://example.com/path?bitrate=5020.43'))
self.assertEqual(parse_qs(update_url_query(
'http://example.com/path', {'test': '第二行тест'})),
parse_qs('http://example.com/path?test=%E7%AC%AC%E4%BA%8C%E8%A1%8C%D1%82%D0%B5%D1%81%D1%82'))
def test_multipart_encode(self):
self.assertEqual(
multipart_encode({b'field': b'value'}, boundary='AAAAAA')[0],
b'--AAAAAA\r\nContent-Disposition: form-data; name="field"\r\n\r\nvalue\r\n--AAAAAA--\r\n')
self.assertEqual(
multipart_encode({'欄位'.encode(): '值'.encode()}, boundary='AAAAAA')[0],
b'--AAAAAA\r\nContent-Disposition: form-data; name="\xe6\xac\x84\xe4\xbd\x8d"\r\n\r\n\xe5\x80\xbc\r\n--AAAAAA--\r\n')
self.assertRaises(
ValueError, multipart_encode, {b'field': b'value'}, boundary='value')
def test_dict_get(self):
FALSE_VALUES = {
'none': None,
'false': False,
'zero': 0,
'empty_string': '',
'empty_list': [],
}
d = FALSE_VALUES.copy()
d['a'] = 42
self.assertEqual(dict_get(d, 'a'), 42)
self.assertEqual(dict_get(d, 'b'), None)
self.assertEqual(dict_get(d, 'b', 42), 42)
self.assertEqual(dict_get(d, ('a', )), 42)
self.assertEqual(dict_get(d, ('b', 'a', )), 42)
self.assertEqual(dict_get(d, ('b', 'c', 'a', 'd', )), 42)
self.assertEqual(dict_get(d, ('b', 'c', )), None)
self.assertEqual(dict_get(d, ('b', 'c', ), 42), 42)
for key, false_value in FALSE_VALUES.items():
self.assertEqual(dict_get(d, ('b', 'c', key, )), None)
self.assertEqual(dict_get(d, ('b', 'c', key, ), skip_false_values=False), false_value)
def test_merge_dicts(self):
self.assertEqual(merge_dicts({'a': 1}, {'b': 2}), {'a': 1, 'b': 2})
self.assertEqual(merge_dicts({'a': 1}, {'a': 2}), {'a': 1})
self.assertEqual(merge_dicts({'a': 1}, {'a': None}), {'a': 1})
self.assertEqual(merge_dicts({'a': 1}, {'a': ''}), {'a': 1})
self.assertEqual(merge_dicts({'a': 1}, {}), {'a': 1})
self.assertEqual(merge_dicts({'a': None}, {'a': 1}), {'a': 1})
self.assertEqual(merge_dicts({'a': ''}, {'a': 1}), {'a': ''})
self.assertEqual(merge_dicts({'a': ''}, {'a': 'abc'}), {'a': 'abc'})
self.assertEqual(merge_dicts({'a': None}, {'a': ''}, {'a': 'abc'}), {'a': 'abc'})
def test_encode_compat_str(self):
self.assertEqual(encode_compat_str(b'\xd1\x82\xd0\xb5\xd1\x81\xd1\x82', 'utf-8'), 'тест')
self.assertEqual(encode_compat_str('тест', 'utf-8'), 'тест')
def test_parse_iso8601(self):
self.assertEqual(parse_iso8601('2014-03-23T23:04:26+0100'), 1395612266)
self.assertEqual(parse_iso8601('2014-03-23T22:04:26+0000'), 1395612266)
self.assertEqual(parse_iso8601('2014-03-23T22:04:26Z'), 1395612266)
self.assertEqual(parse_iso8601('2014-03-23T22:04:26.1234Z'), 1395612266)
self.assertEqual(parse_iso8601('2015-09-29T08:27:31.727'), 1443515251)
self.assertEqual(parse_iso8601('2015-09-29T08-27-31.727'), None)
def test_strip_jsonp(self):
stripped = strip_jsonp('cb ([ {"id":"532cb",\n\n\n"x":\n3}\n]\n);')
d = json.loads(stripped)
self.assertEqual(d, [{"id": "532cb", "x": 3}])
stripped = strip_jsonp('parseMetadata({"STATUS":"OK"})\n\n\n//epc')
d = json.loads(stripped)
self.assertEqual(d, {'STATUS': 'OK'})
stripped = strip_jsonp('ps.embedHandler({"status": "success"});')
d = json.loads(stripped)
self.assertEqual(d, {'status': 'success'})
stripped = strip_jsonp('window.cb && window.cb({"status": "success"});')
d = json.loads(stripped)
self.assertEqual(d, {'status': 'success'})
stripped = strip_jsonp('window.cb && cb({"status": "success"});')
d = json.loads(stripped)
self.assertEqual(d, {'status': 'success'})
stripped = strip_jsonp('({"status": "success"});')
d = json.loads(stripped)
self.assertEqual(d, {'status': 'success'})
def test_strip_or_none(self):
self.assertEqual(strip_or_none(' abc'), 'abc')
self.assertEqual(strip_or_none('abc '), 'abc')
self.assertEqual(strip_or_none(' abc '), 'abc')
self.assertEqual(strip_or_none('\tabc\t'), 'abc')
self.assertEqual(strip_or_none('\n\tabc\n\t'), 'abc')
self.assertEqual(strip_or_none('abc'), 'abc')
self.assertEqual(strip_or_none(''), '')
self.assertEqual(strip_or_none(None), None)
self.assertEqual(strip_or_none(42), None)
self.assertEqual(strip_or_none([]), None)
def test_uppercase_escape(self):
self.assertEqual(uppercase_escape('aä'), 'aä')
self.assertEqual(uppercase_escape('\\U0001d550'), '𝕐')
def test_lowercase_escape(self):
self.assertEqual(lowercase_escape('aä'), 'aä')
self.assertEqual(lowercase_escape('\\u0026'), '&')
def test_limit_length(self):
self.assertEqual(limit_length(None, 12), None)
self.assertEqual(limit_length('foo', 12), 'foo')
self.assertTrue(
limit_length('foo bar baz asd', 12).startswith('foo bar'))
self.assertTrue('...' in limit_length('foo bar baz asd', 12))
def test_mimetype2ext(self):
self.assertEqual(mimetype2ext(None), None)
self.assertEqual(mimetype2ext('video/x-flv'), 'flv')
self.assertEqual(mimetype2ext('application/x-mpegURL'), 'm3u8')
self.assertEqual(mimetype2ext('text/vtt'), 'vtt')
self.assertEqual(mimetype2ext('text/vtt;charset=utf-8'), 'vtt')
self.assertEqual(mimetype2ext('text/html; charset=utf-8'), 'html')
self.assertEqual(mimetype2ext('audio/x-wav'), 'wav')
self.assertEqual(mimetype2ext('audio/x-wav;codec=pcm'), 'wav')
def test_month_by_name(self):
self.assertEqual(month_by_name(None), None)
self.assertEqual(month_by_name('December', 'en'), 12)
self.assertEqual(month_by_name('décembre', 'fr'), 12)
self.assertEqual(month_by_name('December'), 12)
self.assertEqual(month_by_name('décembre'), None)
self.assertEqual(month_by_name('Unknown', 'unknown'), None)
def test_parse_codecs(self):
self.assertEqual(parse_codecs(''), {})
self.assertEqual(parse_codecs('avc1.77.30, mp4a.40.2'), {
'vcodec': 'avc1.77.30',
'acodec': 'mp4a.40.2',
'dynamic_range': None,
})
self.assertEqual(parse_codecs('mp4a.40.2'), {
'vcodec': 'none',
'acodec': 'mp4a.40.2',
'dynamic_range': None,
})
self.assertEqual(parse_codecs('mp4a.40.5,avc1.42001e'), {
'vcodec': 'avc1.42001e',
'acodec': 'mp4a.40.5',
'dynamic_range': None,
})
self.assertEqual(parse_codecs('avc3.640028'), {
'vcodec': 'avc3.640028',
'acodec': 'none',
'dynamic_range': None,
})
self.assertEqual(parse_codecs(', h264,,newcodec,aac'), {
'vcodec': 'h264',
'acodec': 'aac',
'dynamic_range': None,
})
self.assertEqual(parse_codecs('av01.0.05M.08'), {
'vcodec': 'av01.0.05M.08',
'acodec': 'none',
'dynamic_range': None,
})
self.assertEqual(parse_codecs('vp9.2'), {
'vcodec': 'vp9.2',
'acodec': 'none',
'dynamic_range': 'HDR10',
})
self.assertEqual(parse_codecs('av01.0.12M.10.0.110.09.16.09.0'), {
'vcodec': 'av01.0.12M.10',
'acodec': 'none',
'dynamic_range': 'HDR10',
})
self.assertEqual(parse_codecs('dvhe'), {
'vcodec': 'dvhe',
'acodec': 'none',
'dynamic_range': 'DV',
})
self.assertEqual(parse_codecs('theora, vorbis'), {
'vcodec': 'theora',
'acodec': 'vorbis',
'dynamic_range': None,
})
self.assertEqual(parse_codecs('unknownvcodec, unknownacodec'), {
'vcodec': 'unknownvcodec',
'acodec': 'unknownacodec',
})
self.assertEqual(parse_codecs('unknown'), {})
def test_escape_rfc3986(self):
reserved = "!*'();:@&=+$,/?#[]"
unreserved = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_.~'
self.assertEqual(escape_rfc3986(reserved), reserved)
self.assertEqual(escape_rfc3986(unreserved), unreserved)
self.assertEqual(escape_rfc3986('тест'), '%D1%82%D0%B5%D1%81%D1%82')
self.assertEqual(escape_rfc3986('%D1%82%D0%B5%D1%81%D1%82'), '%D1%82%D0%B5%D1%81%D1%82')
self.assertEqual(escape_rfc3986('foo bar'), 'foo%20bar')
self.assertEqual(escape_rfc3986('foo%20bar'), 'foo%20bar')
def test_escape_url(self):
self.assertEqual(
escape_url('http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavré_FD.mp4'),
'http://wowza.imust.org/srv/vod/telemb/new/UPLOAD/UPLOAD/20224_IncendieHavre%CC%81_FD.mp4'
)
self.assertEqual(
escape_url('http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erklärt/Das-Erste/Video?documentId=22673108&bcastId=5290'),
'http://www.ardmediathek.de/tv/Sturm-der-Liebe/Folge-2036-Zu-Mann-und-Frau-erkl%C3%A4rt/Das-Erste/Video?documentId=22673108&bcastId=5290'
)
self.assertEqual(
escape_url('http://тест.рф/фрагмент'),
'http://xn--e1aybc.xn--p1ai/%D1%84%D1%80%D0%B0%D0%B3%D0%BC%D0%B5%D0%BD%D1%82'
)
self.assertEqual(
escape_url('http://тест.рф/абв?абв=абв#абв'),
'http://xn--e1aybc.xn--p1ai/%D0%B0%D0%B1%D0%B2?%D0%B0%D0%B1%D0%B2=%D0%B0%D0%B1%D0%B2#%D0%B0%D0%B1%D0%B2'
)
self.assertEqual(escape_url('http://vimeo.com/56015672#at=0'), 'http://vimeo.com/56015672#at=0')
def test_js_to_json_realworld(self):
inp = '''{
'clip':{'provider':'pseudo'}
}'''
self.assertEqual(js_to_json(inp), '''{
"clip":{"provider":"pseudo"}
}''')
json.loads(js_to_json(inp))
inp = '''{
'playlist':[{'controls':{'all':null}}]
}'''
self.assertEqual(js_to_json(inp), '''{
"playlist":[{"controls":{"all":null}}]
}''')
inp = '''"The CW\\'s \\'Crazy Ex-Girlfriend\\'"'''
self.assertEqual(js_to_json(inp), '''"The CW's 'Crazy Ex-Girlfriend'"''')
inp = '"SAND Number: SAND 2013-7800P\\nPresenter: Tom Russo\\nHabanero Software Training - Xyce Software\\nXyce, Sandia\\u0027s"'
json_code = js_to_json(inp)
self.assertEqual(json.loads(json_code), json.loads(inp))
inp = '''{
0:{src:'skipped', type: 'application/dash+xml'},
1:{src:'skipped', type: 'application/vnd.apple.mpegURL'},
}'''
self.assertEqual(js_to_json(inp), '''{
"0":{"src":"skipped", "type": "application/dash+xml"},
"1":{"src":"skipped", "type": "application/vnd.apple.mpegURL"}
}''')
inp = '''{"foo":101}'''
self.assertEqual(js_to_json(inp), '''{"foo":101}''')
inp = '''{"duration": "00:01:07"}'''
self.assertEqual(js_to_json(inp), '''{"duration": "00:01:07"}''')
inp = '''{segments: [{"offset":-3.885780586188048e-16,"duration":39.75000000000001}]}'''
self.assertEqual(js_to_json(inp), '''{"segments": [{"offset":-3.885780586188048e-16,"duration":39.75000000000001}]}''')
def test_js_to_json_edgecases(self):
on = js_to_json("{abc_def:'1\\'\\\\2\\\\\\'3\"4'}")
self.assertEqual(json.loads(on), {"abc_def": "1'\\2\\'3\"4"})
on = js_to_json('{"abc": true}')
self.assertEqual(json.loads(on), {'abc': True})
# Ignore JavaScript code as well
on = js_to_json('''{
"x": 1,
y: "a",
z: some.code
}''')
d = json.loads(on)
self.assertEqual(d['x'], 1)
self.assertEqual(d['y'], 'a')
# Just drop ! prefix for now though this results in a wrong value
on = js_to_json('''{
a: !0,
b: !1,
c: !!0,
d: !!42.42,
e: !!![],
f: !"abc",
g: !"",
!42: 42
}''')
self.assertEqual(json.loads(on), {
'a': 0,
'b': 1,
'c': 0,
'd': 42.42,
'e': [],
'f': "abc",
'g': "",
'42': 42
})
on = js_to_json('["abc", "def",]')
self.assertEqual(json.loads(on), ['abc', 'def'])
on = js_to_json('[/*comment\n*/"abc"/*comment\n*/,/*comment\n*/"def",/*comment\n*/]')
self.assertEqual(json.loads(on), ['abc', 'def'])
on = js_to_json('[//comment\n"abc" //comment\n,//comment\n"def",//comment\n]')
self.assertEqual(json.loads(on), ['abc', 'def'])
on = js_to_json('{"abc": "def",}')
self.assertEqual(json.loads(on), {'abc': 'def'})
on = js_to_json('{/*comment\n*/"abc"/*comment\n*/:/*comment\n*/"def"/*comment\n*/,/*comment\n*/}')
self.assertEqual(json.loads(on), {'abc': 'def'})
on = js_to_json('{ 0: /* " \n */ ",]" , }')
self.assertEqual(json.loads(on), {'0': ',]'})
on = js_to_json('{ /*comment\n*/0/*comment\n*/: /* " \n */ ",]" , }')
self.assertEqual(json.loads(on), {'0': ',]'})
on = js_to_json('{ 0: // comment\n1 }')
self.assertEqual(json.loads(on), {'0': 1})
on = js_to_json(r'["<p>x<\/p>"]')
self.assertEqual(json.loads(on), ['<p>x</p>'])
on = js_to_json(r'["\xaa"]')
self.assertEqual(json.loads(on), ['\u00aa'])
on = js_to_json("['a\\\nb']")
self.assertEqual(json.loads(on), ['ab'])
on = js_to_json("/*comment\n*/[/*comment\n*/'a\\\nb'/*comment\n*/]/*comment\n*/")
self.assertEqual(json.loads(on), ['ab'])
on = js_to_json('{0xff:0xff}')
self.assertEqual(json.loads(on), {'255': 255})
on = js_to_json('{/*comment\n*/0xff/*comment\n*/:/*comment\n*/0xff/*comment\n*/}')
self.assertEqual(json.loads(on), {'255': 255})
on = js_to_json('{077:077}')
self.assertEqual(json.loads(on), {'63': 63})
on = js_to_json('{/*comment\n*/077/*comment\n*/:/*comment\n*/077/*comment\n*/}')
self.assertEqual(json.loads(on), {'63': 63})
on = js_to_json('{42:42}')
self.assertEqual(json.loads(on), {'42': 42})
on = js_to_json('{/*comment\n*/42/*comment\n*/:/*comment\n*/42/*comment\n*/}')
self.assertEqual(json.loads(on), {'42': 42})
on = js_to_json('{42:4.2e1}')
self.assertEqual(json.loads(on), {'42': 42.0})
on = js_to_json('{ "0x40": "0x40" }')
self.assertEqual(json.loads(on), {'0x40': '0x40'})
on = js_to_json('{ "040": "040" }')
self.assertEqual(json.loads(on), {'040': '040'})
on = js_to_json('[1,//{},\n2]')
self.assertEqual(json.loads(on), [1, 2])
def test_js_to_json_malformed(self):
self.assertEqual(js_to_json('42a1'), '42"a1"')
self.assertEqual(js_to_json('42a-1'), '42"a"-1')
def test_extract_attributes(self):
self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
self.assertEqual(extract_attributes("<e x='y'>"), {'x': 'y'})
self.assertEqual(extract_attributes('<e x=y>'), {'x': 'y'})
self.assertEqual(extract_attributes('<e x="a \'b\' c">'), {'x': "a 'b' c"})
self.assertEqual(extract_attributes('<e x=\'a "b" c\'>'), {'x': 'a "b" c'})
self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
self.assertEqual(extract_attributes('<e x="y">'), {'x': 'y'})
self.assertEqual(extract_attributes('<e x="&">'), {'x': '&'}) # XML
self.assertEqual(extract_attributes('<e x=""">'), {'x': '"'})
self.assertEqual(extract_attributes('<e x="£">'), {'x': '£'}) # HTML 3.2
self.assertEqual(extract_attributes('<e x="λ">'), {'x': 'λ'}) # HTML 4.0
self.assertEqual(extract_attributes('<e x="&foo">'), {'x': '&foo'})
self.assertEqual(extract_attributes('<e x="\'">'), {'x': "'"})
self.assertEqual(extract_attributes('<e x=\'"\'>'), {'x': '"'})
self.assertEqual(extract_attributes('<e x >'), {'x': None})
self.assertEqual(extract_attributes('<e x=y a>'), {'x': 'y', 'a': None})
self.assertEqual(extract_attributes('<e x= y>'), {'x': 'y'})
self.assertEqual(extract_attributes('<e x=1 y=2 x=3>'), {'y': '2', 'x': '3'})
self.assertEqual(extract_attributes('<e \nx=\ny\n>'), {'x': 'y'})
self.assertEqual(extract_attributes('<e \nx=\n"y"\n>'), {'x': 'y'})
self.assertEqual(extract_attributes("<e \nx=\n'y'\n>"), {'x': 'y'})
self.assertEqual(extract_attributes('<e \nx="\ny\n">'), {'x': '\ny\n'})
self.assertEqual(extract_attributes('<e CAPS=x>'), {'caps': 'x'}) # Names lowercased
self.assertEqual(extract_attributes('<e x=1 X=2>'), {'x': '2'})
self.assertEqual(extract_attributes('<e X=1 x=2>'), {'x': '2'})
self.assertEqual(extract_attributes('<e _:funny-name1=1>'), {'_:funny-name1': '1'})
self.assertEqual(extract_attributes('<e x="Fáilte 世界 \U0001f600">'), {'x': 'Fáilte 世界 \U0001f600'})
self.assertEqual(extract_attributes('<e x="décomposé">'), {'x': 'décompose\u0301'})
# "Narrow" Python builds don't support unicode code points outside BMP.
try:
compat_chr(0x10000)
supports_outside_bmp = True
except ValueError:
supports_outside_bmp = False
if supports_outside_bmp:
self.assertEqual(extract_attributes('<e x="Smile 😀!">'), {'x': 'Smile \U0001f600!'})
# Malformed HTML should not break attributes extraction on older Python
self.assertEqual(extract_attributes('<mal"formed/>'), {})
def test_clean_html(self):
self.assertEqual(clean_html('a:\nb'), 'a: b')
self.assertEqual(clean_html('a:\n "b"'), 'a: "b"')
self.assertEqual(clean_html('a<br>\xa0b'), 'a\nb')
def test_intlist_to_bytes(self):
self.assertEqual(
intlist_to_bytes([0, 1, 127, 128, 255]),
b'\x00\x01\x7f\x80\xff')
def test_args_to_str(self):
self.assertEqual(
args_to_str(['foo', 'ba/r', '-baz', '2 be', '']),
'foo ba/r -baz \'2 be\' \'\'' if compat_os_name != 'nt' else 'foo ba/r -baz "2 be" ""'
)
def test_parse_filesize(self):
self.assertEqual(parse_filesize(None), None)
self.assertEqual(parse_filesize(''), None)
self.assertEqual(parse_filesize('91 B'), 91)
self.assertEqual(parse_filesize('foobar'), None)
self.assertEqual(parse_filesize('2 MiB'), 2097152)
self.assertEqual(parse_filesize('5 GB'), 5000000000)
self.assertEqual(parse_filesize('1.2Tb'), 1200000000000)
self.assertEqual(parse_filesize('1.2tb'), 1200000000000)
self.assertEqual(parse_filesize('1,24 KB'), 1240)
self.assertEqual(parse_filesize('1,24 kb'), 1240)
self.assertEqual(parse_filesize('8.5 megabytes'), 8500000)
def test_parse_count(self):
self.assertEqual(parse_count(None), None)
self.assertEqual(parse_count(''), None)
self.assertEqual(parse_count('0'), 0)
self.assertEqual(parse_count('1000'), 1000)
self.assertEqual(parse_count('1.000'), 1000)
self.assertEqual(parse_count('1.1k'), 1100)
self.assertEqual(parse_count('1.1 k'), 1100)
self.assertEqual(parse_count('1,1 k'), 1100)
self.assertEqual(parse_count('1.1kk'), 1100000)
self.assertEqual(parse_count('1.1kk '), 1100000)
self.assertEqual(parse_count('1,1kk'), 1100000)
self.assertEqual(parse_count('100 views'), 100)
self.assertEqual(parse_count('1,100 views'), 1100)
self.assertEqual(parse_count('1.1kk views'), 1100000)
self.assertEqual(parse_count('10M views'), 10000000)
self.assertEqual(parse_count('has 10M views'), 10000000)
def test_parse_resolution(self):
self.assertEqual(parse_resolution(None), {})
self.assertEqual(parse_resolution(''), {})
self.assertEqual(parse_resolution(' 1920x1080'), {'width': 1920, 'height': 1080})
self.assertEqual(parse_resolution('1920×1080 '), {'width': 1920, 'height': 1080})
self.assertEqual(parse_resolution('1920 x 1080'), {'width': 1920, 'height': 1080})
self.assertEqual(parse_resolution('720p'), {'height': 720})
self.assertEqual(parse_resolution('4k'), {'height': 2160})
self.assertEqual(parse_resolution('8K'), {'height': 4320})
self.assertEqual(parse_resolution('pre_1920x1080_post'), {'width': 1920, 'height': 1080})
self.assertEqual(parse_resolution('ep1x2'), {})
self.assertEqual(parse_resolution('1920, 1080'), {'width': 1920, 'height': 1080})
def test_parse_bitrate(self):
self.assertEqual(parse_bitrate(None), None)
self.assertEqual(parse_bitrate(''), None)
self.assertEqual(parse_bitrate('300kbps'), 300)
self.assertEqual(parse_bitrate('1500kbps'), 1500)
self.assertEqual(parse_bitrate('300 kbps'), 300)
def test_version_tuple(self):
self.assertEqual(version_tuple('1'), (1,))
self.assertEqual(version_tuple('10.23.344'), (10, 23, 344))
self.assertEqual(version_tuple('10.1-6'), (10, 1, 6)) # avconv style
def test_detect_exe_version(self):
self.assertEqual(detect_exe_version('''ffmpeg version 1.2.1
built on May 27 2013 08:37:26 with gcc 4.7 (Debian 4.7.3-4)
configuration: --prefix=/usr --extra-'''), '1.2.1')
self.assertEqual(detect_exe_version('''ffmpeg version N-63176-g1fb4685
built on May 15 2014 22:09:06 with gcc 4.8.2 (GCC)'''), 'N-63176-g1fb4685')
self.assertEqual(detect_exe_version('''X server found. dri2 connection failed!
Trying to open render node...
Success at /dev/dri/renderD128.
ffmpeg version 2.4.4 Copyright (c) 2000-2014 the FFmpeg ...'''), '2.4.4')
def test_age_restricted(self):
self.assertFalse(age_restricted(None, 10)) # unrestricted content
self.assertFalse(age_restricted(1, None)) # unrestricted policy
self.assertFalse(age_restricted(8, 10))
self.assertTrue(age_restricted(18, 14))
self.assertFalse(age_restricted(18, 18))
def test_is_html(self):
self.assertFalse(is_html(b'\x49\x44\x43<html'))
self.assertTrue(is_html(b'<!DOCTYPE foo>\xaaa'))
self.assertTrue(is_html( # UTF-8 with BOM
b'\xef\xbb\xbf<!DOCTYPE foo>\xaaa'))
self.assertTrue(is_html( # UTF-16-LE
b'\xff\xfe<\x00h\x00t\x00m\x00l\x00>\x00\xe4\x00'
))
self.assertTrue(is_html( # UTF-16-BE
b'\xfe\xff\x00<\x00h\x00t\x00m\x00l\x00>\x00\xe4'
))
self.assertTrue(is_html( # UTF-32-BE
b'\x00\x00\xFE\xFF\x00\x00\x00<\x00\x00\x00h\x00\x00\x00t\x00\x00\x00m\x00\x00\x00l\x00\x00\x00>\x00\x00\x00\xe4'))
self.assertTrue(is_html( # UTF-32-LE
b'\xFF\xFE\x00\x00<\x00\x00\x00h\x00\x00\x00t\x00\x00\x00m\x00\x00\x00l\x00\x00\x00>\x00\x00\x00\xe4\x00\x00\x00'))
def test_render_table(self):
self.assertEqual(
render_table(
['a', 'empty', 'bcd'],
[[123, '', 4], [9999, '', 51]]),
'a empty bcd\n'
'123 4\n'
'9999 51')
self.assertEqual(
render_table(
['a', 'empty', 'bcd'],
[[123, '', 4], [9999, '', 51]],
hide_empty=True),
'a bcd\n'
'123 4\n'
'9999 51')
self.assertEqual(
render_table(
['\ta', 'bcd'],
[['1\t23', 4], ['\t9999', 51]]),
' a bcd\n'
'1 23 4\n'
'9999 51')
self.assertEqual(
render_table(
['a', 'bcd'],
[[123, 4], [9999, 51]],
delim='-'),
'a bcd\n'
'--------\n'
'123 4\n'
'9999 51')
self.assertEqual(
render_table(
['a', 'bcd'],
[[123, 4], [9999, 51]],
delim='-', extra_gap=2),
'a bcd\n'
'----------\n'
'123 4\n'
'9999 51')
def test_match_str(self):
# Unary
self.assertFalse(match_str('xy', {'x': 1200}))
self.assertTrue(match_str('!xy', {'x': 1200}))
self.assertTrue(match_str('x', {'x': 1200}))
self.assertFalse(match_str('!x', {'x': 1200}))
self.assertTrue(match_str('x', {'x': 0}))
self.assertTrue(match_str('is_live', {'is_live': True}))
self.assertFalse(match_str('is_live', {'is_live': False}))
self.assertFalse(match_str('is_live', {'is_live': None}))
self.assertFalse(match_str('is_live', {}))
self.assertFalse(match_str('!is_live', {'is_live': True}))
self.assertTrue(match_str('!is_live', {'is_live': False}))
self.assertTrue(match_str('!is_live', {'is_live': None}))
self.assertTrue(match_str('!is_live', {}))
self.assertTrue(match_str('title', {'title': 'abc'}))
self.assertTrue(match_str('title', {'title': ''}))
self.assertFalse(match_str('!title', {'title': 'abc'}))
self.assertFalse(match_str('!title', {'title': ''}))
# Numeric
self.assertFalse(match_str('x>0', {'x': 0}))
self.assertFalse(match_str('x>0', {}))
self.assertTrue(match_str('x>?0', {}))
self.assertTrue(match_str('x>1K', {'x': 1200}))
self.assertFalse(match_str('x>2K', {'x': 1200}))
self.assertTrue(match_str('x>=1200 & x < 1300', {'x': 1200}))
self.assertFalse(match_str('x>=1100 & x < 1200', {'x': 1200}))
self.assertTrue(match_str('x > 1:0:0', {'x': 3700}))
# String
self.assertFalse(match_str('y=a212', {'y': 'foobar42'}))
self.assertTrue(match_str('y=foobar42', {'y': 'foobar42'}))
self.assertFalse(match_str('y!=foobar42', {'y': 'foobar42'}))
self.assertTrue(match_str('y!=foobar2', {'y': 'foobar42'}))
self.assertTrue(match_str('y^=foo', {'y': 'foobar42'}))
self.assertFalse(match_str('y!^=foo', {'y': 'foobar42'}))
self.assertFalse(match_str('y^=bar', {'y': 'foobar42'}))
self.assertTrue(match_str('y!^=bar', {'y': 'foobar42'}))
self.assertRaises(ValueError, match_str, 'x^=42', {'x': 42})
self.assertTrue(match_str('y*=bar', {'y': 'foobar42'}))
self.assertFalse(match_str('y!*=bar', {'y': 'foobar42'}))
self.assertFalse(match_str('y*=baz', {'y': 'foobar42'}))
self.assertTrue(match_str('y!*=baz', {'y': 'foobar42'}))
self.assertTrue(match_str('y$=42', {'y': 'foobar42'}))
self.assertFalse(match_str('y$=43', {'y': 'foobar42'}))
# And
self.assertFalse(match_str(
'like_count > 100 & dislike_count <? 50 & description',
{'like_count': 90, 'description': 'foo'}))
self.assertTrue(match_str(
'like_count > 100 & dislike_count <? 50 & description',
{'like_count': 190, 'description': 'foo'}))
self.assertFalse(match_str(
'like_count > 100 & dislike_count <? 50 & description',
{'like_count': 190, 'dislike_count': 60, 'description': 'foo'}))
self.assertFalse(match_str(
'like_count > 100 & dislike_count <? 50 & description',
{'like_count': 190, 'dislike_count': 10}))
# Regex
self.assertTrue(match_str(r'x~=\bbar', {'x': 'foo bar'}))
self.assertFalse(match_str(r'x~=\bbar.+', {'x': 'foo bar'}))
self.assertFalse(match_str(r'x~=^FOO', {'x': 'foo bar'}))
self.assertTrue(match_str(r'x~=(?i)^FOO', {'x': 'foo bar'}))
# Quotes
self.assertTrue(match_str(r'x^="foo"', {'x': 'foo "bar"'}))
self.assertFalse(match_str(r'x^="foo "', {'x': 'foo "bar"'}))
self.assertFalse(match_str(r'x$="bar"', {'x': 'foo "bar"'}))
self.assertTrue(match_str(r'x$=" \"bar\""', {'x': 'foo "bar"'}))
# Escaping &
self.assertFalse(match_str(r'x=foo & bar', {'x': 'foo & bar'}))
self.assertTrue(match_str(r'x=foo \& bar', {'x': 'foo & bar'}))
self.assertTrue(match_str(r'x=foo \& bar & x^=foo', {'x': 'foo & bar'}))
self.assertTrue(match_str(r'x="foo \& bar" & x^=foo', {'x': 'foo & bar'}))
# Example from docs
self.assertTrue(match_str(
r"!is_live & like_count>?100 & description~='(?i)\bcats \& dogs\b'",
{'description': 'Raining Cats & Dogs'}))
# Incomplete
self.assertFalse(match_str('id!=foo', {'id': 'foo'}, True))
self.assertTrue(match_str('x', {'id': 'foo'}, True))
self.assertTrue(match_str('!x', {'id': 'foo'}, True))
self.assertFalse(match_str('x', {'id': 'foo'}, False))
def test_parse_dfxp_time_expr(self):
self.assertEqual(parse_dfxp_time_expr(None), None)
self.assertEqual(parse_dfxp_time_expr(''), None)
self.assertEqual(parse_dfxp_time_expr('0.1'), 0.1)
self.assertEqual(parse_dfxp_time_expr('0.1s'), 0.1)
self.assertEqual(parse_dfxp_time_expr('00:00:01'), 1.0)
self.assertEqual(parse_dfxp_time_expr('00:00:01.100'), 1.1)
self.assertEqual(parse_dfxp_time_expr('00:00:01:100'), 1.1)
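    # dfxp2srt converts TTML/DFXP subtitle documents to SRT. The cases below cover
    # the default TTML namespace, a document without one, style information mapped
    # to <font>/<b>/<i>/<u> tags, and non-UTF-8 (UTF-16) input.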
def test_dfxp2srt(self):
dfxp_data = '''<?xml version="1.0" encoding="UTF-8"?>
<tt xmlns="http://www.w3.org/ns/ttml" xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
<body>
<div xml:lang="en">
<p begin="0" end="1">The following line contains Chinese characters and special symbols</p>
<p begin="1" end="2">第二行<br/>♪♪</p>
<p begin="2" dur="1"><span>Third<br/>Line</span></p>
<p begin="3" end="-1">Lines with invalid timestamps are ignored</p>
<p begin="-1" end="-1">Ignore, two</p>
<p begin="3" dur="-1">Ignored, three</p>
</div>
</body>
</tt>'''.encode()
        srt_data = '''1
00:00:00,000 --> 00:00:01,000
The following line contains Chinese characters and special symbols

2
00:00:01,000 --> 00:00:02,000
第二行
♪♪

3
00:00:02,000 --> 00:00:03,000
Third
Line

'''
self.assertEqual(dfxp2srt(dfxp_data), srt_data)
dfxp_data_no_default_namespace = b'''<?xml version="1.0" encoding="UTF-8"?>
<tt xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
<body>
<div xml:lang="en">
<p begin="0" end="1">The first line</p>
</div>
</body>
</tt>'''
        srt_data = '''1
00:00:00,000 --> 00:00:01,000
The first line

'''
self.assertEqual(dfxp2srt(dfxp_data_no_default_namespace), srt_data)
dfxp_data_with_style = b'''<?xml version="1.0" encoding="utf-8"?>
<tt xmlns="http://www.w3.org/2006/10/ttaf1" xmlns:ttp="http://www.w3.org/2006/10/ttaf1#parameter" ttp:timeBase="media" xmlns:tts="http://www.w3.org/2006/10/ttaf1#style" xml:lang="en" xmlns:ttm="http://www.w3.org/2006/10/ttaf1#metadata">
<head>
<styling>
<style id="s2" style="s0" tts:color="cyan" tts:fontWeight="bold" />
<style id="s1" style="s0" tts:color="yellow" tts:fontStyle="italic" />
<style id="s3" style="s0" tts:color="lime" tts:textDecoration="underline" />
<style id="s0" tts:backgroundColor="black" tts:fontStyle="normal" tts:fontSize="16" tts:fontFamily="sansSerif" tts:color="white" />
</styling>
</head>
<body tts:textAlign="center" style="s0">
<div>
<p begin="00:00:02.08" id="p0" end="00:00:05.84">default style<span tts:color="red">custom style</span></p>
<p style="s2" begin="00:00:02.08" id="p0" end="00:00:05.84"><span tts:color="lime">part 1<br /></span><span tts:color="cyan">part 2</span></p>
<p style="s3" begin="00:00:05.84" id="p1" end="00:00:09.56">line 3<br />part 3</p>
<p style="s1" tts:textDecoration="underline" begin="00:00:09.56" id="p2" end="00:00:12.36"><span style="s2" tts:color="lime">inner<br /> </span>style</p>
</div>
</body>
</tt>'''
        srt_data = '''1
00:00:02,080 --> 00:00:05,840
<font color="white" face="sansSerif" size="16">default style<font color="red">custom style</font></font>

2
00:00:02,080 --> 00:00:05,840
<b><font color="cyan" face="sansSerif" size="16"><font color="lime">part 1
</font>part 2</font></b>

3
00:00:05,840 --> 00:00:09,560
<u><font color="lime">line 3
part 3</font></u>

4
00:00:09,560 --> 00:00:12,360
<i><u><font color="yellow"><font color="lime">inner
</font>style</font></u></i>

'''
self.assertEqual(dfxp2srt(dfxp_data_with_style), srt_data)
dfxp_data_non_utf8 = '''<?xml version="1.0" encoding="UTF-16"?>
<tt xmlns="http://www.w3.org/ns/ttml" xml:lang="en" xmlns:tts="http://www.w3.org/ns/ttml#parameter">
<body>
<div xml:lang="en">
<p begin="0" end="1">Line 1</p>
<p begin="1" end="2">第二行</p>
</div>
</body>
</tt>'''.encode('utf-16')
        srt_data = '''1
00:00:00,000 --> 00:00:01,000
Line 1

2
00:00:01,000 --> 00:00:02,000
第二行

'''
self.assertEqual(dfxp2srt(dfxp_data_non_utf8), srt_data)
def test_cli_option(self):
self.assertEqual(cli_option({'proxy': '127.0.0.1:3128'}, '--proxy', 'proxy'), ['--proxy', '127.0.0.1:3128'])
self.assertEqual(cli_option({'proxy': None}, '--proxy', 'proxy'), [])
self.assertEqual(cli_option({}, '--proxy', 'proxy'), [])
self.assertEqual(cli_option({'retries': 10}, '--retries', 'retries'), ['--retries', '10'])
def test_cli_valueless_option(self):
self.assertEqual(cli_valueless_option(
{'downloader': 'external'}, '--external-downloader', 'downloader', 'external'), ['--external-downloader'])
self.assertEqual(cli_valueless_option(
{'downloader': 'internal'}, '--external-downloader', 'downloader', 'external'), [])
self.assertEqual(cli_valueless_option(
{'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate'), ['--no-check-certificate'])
self.assertEqual(cli_valueless_option(
{'nocheckcertificate': False}, '--no-check-certificate', 'nocheckcertificate'), [])
self.assertEqual(cli_valueless_option(
{'checkcertificate': True}, '--no-check-certificate', 'checkcertificate', False), [])
self.assertEqual(cli_valueless_option(
{'checkcertificate': False}, '--no-check-certificate', 'checkcertificate', False), ['--no-check-certificate'])
def test_cli_bool_option(self):
self.assertEqual(
cli_bool_option(
{'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate'),
['--no-check-certificate', 'true'])
self.assertEqual(
cli_bool_option(
{'nocheckcertificate': True}, '--no-check-certificate', 'nocheckcertificate', separator='='),
['--no-check-certificate=true'])
self.assertEqual(
cli_bool_option(
{'nocheckcertificate': True}, '--check-certificate', 'nocheckcertificate', 'false', 'true'),
['--check-certificate', 'false'])
self.assertEqual(
cli_bool_option(
{'nocheckcertificate': True}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
['--check-certificate=false'])
self.assertEqual(
cli_bool_option(
{'nocheckcertificate': False}, '--check-certificate', 'nocheckcertificate', 'false', 'true'),
['--check-certificate', 'true'])
self.assertEqual(
cli_bool_option(
{'nocheckcertificate': False}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
['--check-certificate=true'])
self.assertEqual(
cli_bool_option(
{}, '--check-certificate', 'nocheckcertificate', 'false', 'true', '='),
[])
def test_ohdave_rsa_encrypt(self):
N = 0xab86b6371b5318aaa1d3c9e612a9f1264f372323c8c0f19875b5fc3b3fd3afcc1e5bec527aa94bfa85bffc157e4245aebda05389a5357b75115ac94f074aefcd
e = 65537
self.assertEqual(
ohdave_rsa_encrypt(b'aa111222', e, N),
'726664bd9a23fd0c70f9f1b84aab5e3905ce1e45a584e9cbcf9bcc7510338fc1986d6c599ff990d923aa43c51c0d9013cd572e13bc58f4ae48f2ed8c0b0ba881')
def test_pkcs1pad(self):
data = [1, 2, 3]
padded_data = pkcs1pad(data, 32)
self.assertEqual(padded_data[:2], [0, 2])
self.assertEqual(padded_data[28:], [0, 1, 2, 3])
self.assertRaises(ValueError, pkcs1pad, data, 8)
def test_encode_base_n(self):
self.assertEqual(encode_base_n(0, 30), '0')
self.assertEqual(encode_base_n(80, 30), '2k')
custom_table = '9876543210ZYXWVUTSRQPONMLKJIHGFEDCBA'
self.assertEqual(encode_base_n(0, 30, custom_table), '9')
self.assertEqual(encode_base_n(80, 30, custom_table), '7P')
self.assertRaises(ValueError, encode_base_n, 0, 70)
self.assertRaises(ValueError, encode_base_n, 0, 60, custom_table)
def test_caesar(self):
self.assertEqual(caesar('ace', 'abcdef', 2), 'cea')
self.assertEqual(caesar('cea', 'abcdef', -2), 'ace')
self.assertEqual(caesar('ace', 'abcdef', -2), 'eac')
self.assertEqual(caesar('eac', 'abcdef', 2), 'ace')
self.assertEqual(caesar('ace', 'abcdef', 0), 'ace')
self.assertEqual(caesar('xyz', 'abcdef', 2), 'xyz')
self.assertEqual(caesar('abc', 'acegik', 2), 'ebg')
self.assertEqual(caesar('ebg', 'acegik', -2), 'abc')
def test_rot47(self):
self.assertEqual(rot47('yt-dlp'), r'JE\5=A')
self.assertEqual(rot47('YT-DLP'), r'*%\s{!')
def test_urshift(self):
self.assertEqual(urshift(3, 1), 1)
self.assertEqual(urshift(-3, 1), 2147483646)
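    # The fixtures and tests below exercise the HTML extraction helpers:
    # get_element(s)_by_class, get_element(s)_by_attribute, their *_html_*
    # variants and get_element_text_and_html_by_tag.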
GET_ELEMENT_BY_CLASS_TEST_STRING = '''
<span class="foo bar">nice</span>
'''
def test_get_element_by_class(self):
html = self.GET_ELEMENT_BY_CLASS_TEST_STRING
self.assertEqual(get_element_by_class('foo', html), 'nice')
self.assertEqual(get_element_by_class('no-such-class', html), None)
def test_get_element_html_by_class(self):
html = self.GET_ELEMENT_BY_CLASS_TEST_STRING
self.assertEqual(get_element_html_by_class('foo', html), html.strip())
self.assertEqual(get_element_by_class('no-such-class', html), None)
GET_ELEMENT_BY_ATTRIBUTE_TEST_STRING = '''
<div itemprop="author" itemscope>foo</div>
'''
def test_get_element_by_attribute(self):
html = self.GET_ELEMENT_BY_CLASS_TEST_STRING
self.assertEqual(get_element_by_attribute('class', 'foo bar', html), 'nice')
self.assertEqual(get_element_by_attribute('class', 'foo', html), None)
self.assertEqual(get_element_by_attribute('class', 'no-such-foo', html), None)
html = self.GET_ELEMENT_BY_ATTRIBUTE_TEST_STRING
self.assertEqual(get_element_by_attribute('itemprop', 'author', html), 'foo')
def test_get_element_html_by_attribute(self):
html = self.GET_ELEMENT_BY_CLASS_TEST_STRING
self.assertEqual(get_element_html_by_attribute('class', 'foo bar', html), html.strip())
self.assertEqual(get_element_html_by_attribute('class', 'foo', html), None)
self.assertEqual(get_element_html_by_attribute('class', 'no-such-foo', html), None)
html = self.GET_ELEMENT_BY_ATTRIBUTE_TEST_STRING
self.assertEqual(get_element_html_by_attribute('itemprop', 'author', html), html.strip())
GET_ELEMENTS_BY_CLASS_TEST_STRING = '''
<span class="foo bar">nice</span><span class="foo bar">also nice</span>
'''
GET_ELEMENTS_BY_CLASS_RES = ['<span class="foo bar">nice</span>', '<span class="foo bar">also nice</span>']
def test_get_elements_by_class(self):
html = self.GET_ELEMENTS_BY_CLASS_TEST_STRING
self.assertEqual(get_elements_by_class('foo', html), ['nice', 'also nice'])
self.assertEqual(get_elements_by_class('no-such-class', html), [])
def test_get_elements_html_by_class(self):
html = self.GET_ELEMENTS_BY_CLASS_TEST_STRING
self.assertEqual(get_elements_html_by_class('foo', html), self.GET_ELEMENTS_BY_CLASS_RES)
self.assertEqual(get_elements_html_by_class('no-such-class', html), [])
def test_get_elements_by_attribute(self):
html = self.GET_ELEMENTS_BY_CLASS_TEST_STRING
self.assertEqual(get_elements_by_attribute('class', 'foo bar', html), ['nice', 'also nice'])
self.assertEqual(get_elements_by_attribute('class', 'foo', html), [])
self.assertEqual(get_elements_by_attribute('class', 'no-such-foo', html), [])
def test_get_elements_html_by_attribute(self):
html = self.GET_ELEMENTS_BY_CLASS_TEST_STRING
self.assertEqual(get_elements_html_by_attribute('class', 'foo bar', html), self.GET_ELEMENTS_BY_CLASS_RES)
self.assertEqual(get_elements_html_by_attribute('class', 'foo', html), [])
self.assertEqual(get_elements_html_by_attribute('class', 'no-such-foo', html), [])
def test_get_elements_text_and_html_by_attribute(self):
html = self.GET_ELEMENTS_BY_CLASS_TEST_STRING
self.assertEqual(
list(get_elements_text_and_html_by_attribute('class', 'foo bar', html)),
list(zip(['nice', 'also nice'], self.GET_ELEMENTS_BY_CLASS_RES)))
self.assertEqual(list(get_elements_text_and_html_by_attribute('class', 'foo', html)), [])
self.assertEqual(list(get_elements_text_and_html_by_attribute('class', 'no-such-foo', html)), [])
GET_ELEMENT_BY_TAG_TEST_STRING = '''
random text lorem ipsum</p>
<div>
this should be returned
<span>this should also be returned</span>
<div>
this should also be returned
</div>
closing tag above should not trick, so this should also be returned
</div>
but this text should not be returned
'''
GET_ELEMENT_BY_TAG_RES_OUTERDIV_HTML = GET_ELEMENT_BY_TAG_TEST_STRING.strip()[32:276]
GET_ELEMENT_BY_TAG_RES_OUTERDIV_TEXT = GET_ELEMENT_BY_TAG_RES_OUTERDIV_HTML[5:-6]
GET_ELEMENT_BY_TAG_RES_INNERSPAN_HTML = GET_ELEMENT_BY_TAG_TEST_STRING.strip()[78:119]
GET_ELEMENT_BY_TAG_RES_INNERSPAN_TEXT = GET_ELEMENT_BY_TAG_RES_INNERSPAN_HTML[6:-7]
def test_get_element_text_and_html_by_tag(self):
html = self.GET_ELEMENT_BY_TAG_TEST_STRING
self.assertEqual(
get_element_text_and_html_by_tag('div', html),
(self.GET_ELEMENT_BY_TAG_RES_OUTERDIV_TEXT, self.GET_ELEMENT_BY_TAG_RES_OUTERDIV_HTML))
self.assertEqual(
get_element_text_and_html_by_tag('span', html),
(self.GET_ELEMENT_BY_TAG_RES_INNERSPAN_TEXT, self.GET_ELEMENT_BY_TAG_RES_INNERSPAN_HTML))
self.assertRaises(compat_HTMLParseError, get_element_text_and_html_by_tag, 'article', html)
def test_iri_to_uri(self):
self.assertEqual(
iri_to_uri('https://www.google.com/search?q=foo&ie=utf-8&oe=utf-8&client=firefox-b'),
'https://www.google.com/search?q=foo&ie=utf-8&oe=utf-8&client=firefox-b') # Same
self.assertEqual(
iri_to_uri('https://www.google.com/search?q=Käsesoßenrührlöffel'), # German for cheese sauce stirring spoon
'https://www.google.com/search?q=K%C3%A4seso%C3%9Fenr%C3%BChrl%C3%B6ffel')
self.assertEqual(
iri_to_uri('https://www.google.com/search?q=lt<+gt>+eq%3D+amp%26+percent%25+hash%23+colon%3A+tilde~#trash=?&garbage=#'),
'https://www.google.com/search?q=lt%3C+gt%3E+eq%3D+amp%26+percent%25+hash%23+colon%3A+tilde~#trash=?&garbage=#')
self.assertEqual(
iri_to_uri('http://правозащита38.рф/category/news/'),
'http://xn--38-6kcaak9aj5chl4a3g.xn--p1ai/category/news/')
self.assertEqual(
iri_to_uri('http://www.правозащита38.рф/category/news/'),
'http://www.xn--38-6kcaak9aj5chl4a3g.xn--p1ai/category/news/')
self.assertEqual(
iri_to_uri('https://i❤.ws/emojidomain/👍👏🤝💪'),
'https://xn--i-7iq.ws/emojidomain/%F0%9F%91%8D%F0%9F%91%8F%F0%9F%A4%9D%F0%9F%92%AA')
self.assertEqual(
iri_to_uri('http://日本語.jp/'),
'http://xn--wgv71a119e.jp/')
self.assertEqual(
iri_to_uri('http://导航.中国/'),
'http://xn--fet810g.xn--fiqs8s/')
def test_clean_podcast_url(self):
self.assertEqual(clean_podcast_url('https://www.podtrac.com/pts/redirect.mp3/chtbl.com/track/5899E/traffic.megaphone.fm/HSW7835899191.mp3'), 'https://traffic.megaphone.fm/HSW7835899191.mp3')
self.assertEqual(clean_podcast_url('https://play.podtrac.com/npr-344098539/edge1.pod.npr.org/anon.npr-podcasts/podcast/npr/waitwait/2020/10/20201003_waitwait_wwdtmpodcast201003-015621a5-f035-4eca-a9a1-7c118d90bc3c.mp3'), 'https://edge1.pod.npr.org/anon.npr-podcasts/podcast/npr/waitwait/2020/10/20201003_waitwait_wwdtmpodcast201003-015621a5-f035-4eca-a9a1-7c118d90bc3c.mp3')
def test_LazyList(self):
it = list(range(10))
self.assertEqual(list(LazyList(it)), it)
self.assertEqual(LazyList(it).exhaust(), it)
self.assertEqual(LazyList(it)[5], it[5])
self.assertEqual(LazyList(it)[5:], it[5:])
self.assertEqual(LazyList(it)[:5], it[:5])
self.assertEqual(LazyList(it)[::2], it[::2])
self.assertEqual(LazyList(it)[1::2], it[1::2])
self.assertEqual(LazyList(it)[5::-1], it[5::-1])
self.assertEqual(LazyList(it)[6:2:-2], it[6:2:-2])
self.assertEqual(LazyList(it)[::-1], it[::-1])
self.assertTrue(LazyList(it))
self.assertFalse(LazyList(range(0)))
self.assertEqual(len(LazyList(it)), len(it))
self.assertEqual(repr(LazyList(it)), repr(it))
self.assertEqual(str(LazyList(it)), str(it))
self.assertEqual(list(LazyList(it, reverse=True)), it[::-1])
self.assertEqual(list(reversed(LazyList(it))[::-1]), it)
self.assertEqual(list(reversed(LazyList(it))[1:3:7]), it[::-1][1:3:7])
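    # The laziness check peeks at LazyList's private cache to confirm that only
    # the prefix needed to answer each requested index has been materialised.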
def test_LazyList_laziness(self):
def test(ll, idx, val, cache):
self.assertEqual(ll[idx], val)
self.assertEqual(getattr(ll, '_LazyList__cache'), list(cache))
ll = LazyList(range(10))
test(ll, 0, 0, range(1))
test(ll, 5, 5, range(6))
test(ll, -3, 7, range(10))
ll = LazyList(range(10), reverse=True)
test(ll, -1, 0, range(1))
test(ll, 3, 6, range(10))
ll = LazyList(itertools.count())
test(ll, 10, 10, range(11))
ll = reversed(ll)
test(ll, -15, 14, range(15))
def test_format_bytes(self):
self.assertEqual(format_bytes(0), '0.00B')
self.assertEqual(format_bytes(1000), '1000.00B')
self.assertEqual(format_bytes(1024), '1.00KiB')
self.assertEqual(format_bytes(1024**2), '1.00MiB')
self.assertEqual(format_bytes(1024**3), '1.00GiB')
self.assertEqual(format_bytes(1024**4), '1.00TiB')
self.assertEqual(format_bytes(1024**5), '1.00PiB')
self.assertEqual(format_bytes(1024**6), '1.00EiB')
self.assertEqual(format_bytes(1024**7), '1.00ZiB')
self.assertEqual(format_bytes(1024**8), '1.00YiB')
self.assertEqual(format_bytes(1024**9), '1024.00YiB')
def test_hide_login_info(self):
self.assertEqual(Config.hide_login_info(['-u', 'foo', '-p', 'bar']),
['-u', 'PRIVATE', '-p', 'PRIVATE'])
self.assertEqual(Config.hide_login_info(['-u']), ['-u'])
self.assertEqual(Config.hide_login_info(['-u', 'foo', '-u', 'bar']),
['-u', 'PRIVATE', '-u', 'PRIVATE'])
self.assertEqual(Config.hide_login_info(['--username=foo']),
['--username=PRIVATE'])
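    # Acquires write, append and read locks on the same file in turn and checks
    # which other modes are blocked while each lock is held.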
def test_locked_file(self):
TEXT = 'test_locked_file\n'
FILE = 'test_locked_file.ytdl'
MODES = 'war' # Order is important
try:
for lock_mode in MODES:
with locked_file(FILE, lock_mode, False) as f:
if lock_mode == 'r':
self.assertEqual(f.read(), TEXT * 2, 'Wrong file content')
else:
f.write(TEXT)
for test_mode in MODES:
testing_write = test_mode != 'r'
try:
with locked_file(FILE, test_mode, False):
pass
except (BlockingIOError, PermissionError):
if not testing_write: # FIXME
print(f'Known issue: Exclusive lock ({lock_mode}) blocks read access ({test_mode})')
continue
self.assertTrue(testing_write, f'{test_mode} is blocked by {lock_mode}')
else:
self.assertFalse(testing_write, f'{test_mode} is not blocked by {lock_mode}')
finally:
try:
os.remove(FILE)
except Exception:
pass
if __name__ == '__main__':
unittest.main()
| 46.417534 | 382 | 0.604141 | ["Unlicense"] | Yessssman/yt-dlp | test/test_utils.py | 84,977 | Python |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#pylint: skip-file
from nose.tools import assert_equal
from iot_message.cryptor.plain import Cryptor
from iot_message.message import Message
__author__ = 'Bartosz Kościów'
import iot_message.factory as factory
class TestCryptorPlain(object):
def setUp(self):
Message.chip_id = 'pc'
Message.node_name = 'Turkusik'
Message.drop_unencrypted = False
Message.encoders = []
Message.decoders = {}
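    # The plain Cryptor is a pass-through codec: encoding must leave the message
    # fields untouched and decoding must accept an unencrypted JSON payload.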
def test_encode_message(self):
Message.add_encoder(Cryptor())
msg = factory.MessageFactory.create()
inp = {"event": "channel.on", "parameters": {"channel": 0}, "response": "", "targets": ["node-north"]}
msg.set(inp)
msg.encrypt()
assert_equal(inp["event"], msg.data["event"])
assert_equal(inp["parameters"], msg.data["parameters"])
assert_equal(inp["targets"], msg.data["targets"])
def test_decrypt_message(self):
Message.add_decoder(Cryptor())
inp = """{"protocol": "iot:1", "node": "Turkusik", "chip_id": "pc", "event": "message.plain", "parameters": ["a"], "response": "", "targets": ["Turkusik"]}"""
msg = factory.MessageFactory.create(inp)
assert_equal(msg.data["event"], "message.plain")
assert_equal(msg.data["parameters"], ["a"])
assert_equal(msg.data["targets"], ['Turkusik'])
| 33.95122 | 166 | 0.630747 | ["MIT"] | bkosciow/python_iot-1 | iot_message/tests/test_plain_cryptor.py | 1,394 | Python |
#! /usr/bin/env python
import os
os.mkdir('_testing')
os.chdir('_testing')
os.environ['MPLBACKEND'] = 'Agg'
from pymt.components import FrostNumberGeoModel as Model
model = Model()
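# Print each of the component's default parameters as "name: value units".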
for default in model.defaults:
print('{name}: {val} {units}'.format(
name=default[0], val=default[1][0], units=default[1][1]))
| 21.6 | 65 | 0.682099 | ["MIT"] | csdms-stack/permamodel-frostnumbergeo-csdms-recipe | recipe/run_test.py | 324 | Python |
__author__ = 'tinglev'
import logging
import requests
from requests import HTTPError, ConnectTimeout, RequestException
from modules import environment
from modules.subscribers.slack import slack_util
from modules.event_system.event_system import subscribe_to_event, unsubscribe_from_event
from modules import deployment_util
LOG = logging.getLogger(__name__)
DEFAULT_FLOTTSBRO_API_BASE_URL = 'https://api-r.referens.sys.kth.se/api/pipeline'
def subscribe():
subscribe_to_event('deployment', handle_deployment)
def unsubscribe():
unsubscribe_from_event('deployment', handle_deployment)
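# handle_deployment forwards each deployment event to the Flottsbro API and
# returns the deployment unchanged.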
def handle_deployment(deployment):
global LOG
add(deployment)
return deployment
def get_base_url():
return environment.get_env_with_default_value(environment.FLOTTSBRO_API_BASE_URL, DEFAULT_FLOTTSBRO_API_BASE_URL)
def get_add_endpoint(cluster):
return '{}/v1/latest/{}'.format(get_base_url(), cluster)
def add(deployment):
call_endpoint(get_add_endpoint(deployment["cluster"]), deployment)
def get_headers():
api_key = environment.get_env(environment.FLOTTSBRO_API_KEY)
if not api_key:
        LOG.error('No FLOTTSBRO_API_KEY environment variable specified')
return None
return {
'api_key': api_key
}
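# call_endpoint POSTs the deployment to the given endpoint. The call is skipped
# when no API key is configured, and request errors are logged rather than raised.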
def call_endpoint(endpoint, deployment):
global LOG
try:
headers = get_headers()
if headers:
response = requests.post(endpoint, data=deployment, headers=headers)
LOG.debug('Calling "%s", response was "%s"', endpoint, response.text)
else:
LOG.info('Skipped calling flottsbro-api, header constraints not satisfied.')
except (HTTPError, ConnectTimeout, RequestException) as request_ex:
        LOG.error('Could not add deployment to Flottsbro-API: "%s"', request_ex)
| 31.561404 | 117 | 0.740411 | ["MIT"] | KTH/alvares | modules/subscribers/flottsbro/flottsbro.py | 1,799 | Python |
"""
This module stores constants used during the operations of the UI.
"""
# Application info.
CM_NAME = "CovertMark"
CM_VER = "0.1"
CM_RELEASE = "alpha"
CM_AUTHOR = "C Shi"
CM_LINK = "https://github.com/chongyangshi"
CM_LICENSE = "Please see LICENSE.md for terms of usage of this program."
CM_TITLE = """\
_____ _ ___ ___ _
/ __ \ | | | \/ | | |
| / \/ _____ _____ _ __| |_| . . | __ _ _ __| | __
| | / _ \ \ / / _ | '__| __| |\/| |/ _` | '__| |/ /
| \__/| (_) \ V | __| | | |_| | | | (_| | | | <
\____/\___/ \_/ \___|_| \__\_| |_/\__,_|_| |_|\_\\
"""
DIVIDER = "-" * 40
PROCEDURE_RUN_FIELDS = ["strategy", "run_order", "user_params", "pt_pcap",
"pt_filters", "pt_collection", "neg_pcap", "neg_filters", "neg_collection",
"user_defined_name"]
# UI colours.
class colours:
GREEN = '\033[92m'
YELLOW = '\033[93m'
PURPLE = '\033[95m'
RED = '\033[91m'
GRAY = '\033[90m'
BGC = "\033[;7m"
BOLD = '\033[1m'
ENDC = '\033[0m'
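# RATINGS and RATING_BANDS are keyed by (lower, upper) percentage ranges of how
# effective a strategy is at identifying the obfuscation protocol; they map each
# band to a display colour plus verdict text and to a short covertness label.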
RATINGS = {
(0, 75.0): (colours.GREEN, "This strategy is not very effective in identifying this obfuscation protocol."),
(75.0, 90.0): (colours.PURPLE, "This strategy is reasonably effective in identifying this obfuscation protocol, and can be deployed by a state censor with some difficulties."),
(90.0, 100.0): (colours.RED, "This strategy is very effective in identifying this obfuscation protocol, and can be easily deployed by a state censor.")
}
RATING_BANDS = {
(0, 75.0): "Good Covertness",
(75.0, 90.0): "Reasonable Covertness",
(90.0, 100.0): "Bad Covertness"
}
| 33.367347 | 180 | 0.582875 | ["MIT"] | chongyangshi/CovertMark | CovertMark/constants.py | 1,635 | Python |
import matplotlib.pyplot as plt
import numpy as np
def gen_data(n, start=0, end=10):
x = np.linspace(start, end, n)
y = np.sin(10*x) - x*x
return y
def gen_data_osc(n):
return np.array([1024 + (-2)**(-i/100) for i in range(n)])
def gen_data_rand(n):
return np.random.randn(n) + 0.3*np.linspace(0, 10, n)
def calc_cov(X, Y):
return np.sum((X - np.average(X))*(Y - np.average(Y))) / (X.shape[0] - 1)
def angular_coef(X,Y):
return calc_cov(X,Y)/calc_cov(X,X)
def linear_coef(a, X, Y):
return np.average(Y) - a*np.average(X)
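# Ordinary least squares via sample statistics: the slope is cov(X, Y) / cov(X, X)
# and the intercept is mean(Y) - slope * mean(X). For example, X = [0, 1, 2, 3]
# with Y = [1, 3, 5, 7] gives angular_coef(X, Y) == 2 and linear_coef(2, X, Y) == 1.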
count = 100
end = 100
time = np.linspace(0, end, count)
data = gen_data(count)
delta = end / count
preds = []
kg_preds = []
kg_prediction = 0
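# Each iteration fits the least-squares line to the first i samples and records
# the one-step-ahead prediction for time[i] + delta; kg_preds/kg_prediction
# appear unused in this script.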
for i in range(1, count):
a = angular_coef(time[:i], data[:i])
b = linear_coef(a, time[:i], data[:i])
prediction = (time[i]+delta)*a + b
preds.append(prediction)
avg_X = np.average(time[:i])
avg_Y = np.average(data[:i])
cov = calc_cov(time[:i], data[:i])
estimate = time*a + b
plt.scatter(time, data, label="Measurements", color="#FF5850")
plt.scatter(time[1:], preds, label="Least Squares Est.", color="#62B21C")
plt.plot(time, estimate, label="Final Least Squares Fit", color="#36A1FF")
plt.xlabel("Time")
plt.ylabel("Temperature")
plt.title("Least Squares Approximation")
# Show a legend identifying the measurements, per-step estimates and final fit.
plt.legend()
plt.show()
| 24.862069 | 78 | 0.613731 | ["MIT"] | Raphael-C-Almeida/Wireless-Sensor-Network | Data Fusion Test/Minimos Quadrados Puro.py | 1,446 | Python |
import numpy as np
from time import sleep
import torch
import torch.nn as nn
import torch.nn.functional as F
from core.models.common_layers import batch_norm, get_nddr
from core.tasks import get_tasks
from core.utils import AttrDict
from core.utils.losses import poly
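# Three multi-task variants follow: SingleTaskNet runs two fully independent
# backbones, SharedFeatureNet shares one backbone between two task heads, and
# NDDRNet fuses the two backbones' features at every stage through NDDR layers.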
class SingleTaskNet(nn.Module):
def __init__(self, cfg, net1, net2):
super(SingleTaskNet, self).__init__()
self.cfg = cfg
self.net1 = net1
self.net2 = net2
assert len(net1.stages) == len(net2.stages)
self.task1, self.task2 = get_tasks(cfg)
self.num_stages = len(net1.stages)
self._step = 0
def step(self):
self._step += 1
def loss(self, x, labels):
label_1, label_2 = labels
result = self.forward(x)
result.loss1 = self.task1.loss(result.out1, label_1)
result.loss2 = self.task2.loss(result.out2, label_2)
result.loss = result.loss1 + self.cfg.TRAIN.TASK2_FACTOR * result.loss2
return result
def forward(self, x):
N, C, H, W = x.size()
y = x.clone()
x = self.net1.base(x)
y = self.net2.base(y)
for stage_id in range(self.num_stages):
x = self.net1.stages[stage_id](x)
y = self.net2.stages[stage_id](y)
x = self.net1.head(x)
y = self.net2.head(y)
return AttrDict({'out1': x, 'out2': y})
class SharedFeatureNet(nn.Module):
def __init__(self, cfg, net1, net2):
super(SharedFeatureNet, self).__init__()
self.cfg = cfg
self.net1 = net1
self.net2 = net2
assert len(net1.stages) == len(net2.stages)
self.task1, self.task2 = get_tasks(cfg)
self.num_stages = len(net1.stages)
self._step = 0
def step(self):
self._step += 1
def loss(self, x, labels):
label_1, label_2 = labels
result = self.forward(x)
result.loss1 = self.task1.loss(result.out1, label_1)
result.loss2 = self.task2.loss(result.out2, label_2)
result.loss = result.loss1 + self.cfg.TRAIN.TASK2_FACTOR * result.loss2
return result
def forward(self, x):
x = self.net1.base(x)
for stage_id in range(self.num_stages):
x = self.net1.stages[stage_id](x)
out1 = self.net1.head(x)
out2 = self.net2.head(x)
return AttrDict({'out1': out1, 'out2': out2})
class NDDRNet(nn.Module):
def __init__(self, cfg, net1, net2):
super(NDDRNet, self).__init__()
self.cfg = cfg
self.net1 = net1
self.net2 = net2
assert len(net1.stages) == len(net2.stages)
self.task1, self.task2 = get_tasks(cfg)
self.num_stages = len(net1.stages)
nddrs = []
total_channels = 0
for stage_id in range(self.num_stages):
out_channels = net1.stages[stage_id].out_channels
assert out_channels == net2.stages[stage_id].out_channels
if stage_id in cfg.TRAIN.AUX_LAYERS:
total_channels += out_channels
nddr = get_nddr(cfg, out_channels, out_channels)
nddrs.append(nddr)
nddrs = nn.ModuleList(nddrs)
self.aux = cfg.TRAIN.AUX
if self.aux:
print("Using shortcut")
self.aux_conv1 = nn.Sequential(
nn.Conv2d(total_channels, 256, kernel_size=3, padding=1, bias=False),
batch_norm(256, eps=1e-03, momentum=cfg.MODEL.BATCH_NORM_MOMENTUM),
nn.ReLU(inplace=True),
nn.Dropout2d(p=0.5),
nn.Conv2d(256, cfg.MODEL.NET1_CLASSES, kernel_size=1)
)
self.aux_conv2 = nn.Sequential(
nn.Conv2d(total_channels, 256, kernel_size=3, padding=1, bias=False),
batch_norm(256, eps=1e-03, momentum=cfg.MODEL.BATCH_NORM_MOMENTUM),
nn.ReLU(inplace=True),
nn.Dropout2d(p=0.5),
nn.Conv2d(256, cfg.MODEL.NET2_CLASSES, kernel_size=1)
)
self.nddrs = nn.ModuleDict({
'nddrs': nddrs,
})
self._step = 0
def step(self):
self._step += 1
def loss(self, x, labels):
label_1, label_2 = labels
result = self.forward(x)
result.loss1 = self.task1.loss(result.out1, label_1)
result.loss2 = self.task2.loss(result.out2, label_2)
result.loss = result.loss1 + self.cfg.TRAIN.TASK2_FACTOR * result.loss2
if self.aux:
result.aux_loss1 = self.task1.loss(result.aux1, label_1)
result.aux_loss2 = self.task2.loss(result.aux2, label_2)
result.aux_loss = result.aux_loss1 + self.cfg.TRAIN.TASK2_FACTOR * result.aux_loss2
result.aux_weight = poly(start=self.cfg.TRAIN.AUX_WEIGHT, end=0.,
steps=self._step, total_steps=self.cfg.TRAIN.STEPS,
period=self.cfg.TRAIN.AUX_PERIOD,
power=1.)
result.loss += result.aux_weight * result.aux_loss
return result
def forward(self, x):
N, C, H, W = x.size()
y = x.clone()
x = self.net1.base(x)
y = self.net2.base(y)
xs, ys = [], []
for stage_id in range(self.num_stages):
x = self.net1.stages[stage_id](x)
y = self.net2.stages[stage_id](y)
if isinstance(x, list):
x[0], y[0] = self.nddrs['nddrs'][stage_id](x[0], y[0])
else:
x, y = self.nddrs['nddrs'][stage_id](x, y)
if self.aux and self.training and stage_id in self.cfg.TRAIN.AUX_LAYERS:
xs.append(x)
ys.append(y)
x = self.net1.head(x)
y = self.net2.head(y)
result = AttrDict({'out1': x, 'out2': y})
if self.aux and self.training:
_, _, h, w = x.size()
aux_x = torch.cat([F.interpolate(_x, (h, w), mode='bilinear', align_corners=True) for _x in xs[:-1]] + [xs[-1]],
dim=1)
aux_y = torch.cat([F.interpolate(_y, (h, w), mode='bilinear', align_corners=True) for _y in ys[:-1]] + [ys[-1]],
dim=1)
result.aux1 = self.aux_conv1(aux_x)
result.aux2 = self.aux_conv2(aux_y)
return result
| 36.414773 | 124 | 0.559058 | ["Apache-2.0"] | WZzhaoyi/MTLNAS | core/models/nddr_net.py | 6,409 | Python |
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class SteamScrapeSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Request, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class SteamScrapeDownloaderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
        # Must either:
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
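# --- Illustrative note (added; not part of the generated template) ---
# The two middleware classes above only take effect once enabled in the
# project's settings.py. A typical configuration would look like the
# following (543 is just the priority suggested by `scrapy startproject`):
#
#     SPIDER_MIDDLEWARES = {
#         'steam_scrape.middlewares.SteamScrapeSpiderMiddleware': 543,
#     }
#     DOWNLOADER_MIDDLEWARES = {
#         'steam_scrape.middlewares.SteamScrapeDownloaderMiddleware': 543,
#     }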
| 34.663462 | 78 | 0.666852 | ["MIT"] | argwood/IndieP | steam-scrapy/steam_scrape/middlewares.py | 3,607 | Python |
"""The tests for the Canary sensor platform."""
import copy
import unittest
from unittest.mock import Mock
from homeassistant.components.canary import DATA_CANARY
from homeassistant.components.sensor import canary
from homeassistant.components.sensor.canary import CanarySensor, \
SENSOR_TYPES, ATTR_AIR_QUALITY, STATE_AIR_QUALITY_NORMAL, \
STATE_AIR_QUALITY_ABNORMAL, STATE_AIR_QUALITY_VERY_ABNORMAL
from tests.common import (get_test_home_assistant)
from tests.components.test_canary import mock_device, mock_location
VALID_CONFIG = {
"canary": {
"username": "[email protected]",
"password": "bar",
}
}
class TestCanarySensorSetup(unittest.TestCase):
"""Test the Canary platform."""
DEVICES = []
def add_entities(self, devices, action):
"""Mock add devices."""
for device in devices:
self.DEVICES.append(device)
def setUp(self):
"""Initialize values for this testcase class."""
self.hass = get_test_home_assistant()
self.config = copy.deepcopy(VALID_CONFIG)
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_setup_sensors(self):
"""Test the sensor setup."""
online_device_at_home = mock_device(20, "Dining Room", True, "Canary")
offline_device_at_home = mock_device(21, "Front Yard", False, "Canary")
online_device_at_work = mock_device(22, "Office", True, "Canary")
self.hass.data[DATA_CANARY] = Mock()
self.hass.data[DATA_CANARY].locations = [
mock_location("Home", True, devices=[online_device_at_home,
offline_device_at_home]),
mock_location("Work", True, devices=[online_device_at_work]),
]
canary.setup_platform(self.hass, self.config, self.add_entities, None)
assert 6 == len(self.DEVICES)
def test_temperature_sensor(self):
"""Test temperature sensor with fahrenheit."""
device = mock_device(10, "Family Room", "Canary")
location = mock_location("Home", False)
data = Mock()
data.get_reading.return_value = 21.1234
sensor = CanarySensor(data, SENSOR_TYPES[0], location, device)
sensor.update()
assert "Home Family Room Temperature" == sensor.name
assert "°C" == sensor.unit_of_measurement
assert 21.12 == sensor.state
assert "mdi:thermometer" == sensor.icon
def test_temperature_sensor_with_none_sensor_value(self):
"""Test temperature sensor with fahrenheit."""
device = mock_device(10, "Family Room", "Canary")
location = mock_location("Home", False)
data = Mock()
data.get_reading.return_value = None
sensor = CanarySensor(data, SENSOR_TYPES[0], location, device)
sensor.update()
assert sensor.state is None
def test_humidity_sensor(self):
"""Test humidity sensor."""
device = mock_device(10, "Family Room", "Canary")
location = mock_location("Home")
data = Mock()
data.get_reading.return_value = 50.4567
sensor = CanarySensor(data, SENSOR_TYPES[1], location, device)
sensor.update()
assert "Home Family Room Humidity" == sensor.name
assert "%" == sensor.unit_of_measurement
assert 50.46 == sensor.state
assert "mdi:water-percent" == sensor.icon
def test_air_quality_sensor_with_very_abnormal_reading(self):
"""Test air quality sensor."""
device = mock_device(10, "Family Room", "Canary")
location = mock_location("Home")
data = Mock()
data.get_reading.return_value = 0.4
sensor = CanarySensor(data, SENSOR_TYPES[2], location, device)
sensor.update()
assert "Home Family Room Air Quality" == sensor.name
assert sensor.unit_of_measurement is None
assert 0.4 == sensor.state
assert "mdi:weather-windy" == sensor.icon
air_quality = sensor.device_state_attributes[ATTR_AIR_QUALITY]
assert STATE_AIR_QUALITY_VERY_ABNORMAL == air_quality
def test_air_quality_sensor_with_abnormal_reading(self):
"""Test air quality sensor."""
device = mock_device(10, "Family Room", "Canary")
location = mock_location("Home")
data = Mock()
data.get_reading.return_value = 0.59
sensor = CanarySensor(data, SENSOR_TYPES[2], location, device)
sensor.update()
assert "Home Family Room Air Quality" == sensor.name
assert sensor.unit_of_measurement is None
assert 0.59 == sensor.state
assert "mdi:weather-windy" == sensor.icon
air_quality = sensor.device_state_attributes[ATTR_AIR_QUALITY]
assert STATE_AIR_QUALITY_ABNORMAL == air_quality
def test_air_quality_sensor_with_normal_reading(self):
"""Test air quality sensor."""
device = mock_device(10, "Family Room", "Canary")
location = mock_location("Home")
data = Mock()
data.get_reading.return_value = 1.0
sensor = CanarySensor(data, SENSOR_TYPES[2], location, device)
sensor.update()
assert "Home Family Room Air Quality" == sensor.name
assert sensor.unit_of_measurement is None
assert 1.0 == sensor.state
assert "mdi:weather-windy" == sensor.icon
air_quality = sensor.device_state_attributes[ATTR_AIR_QUALITY]
assert STATE_AIR_QUALITY_NORMAL == air_quality
def test_air_quality_sensor_with_none_sensor_value(self):
"""Test air quality sensor."""
device = mock_device(10, "Family Room", "Canary")
location = mock_location("Home")
data = Mock()
data.get_reading.return_value = None
sensor = CanarySensor(data, SENSOR_TYPES[2], location, device)
sensor.update()
assert sensor.state is None
assert sensor.device_state_attributes is None
def test_battery_sensor(self):
"""Test battery sensor."""
device = mock_device(10, "Family Room", "Canary Flex")
location = mock_location("Home")
data = Mock()
data.get_reading.return_value = 70.4567
sensor = CanarySensor(data, SENSOR_TYPES[4], location, device)
sensor.update()
assert "Home Family Room Battery" == sensor.name
assert "%" == sensor.unit_of_measurement
assert 70.46 == sensor.state
assert "mdi:battery-70" == sensor.icon
def test_wifi_sensor(self):
"""Test battery sensor."""
device = mock_device(10, "Family Room", "Canary Flex")
location = mock_location("Home")
data = Mock()
data.get_reading.return_value = -57
sensor = CanarySensor(data, SENSOR_TYPES[3], location, device)
sensor.update()
assert "Home Family Room Wifi" == sensor.name
assert "dBm" == sensor.unit_of_measurement
assert -57 == sensor.state
assert "mdi:wifi" == sensor.icon
| 34.219512 | 79 | 0.648753 | ["Apache-2.0"] | 27tech/home-assistant | tests/components/sensor/test_canary.py | 7,016 | Python |
import linelib
import datetime
import signal
def handler(x, y):
pass
signal.signal(signal.SIGUSR1, handler)
signal.signal(signal.SIGALRM, handler)
while True:
linelib.sendblock("date", {"full_text": datetime.datetime.now().strftime(
"%Y-%m-%e %H:%M:%S"
)})
linelib.sendPID("date")
linelib.waitsig(1)
| 18.444444 | 77 | 0.671687 | ["MIT"] | 5225225/bar | modules/timeblock.py | 332 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-07-07 21:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20160706_2232'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='picture_path',
field=models.CharField(blank=True, max_length=128, null=True),
),
]
| 23 | 74 | 0.6294 | ["MIT"] | otherland8/market-place | market_place/users/migrations/0003_auto_20160708_0036.py | 483 | Python |
# Copyright (c) 2020 DDN. All rights reserved.
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
from collections import defaultdict
import json
from django.db.models import Q
from django.contrib.contenttypes.models import ContentType
class LockCache(object):
# Lock change receivers are called whenever a change occurs to the locks. It allows something to
# respond to changes. An example would be long polling.
# The receivers are called with the lock being removed and LOCK_ADD or LOCK_REMOVE as the paramter.
lock_change_receivers = []
LOCK_ADD = 1
LOCK_REMOVE = 2
def __init__(self):
from chroma_core.models import Job, StateLock
self.write_locks = []
self.write_by_item = defaultdict(list)
self.read_locks = []
self.read_by_item = defaultdict(list)
self.all_by_job = defaultdict(list)
self.all_by_item = defaultdict(list)
for job in Job.objects.filter(~Q(state="complete")):
if job.locks_json:
locks = json.loads(job.locks_json)
for lock in locks:
self._add(StateLock.from_dict(job, lock))
def call_receivers(self, lock, add_remove):
for lock_change_receiver in self.lock_change_receivers:
lock_change_receiver(lock, add_remove)
def remove_job(self, job):
locks = list(self.all_by_job[job.id])
n = len(locks)
for lock in locks:
if lock.write:
self.write_locks.remove(lock)
self.write_by_item[lock.locked_item].remove(lock)
else:
self.read_locks.remove(lock)
self.read_by_item[lock.locked_item].remove(lock)
self.all_by_job[job.id].remove(lock)
self.all_by_item[lock.locked_item].remove(lock)
self.call_receivers(lock, self.LOCK_REMOVE)
return n
def add(self, lock):
self._add(lock)
def _add(self, lock):
assert lock.job.id is not None
if lock.write:
self.write_locks.append(lock)
self.write_by_item[lock.locked_item].append(lock)
else:
self.read_locks.append(lock)
self.read_by_item[lock.locked_item].append(lock)
self.all_by_job[lock.job.id].append(lock)
self.all_by_item[lock.locked_item].append(lock)
self.call_receivers(lock, self.LOCK_ADD)
def get_by_job(self, job):
return self.all_by_job[job.id]
def get_all(self, locked_item):
return self.all_by_item[locked_item]
def get_latest_write(self, locked_item, not_job=None):
try:
if not_job is not None:
return sorted(
[l for l in self.write_by_item[locked_item] if l.job != not_job],
lambda a, b: cmp(a.job.id, b.job.id),
)[-1]
return sorted(self.write_by_item[locked_item], lambda a, b: cmp(a.job.id, b.job.id))[-1]
except IndexError:
return None
def get_read_locks(self, locked_item, after, not_job):
return [x for x in self.read_by_item[locked_item] if after <= x.job.id and x.job != not_job]
def get_write(self, locked_item):
return self.write_by_item[locked_item]
def get_by_locked_item(self, item):
return self.all_by_item[item]
def get_write_by_locked_item(self):
result = {}
for locked_item, locks in self.write_by_item.items():
if locks:
result[locked_item] = sorted(locks, lambda a, b: cmp(a.job.id, b.job.id))[-1]
return result
def lock_change_receiver():
"""
A decorator for connecting receivers to signals that a lock has change.
@receiver(post_save, sender=MyModel)
def signal_receiver(sender, **kwargs):
...
"""
def _decorator(func):
LockCache.lock_change_receivers.append(func)
return func
return _decorator
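# --- Illustrative usage sketch (added; `log_lock_change` is hypothetical) ---
# A receiver registered through lock_change_receiver() is called with the
# lock and either LockCache.LOCK_ADD or LockCache.LOCK_REMOVE:
#
#     @lock_change_receiver()
#     def log_lock_change(lock, add_remove):
#         action = "add" if add_remove == LockCache.LOCK_ADD else "remove"
#         print("lock %s (%s) for job %s" % (lock.uuid, action, lock.job.id))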
def to_lock_json(lock, add_remove=LockCache.LOCK_ADD):
if getattr(lock.locked_item, "downcast", None) and callable(lock.locked_item.downcast):
item = lock.locked_item.downcast()
else:
item = lock.locked_item
return {
"job_id": lock.job.id,
"content_type_id": ContentType.objects.get_for_model(item).id,
"item_id": lock.locked_item.id,
"uuid": lock.uuid,
"description": lock.job.description(),
"lock_type": "write" if lock.write else "read",
"action": "add" if add_remove == LockCache.LOCK_ADD else "remove",
}
| 33.028571 | 103 | 0.632353 | ["MIT"] | beevans/integrated-manager-for-lustre | chroma_core/services/job_scheduler/lock_cache.py | 4,624 | Python |
from django.urls import path, include
urlpatterns = [
path('', include(('rest_friendship.urls', 'rest_friendship'), namespace='rest_friendship')),
]
| 25.666667 | 96 | 0.720779 | ["ISC"] | sflems/django-rest-friendship | tests/urls.py | 154 | Python |
#for fixture loading
| 10.5 | 20 | 0.809524 | ["BSD-2-Clause"] | chalkchisel/django-rest-framework | examples/permissionsexample/models.py | 21 | Python |
"""
Support for interfacing with the XBMC/Kodi JSON-RPC API.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.kodi/
"""
import asyncio
import logging
import urllib
import aiohttp
import voluptuous as vol
from homeassistant.components.media_player import (
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PREVIOUS_TRACK, SUPPORT_SEEK,
SUPPORT_PLAY_MEDIA, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, SUPPORT_STOP,
SUPPORT_TURN_OFF, SUPPORT_PLAY, SUPPORT_VOLUME_STEP, MediaPlayerDevice,
PLATFORM_SCHEMA)
from homeassistant.const import (
STATE_IDLE, STATE_OFF, STATE_PAUSED, STATE_PLAYING, CONF_HOST, CONF_NAME,
CONF_PORT, CONF_USERNAME, CONF_PASSWORD)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['jsonrpc-async==0.2']
_LOGGER = logging.getLogger(__name__)
CONF_TURN_OFF_ACTION = 'turn_off_action'
DEFAULT_NAME = 'Kodi'
DEFAULT_PORT = 8080
DEFAULT_TIMEOUT = 5
TURN_OFF_ACTION = [None, 'quit', 'hibernate', 'suspend', 'reboot', 'shutdown']
SUPPORT_KODI = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | SUPPORT_SEEK | \
SUPPORT_PLAY_MEDIA | SUPPORT_STOP | SUPPORT_PLAY | SUPPORT_VOLUME_STEP
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_TURN_OFF_ACTION, default=None): vol.In(TURN_OFF_ACTION),
vol.Inclusive(CONF_USERNAME, 'auth'): cv.string,
vol.Inclusive(CONF_PASSWORD, 'auth'): cv.string,
})
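# --- Illustrative configuration (added; values below are placeholders) ---
# The schema above corresponds to a configuration.yaml entry such as:
#
#     media_player:
#       - platform: kodi
#         host: 192.168.0.123
#         port: 8080
#         name: Living room Kodi
#         turn_off_action: quit
#         username: kodi
#         password: YOUR_PASSWORD
#
# Only host is required; username and password must be supplied together
# (vol.Inclusive), and the host value should not include an http:// prefix.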
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Setup the Kodi platform."""
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
if host.startswith('http://') or host.startswith('https://'):
host = host.lstrip('http://').lstrip('https://')
_LOGGER.warning(
"Kodi host name should no longer conatin http:// See updated "
"definitions here: "
"https://home-assistant.io/components/media_player.kodi/")
entity = KodiDevice(
hass,
name=config.get(CONF_NAME),
host=host, port=port,
username=config.get(CONF_USERNAME),
password=config.get(CONF_PASSWORD),
turn_off_action=config.get(CONF_TURN_OFF_ACTION))
yield from async_add_entities([entity], update_before_add=True)
class KodiDevice(MediaPlayerDevice):
"""Representation of a XBMC/Kodi device."""
def __init__(self, hass, name, host, port, username=None, password=None,
turn_off_action=None):
"""Initialize the Kodi device."""
import jsonrpc_async
self.hass = hass
self._name = name
kwargs = {
'timeout': DEFAULT_TIMEOUT,
'session': async_get_clientsession(hass),
}
if username is not None:
kwargs['auth'] = aiohttp.BasicAuth(username, password)
image_auth_string = "{}:{}@".format(username, password)
else:
image_auth_string = ""
self._http_url = 'http://{}:{}/jsonrpc'.format(host, port)
self._image_url = 'http://{}{}:{}/image'.format(
image_auth_string, host, port)
self._server = jsonrpc_async.Server(self._http_url, **kwargs)
self._turn_off_action = turn_off_action
self._players = list()
self._properties = None
self._item = None
self._app_properties = None
@property
def name(self):
"""Return the name of the device."""
return self._name
@asyncio.coroutine
def _get_players(self):
"""Return the active player objects or None."""
import jsonrpc_async
try:
return (yield from self._server.Player.GetActivePlayers())
except jsonrpc_async.jsonrpc.TransportError:
if self._players is not None:
_LOGGER.info('Unable to fetch kodi data')
_LOGGER.debug('Unable to fetch kodi data', exc_info=True)
return None
@property
def state(self):
"""Return the state of the device."""
if self._players is None:
return STATE_OFF
if len(self._players) == 0:
return STATE_IDLE
if self._properties['speed'] == 0 and not self._properties['live']:
return STATE_PAUSED
else:
return STATE_PLAYING
@asyncio.coroutine
def async_update(self):
"""Retrieve latest state."""
self._players = yield from self._get_players()
if self._players is not None and len(self._players) > 0:
player_id = self._players[0]['playerid']
assert isinstance(player_id, int)
self._properties = yield from self._server.Player.GetProperties(
player_id,
['time', 'totaltime', 'speed', 'live']
)
self._item = (yield from self._server.Player.GetItem(
player_id,
['title', 'file', 'uniqueid', 'thumbnail', 'artist']
))['item']
self._app_properties = \
yield from self._server.Application.GetProperties(
['volume', 'muted']
)
else:
self._properties = None
self._item = None
self._app_properties = None
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
if self._app_properties is not None:
return self._app_properties['volume'] / 100.0
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
if self._app_properties is not None:
return self._app_properties['muted']
@property
def media_content_id(self):
"""Content ID of current playing media."""
if self._item is not None:
return self._item.get('uniqueid', None)
@property
def media_content_type(self):
"""Content type of current playing media."""
if self._players is not None and len(self._players) > 0:
return self._players[0]['type']
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
if self._properties is not None and not self._properties['live']:
total_time = self._properties['totaltime']
return (
total_time['hours'] * 3600 +
total_time['minutes'] * 60 +
total_time['seconds'])
@property
def media_image_url(self):
"""Image url of current playing media."""
if self._item is None:
return None
url_components = urllib.parse.urlparse(self._item['thumbnail'])
if url_components.scheme == 'image':
return '{}/{}'.format(
self._image_url,
urllib.parse.quote_plus(self._item['thumbnail']))
@property
def media_title(self):
"""Title of current playing media."""
# find a string we can use as a title
if self._item is not None:
return self._item.get(
'title',
self._item.get('label', self._item.get('file', 'unknown')))
@property
def supported_media_commands(self):
"""Flag of media commands that are supported."""
supported_media_commands = SUPPORT_KODI
if self._turn_off_action in TURN_OFF_ACTION:
supported_media_commands |= SUPPORT_TURN_OFF
return supported_media_commands
@asyncio.coroutine
def async_turn_off(self):
"""Execute turn_off_action to turn off media player."""
if self._turn_off_action == 'quit':
yield from self._server.Application.Quit()
elif self._turn_off_action == 'hibernate':
yield from self._server.System.Hibernate()
elif self._turn_off_action == 'suspend':
yield from self._server.System.Suspend()
elif self._turn_off_action == 'reboot':
yield from self._server.System.Reboot()
elif self._turn_off_action == 'shutdown':
yield from self._server.System.Shutdown()
else:
_LOGGER.warning('turn_off requested but turn_off_action is none')
@asyncio.coroutine
def async_volume_up(self):
"""Volume up the media player."""
assert (
yield from self._server.Input.ExecuteAction('volumeup')) == 'OK'
@asyncio.coroutine
def async_volume_down(self):
"""Volume down the media player."""
assert (
yield from self._server.Input.ExecuteAction('volumedown')) == 'OK'
def async_set_volume_level(self, volume):
"""Set volume level, range 0..1.
This method must be run in the event loop and returns a coroutine.
"""
return self._server.Application.SetVolume(int(volume * 100))
def async_mute_volume(self, mute):
"""Mute (true) or unmute (false) media player.
This method must be run in the event loop and returns a coroutine.
"""
return self._server.Application.SetMute(mute)
@asyncio.coroutine
def async_set_play_state(self, state):
"""Helper method for play/pause/toggle."""
players = yield from self._get_players()
if len(players) != 0:
yield from self._server.Player.PlayPause(
players[0]['playerid'], state)
def async_media_play_pause(self):
"""Pause media on media player.
This method must be run in the event loop and returns a coroutine.
"""
return self.async_set_play_state('toggle')
def async_media_play(self):
"""Play media.
This method must be run in the event loop and returns a coroutine.
"""
return self.async_set_play_state(True)
def async_media_pause(self):
"""Pause the media player.
This method must be run in the event loop and returns a coroutine.
"""
return self.async_set_play_state(False)
@asyncio.coroutine
def async_media_stop(self):
"""Stop the media player."""
players = yield from self._get_players()
if len(players) != 0:
yield from self._server.Player.Stop(players[0]['playerid'])
@asyncio.coroutine
def _goto(self, direction):
"""Helper method used for previous/next track."""
players = yield from self._get_players()
if len(players) != 0:
if direction == 'previous':
# first seek to position 0. Kodi goes to the beginning of the
# current track if the current track is not at the beginning.
yield from self._server.Player.Seek(players[0]['playerid'], 0)
yield from self._server.Player.GoTo(
players[0]['playerid'], direction)
def async_media_next_track(self):
"""Send next track command.
This method must be run in the event loop and returns a coroutine.
"""
return self._goto('next')
def async_media_previous_track(self):
"""Send next track command.
This method must be run in the event loop and returns a coroutine.
"""
return self._goto('previous')
@asyncio.coroutine
def async_media_seek(self, position):
"""Send seek command."""
players = yield from self._get_players()
time = {}
time['milliseconds'] = int((position % 1) * 1000)
position = int(position)
time['seconds'] = int(position % 60)
position /= 60
time['minutes'] = int(position % 60)
position /= 60
time['hours'] = int(position)
if len(players) != 0:
yield from self._server.Player.Seek(players[0]['playerid'], time)
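    # --- Worked example (comment added for clarity) ---
    # The fractional seek position is split into the JSON-RPC time object
    # Kodi expects: position = 3725.5 seconds becomes
    # {'hours': 1, 'minutes': 2, 'seconds': 5, 'milliseconds': 500}.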
def async_play_media(self, media_type, media_id, **kwargs):
"""Send the play_media command to the media player.
This method must be run in the event loop and returns a coroutine.
"""
if media_type == "CHANNEL":
return self._server.Player.Open(
{"item": {"channelid": int(media_id)}})
else:
return self._server.Player.Open(
{"item": {"file": str(media_id)}})
| 33.4 | 78 | 0.621876 | ["MIT"] | sbidoul/home-assistant | homeassistant/components/media_player/kodi.py | 12,525 | Python |
## DQN Tutorial
## Implementation from https://github.com/FitMachineLearning
import torch
import gym
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from dataclasses import dataclass
from typing import Any
from random import random
@dataclass
class sars:
state: Any
action: Any
reward: float
next_state: Any
done: bool
qval: float
advantage: float = 0.0
class DQNAgent:
def __init__(self,actor_model,critic_model):
self.actor_model = actor_model
self.critic_model = critic_model
def get_actions(self, observations):
# import ipdb; ipdb.set_trace()
guessed_actions = self.actor_model(torch.Tensor(observations).to(self.actor_model.device))
return guessed_actions
def get_predicted_Q_values(self,observation_and_action):
guessed_Qs = self.critic_model(torch.Tensor(observation_and_action))
        return guessed_Qs  # the critic's predicted Q value(s)
def update_target_model(self):
self.targetModel.load_state_dict(self.model.state_dict())
class ActorModel(nn.Module):
def __init__(self, obs_shape, action_shape,lr):
super(ActorModel,self).__init__()
assert len(obs_shape) ==1, "This network only works on flat observations"
self.obs_shape = obs_shape
self.action_shape = action_shape
# import ipdb; ipdb.set_trace()
self.net = torch.nn.Sequential(
torch.nn.Linear(obs_shape[0],512),
torch.nn.ReLU(),
# torch.nn.Linear(1024,256),
# torch.nn.ReLU(),
torch.nn.Linear(512,action_shape[0])
)
self.opt = optim.Adam(self.net.parameters(),lr=lr)
if torch.cuda.is_available():
print("Using CUDA")
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cuda:1')
self.to(self.device)
def forward(self, x):
return self.net(x)
class CriticModel(nn.Module):
def __init__(self, obs_shape, action_shape,lr):
super(CriticModel,self).__init__()
assert len(obs_shape) ==1, "This network only works on flat observations"
self.obs_shape = obs_shape
self.action_shape = action_shape
self.net = torch.nn.Sequential(
torch.nn.Linear(obs_shape[0]+action_shape[0],512),
torch.nn.ReLU(),
# torch.nn.Linear(2048,512),
# torch.nn.ReLU(),
            torch.nn.Linear(512,1)  # one output because we are predicting a Q value
)
self.opt = optim.Adam(self.net.parameters(),lr=lr)
if torch.cuda.is_available():
print("Using CUDA")
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cuda:1')
self.to(self.device)
def forward(self, x):
return self.net(x)
class ReplayBuffer:
def __init__(self, buffer_size = 1000):
# self.buffer_size = buffer_size
self.buffer_size = buffer_size
self.buffer = np.empty((buffer_size),dtype=object)
# self.buffer = []
self.index = 0
def insert(self, sars):
# self.buffer.append(sars)
# print("inserting index ", self.index, "@",self.index%self.buffer_size)
if(self.index == 10):
print("first 10 ",self.buffer[0:10])
# import ipdb; ipdb.set_trace()
# if(self.index > self.buffer_size and self.index%self.buffer_size==0):
# print("first 10 ",self.buffer[0:10])
# print("last 10 ",self.buffer[-10:])
# print("")
# import ipdb; ipdb.set_trace()
self.buffer[self.index%self.buffer_size] = sars
self.index+=1
# self.buffer.append(sars)
# if(len(self.buffer)>self.buffer_size):
# self.buffer = self.buffer[1:]
# # print("Clipping Buffer at size", len(self.buffer))
def sample(self, num_samples,current_episode_steps):
# assert num_samples < min(len(self.buffer),self.index)
# if num_samples>self.index:
# print("sampling n ",min(num_samples,self.index))
a = self.buffer[0:min(self.index,self.buffer_size)]
if len(self.buffer) > 0:
return np.random.choice(a, min(num_samples,self.index))
else:
return []
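# --- Illustrative usage sketch (added; obs/act/r/next_obs/done/steps are assumed) ---
# The ring buffer above overwrites its oldest entry once buffer_size is
# exceeded, and sample() draws uniformly from the filled portion:
#
#     buffer = ReplayBuffer(buffer_size=1000)
#     buffer.insert(sars(state=obs, action=act, reward=r,
#                        next_state=next_obs, done=done, qval=0.0))
#     batch = buffer.sample(num_samples=32, current_episode_steps=steps)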
| 34.992063 | 99 | 0.608528 | ["MIT"] | FitMachineLearning/FitML | Pytorch/ActorCritic/agent_and_model.py | 4,409 | Python |
""" library to take autodiff and execute a computation graph """
from __future__ import absolute_import
import numpy as np
from .Node import Op
from .. import ndarray
from ..stream import *
import ctypes
import os
from pynvml import *
FLAG_SHOW_GRAPH = False
G_NODE_ID = 0
NAME_RULE = 1
def communicate_init(worker_num, worker_id, source_ip, target_ip):
global lib_communicate
# lib_communicate.DL_Connect_Init(2, 0, "*:4001", "localhost:4002")
# lib_communicate.DL_Connect_Init(2, 1, "*:4002", "localhost:4001")
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
lib_path = os.path.join(curr_path, '../../build/lib/')
path_to_so_file = os.path.join(lib_path, "lib_communication.so")
lib_communicate = ctypes.cdll.LoadLibrary(path_to_so_file)
lib_communicate.DL_Connect_Init(
worker_num, worker_id, source_ip, target_ip)
def communicate_finish():
lib_communicate.DL_Communicate_Close()
class Distributed_CommunicateOp(Op):
def __call__(self, nodeA):
new_node = Op.__call__(self)
new_node.inputs = [nodeA]
new_node.name = "Distributed_Communicate(%s)" % (nodeA.name)
# print nodeA.name
return new_node
def compute(self, node, input_vals, output_val, use_numpy=True):
after_reduce_gradient_cpu = ndarray.empty(
shape=output_val.shape, ctx=ndarray.cpu(0))
if use_numpy:
gradient_val_cpu = ndarray.array(input_vals[0], ctx=ndarray.cpu(0))
else:
gradient_val_cpu = ndarray.array(
input_vals[0].asnumpy(), ctx=ndarray.cpu(0))
# print gradient_val_cpu.asnumpy()
lib_communicate.DL_Communicate_Init(gradient_val_cpu.handle)
lib_communicate.DL_Communicate(
gradient_val_cpu.handle, after_reduce_gradient_cpu.handle)
# print after_reduce_gradient_cpu.asnumpy()
if use_numpy:
output_val[:] = after_reduce_gradient_cpu.asnumpy()
else:
after_reduce_gradient_cpu.copyto(output_val)
def gradient(self, node, output_grad):
raise NotImplementedError
def infer_shape(self, node, input_shapes):
return input_shapes[0]
distributed_communicate_op = Distributed_CommunicateOp()
class StreamExecutor(object):
"""Executor computes values for given set of nodes in computation graph."""
def __init__(self, eval_node_list, ctx = None, stream = None, policy = None):
"""
Parameters
----------
eval_node_list: list of nodes whose values need to be computed.
ctx: runtime DLContext, default is None which means np.ndarray on cpu
topo_order: list of nodes in topological order
node_to_shape_map: dict from node to shape of the node
node_to_arr_map: dict from node to ndarray.NDArray allocated for node
feed_shapes: shapes of feed_dict from last run(...)
"""
self.eval_node_list = eval_node_list
self.ctx = ctx
if stream is None:
self.stream = create_stream_handle(ctx)
else:
self.stream = stream
self.stream.sync()
self.topo_order = find_topo_sort(self.eval_node_list)
self.node_to_shape_map = None
self.node_to_arr_map = None
self.feed_shapes = None
self.policy = policy
if self.policy == 'swap':
self.swap_queue = []
def infer_shape(self, feed_shapes):
"""Given shapes of feed_dict nodes, infer shape for all nodes in graph.
Implementation note:
Iteratively calls node.op.infer_shape to infer shapes.
Node shapes stored in self.node_to_shape_map.
Parameters
----------
feed_shapes: node->shapes mapping for feed_dict nodes.
"""
"""TODO: Your code here"""
self.node_to_shape_map = {}
for node in self.topo_order:
if node in feed_shapes:
self.node_to_shape_map[node] = feed_shapes[node]
else:
# print(node.name)
input_shapes = [self.node_to_shape_map[n] for n in node.inputs]
self.node_to_shape_map[node] = node.op.infer_shape(
node, input_shapes)
def memory_plan(self, feed_shapes):
"""Allocates ndarray.NDArray for every node except feed_dict nodes.
Implementation note:
Option 1: Alloc a ndarray.NDArray per node that persists across run()
Option 2: Implement a memory pool to reuse memory for nodes of same
shapes. More details see Lecture 7.
For both options, self.node_to_arr_map stores node->NDArray mapping to
allow mapping to persist across multiple executor.run().
Hint: use ndarray.empty(shape, ctx=self.ctx) to allocate NDArray.
Parameters
----------
feed_shapes: node->shapes mapping for feed_dict nodes.
"""
"""TODO: Your code here"""
assert (self.ctx is not None)
# self.infer_shape(feed_shapes)
self.node_to_arr_map = {}
for node, shape in self.node_to_shape_map.items():
if self.policy == 'swap':
if not node.swap:
self.node_to_arr_map[node] = ndarray.empty(
shape, ctx=self.ctx)
elif self.policy == 'vdnn':
self.node_to_arr_map[node] = np.empty(shape)
else:
self.node_to_arr_map[node] = ndarray.empty(shape, ctx=self.ctx)
def run(self, feed_dict, convert_to_numpy_ret_vals=False):
"""
Parameters
----------
feed_dict: a dictionary of node->np.ndarray supplied by user.
convert_to_numpy_ret_vals: whether to convert ret vals to np.array
Returns
-------
A list of values for nodes in eval_node_list. NDArray or np.ndarray.
"""
def are_feed_shapes_equal(sa, sb):
if (not isinstance(sa, dict)) or (not isinstance(sb, dict)):
return False
unmatched_item = set(sa.items()) ^ set(sb.items())
return len(unmatched_item) == 0
# Assume self.ctx is None implies numpy array and numpy ops.
use_numpy = self.ctx is None
node_to_val_map = {}
for node, value in feed_dict.items():
if use_numpy:
# all values passed in feed_dict must be np.ndarray
assert isinstance(value, np.ndarray)
node_to_val_map[node] = value
else:
# convert values to ndarray.NDArray if necessary
if isinstance(value, np.ndarray):
node_to_val_map[node] = ndarray.array(value, ctx=self.ctx)
elif isinstance(value, ndarray.NDArray):
node_to_val_map[node] = value
else:
assert False, "feed_dict value type not supported"
# print"xxxx"
# collect shapes for all placeholders
# infer shape if feed_shapes changed since last run
# e.g. call run() on test data after trainng
# print feed_shapes
feed_shapes = {}
for node in node_to_val_map:
feed_shapes[node] = node_to_val_map[node].shape
if(not are_feed_shapes_equal(feed_shapes, self.feed_shapes)):
self.infer_shape(feed_shapes)
self.feed_shapes = feed_shapes
if (not use_numpy):
self.memory_plan(self.feed_shapes)
for node in self.topo_order:
if node in node_to_val_map:
continue
input_vals = [node_to_val_map[n] for n in node.inputs]
if use_numpy:
node_val = np.empty(shape=self.node_to_shape_map[node])
else:
node_val = self.node_to_arr_map[node]
# print(node.name)
node.op.compute(node, input_vals, node_val, use_numpy, self.stream)
node_to_val_map[node] = node_val
self.stream.sync()
if not use_numpy and convert_to_numpy_ret_vals:
return [node_to_val_map[n].asnumpy() for n in self.eval_node_list]
return [node_to_val_map[n] for n in self.eval_node_list]
# def run(self, feed_dict, convert_to_numpy_ret_vals=False):
# """
# Parameters
# ----------
# feed_dict: a dictionary of node->np.ndarray supplied by user.
# convert_to_numpy_ret_vals: whether to convert ret vals to np.array
# Returns
# -------
# A list of values for nodes in eval_node_list. NDArray or np.ndarray.
# """
# def are_feed_shapes_equal(sa, sb):
# if (not isinstance(sa, dict)) or (not isinstance(sb, dict)):
# return False
# unmatched_item = set(sa.items()) ^ set(sb.items())
# return len(unmatched_item) == 0
# # Assume self.ctx is None implies numpy array and numpy ops.
# use_numpy = self.ctx is None
# node_to_val_map = {}
# for node, value in feed_dict.items():
# if self.policy == 'vdnn':
# assert isinstance(value, np.ndarray)
# node_to_val_map[node] = value
# else:
# if use_numpy:
# # all values passed in feed_dict must be np.ndarray
# assert isinstance(value, np.ndarray)
# node_to_val_map[node] = value
# else:
# # convert values to ndarray.NDArray if necessary
# if isinstance(value, np.ndarray):
# if self.policy == 'swap':
# if node.swap == True:
# node_to_val_map[node] = value
# else:
# node_to_val_map[node] = ndarray.array(value, ctx=self.ctx)
# else:
# node_to_val_map[node] = ndarray.array(value, ctx=self.ctx)
# elif isinstance(value, ndarray.NDArray):
# node_to_val_map[node] = value
# else:
# assert False, "feed_dict value type not supported"
# # collect shapes for all placeholders
# feed_shapes = {}
# for node in node_to_val_map:
# feed_shapes[node] = node_to_val_map[node].shape
# # infer shape if feed_shapes changed since last run
# # e.g. call run() on test data after trainng
# # print feed_shapes
# if (not are_feed_shapes_equal(feed_shapes, self.feed_shapes)):
# self.infer_shape(feed_shapes)
# self.feed_shapes = feed_shapes
# if not self.policy == 'vdnn':
# # plan memory if using GPU
# if (not use_numpy):
# self.memory_plan(feed_shapes)
# # Traverse graph in topo order and compute values for all nodes.
# global FLAG_SHOW_GRAPH
# if self.policy == 'swap':
# # generate swap queue
# if not use_numpy:
# for node in self.topo_order:
# if node not in node_to_val_map:
# # variable in placeholder
# for input_node in node.inputs:
# if input_node.swap == True:
# self.swap_queue.append(input_node)
# # variable grad
# if node.swap == True:
# self.swap_queue.append(node)
# node_in_GPU = None
# if FLAG_SHOW_GRAPH:
# print "Show swap queue:"
# for node in self.swap_queue:
# print node
# elif self.policy == 'vdnn':
# # TODO traverse graph to select in-gpu window
# window = [0,0]
# if not use_numpy:
# nvmlInit()
# handle = nvmlDeviceGetHandleByIndex(0)
# info = nvmlDeviceGetMemoryInfo(handle)
# gpu_mem = info.free
# nvmlShutdown()
# loss_node = self.eval_node_list[0]
# window[1] = self.topo_order.index(loss_node)+1
# window[0] = self.topo_order.index(loss_node)+1
# for node in reversed(self.topo_order[:window[1]+1]):
# node_size = 4 # float32
# #print node, self.node_to_shape_map[node]
# for shape in self.node_to_shape_map[node]:
# node_size = node_size * shape
# if gpu_mem > node_size:
# gpu_mem = gpu_mem - node_size
# window[0] = window[0] - 1
# #print "gpu_mem:",gpu_mem
# # Traverse graph in topo order and compute values for all nodes.
# if FLAG_SHOW_GRAPH:
# print "run topo_order"
# # Show graph dependency
# if FLAG_SHOW_GRAPH:
# print "node:",node
# print "node.desc:",node.desc
# for node in self.topo_order:
# if self.policy == 'vdnn':
# # Skip placeholder nodes
# if node in node_to_val_map:
# continue
# # H2D before compute
# ## Collect inputs
# input_vals = []
# for n in node.inputs:
# if not use_numpy:
# if isinstance(node_to_val_map[n], np.ndarray):
# node_to_val_map[n] = ndarray.array(node_to_val_map[n], ctx=self.ctx)
# input_vals.append(node_to_val_map[n])
# ## Alloc node space
# if use_numpy:
# node_val = np.empty(shape=self.node_to_shape_map[node])
# else:
# node_val = ndarray.empty(shape=self.node_to_shape_map[node], ctx=self.ctx)
# # Compute
# # node_val is modified in-place whether np.ndarray or NDArray
# node.op.compute(node, input_vals, node_val, use_numpy)
# # D2H after compute
# if use_numpy:
# node_to_val_map[node] = node_val
# else:
# node_index = self.topo_order.index(node)
# if node_index > window[0] and node_index < window[1]:
# node_to_val_map[node] = node_val
# continue
# node_to_val_map[node] = node_val.asnumpy()
# del node_val
# for n in node.inputs:
# if isinstance(node_to_val_map[n], ndarray.NDArray):
# tmp_val = node_to_val_map[n].asnumpy()
# del node_to_val_map[n]
# node_to_val_map[n] = tmp_val
# elif self.policy == 'swap':
# # Switch in GPU
# if not use_numpy:
# if self.swap_queue and (node_in_GPU==None):
# swap_node = self.swap_queue[0]
# if swap_node in node_to_val_map:
# node_to_val_map[swap_node] = ndarray.array(node_to_val_map[swap_node], ctx=self.ctx)
# else:
# self.node_to_arr_map[swap_node] = ndarray.empty(self.node_to_shape_map[swap_node], ctx=self.ctx)
# node_in_GPU = swap_node.id
# if node in node_to_val_map:
# # Skip placeholder nodes. Values already provided by feed_dict.
# continue
# # Compute
# input_vals = [node_to_val_map[n] for n in node.inputs]
# if use_numpy:
# node_val = np.empty(shape=self.node_to_shape_map[node])
# else:
# node_val = self.node_to_arr_map[node]
# # node_val is modified in-place whether np.ndarray or NDArray
# node.op.compute(node, input_vals, node_val, use_numpy)
# if node.swap == True:
# node_to_val_map[node] = node_val.asnumpy()
# del node_val
# del self.node_to_arr_map[node]
# del self.swap_queue[0]
# node_in_GPU = None
# else:
# node_to_val_map[node] = node_val
# # Switch out GPU
# if not use_numpy:
# if self.swap_queue:
# if self.swap_queue[0] in node.inputs:
# out_node = self.swap_queue.pop(0)
# if self.swap_queue:
# if not self.swap_queue[0].id == node_in_GPU:
# tmp_array = node_to_val_map[out_node].asnumpy()
# del node_to_val_map[out_node]
# node_to_val_map[out_node] = tmp_array
# node_in_GPU = None
# else:
# if node in node_to_val_map:
# # Skip placeholder nodes. Values already provided by feed_dict.
# continue
# input_vals = [node_to_val_map[n] for n in node.inputs]
# # print self.node_to_shape_map[node]
# if use_numpy:
# node_val = np.empty(shape=self.node_to_shape_map[node])
# else:
# node_val = self.node_to_arr_map[node]
# # node_val is modified in-place whether np.ndarray or NDArray
# # if (len(node.inputs) == 1):
# # print "computs",node.inputs[0].name
# # else:
# # print "computs",node.inputs[0].name,node.inputs[1].name
# # print node.name
# # print node_val.shape
# # print "xxx"
# # print node.name
# node.op.compute(node, input_vals, node_val, use_numpy)
# # print "xxx"
# node_to_val_map[node] = node_val
# # print "xxx"
# if FLAG_SHOW_GRAPH:
# FLAG_SHOW_GRAPH = False
# # Collect node values.
# if not use_numpy and convert_to_numpy_ret_vals:
# if self.policy == 'swap':
# node_values = []
# for n in self.eval_node_list:
# if n.swap == True:
# node_values.append(node_to_val_map[n])
# else:
# node_values.append(node_to_val_map[n].asnumpy())
# return node_values
# elif self.policy == 'vdnn':
# return [node_to_val_map[n] for n in self.eval_node_list]
# else:
# return [node_to_val_map[n].asnumpy() for n in self.eval_node_list]
# return [node_to_val_map[n] for n in self.eval_node_list]
def gradients(output_node, node_list, scheduler_policy=None):
"""Take gradient of output node with respect to each node in node_list.
Parameters
----------
output_node: output node that we are taking derivative of.
node_list: list of nodes that we are taking derivative wrt.
Returns
-------
A list of gradient values, one for each node in node_list respectively.
"""
from . import OnesLike
node_to_output_grads_list = {}
node_to_output_grads_list[output_node] = [
OnesLike.oneslike_op(output_node)]
node_to_output_grad = {}
# Traverse forward graph in reverse topological order
reverse_topo_order = reversed(find_topo_sort([output_node]))
for node in reverse_topo_order:
output_grad = sum_node_list(node_to_output_grads_list[node])
node_to_output_grad[node] = output_grad
input_grads_list = node.op.gradient(node, output_grad)
#print len(node.name)
#print len(node.inputs)
#raw_input("\n\nPress the enter key to exit.")
for i in range(len(node.inputs)):
if node.inputs[i] not in node_to_output_grads_list:
node_to_output_grads_list[node.inputs[i]] = []
# Calculate partial adjoint for input nodes.
# print node.name
node_to_output_grads_list[node.inputs[i]].append(
input_grads_list[i])
if scheduler_policy == 'swap':
for node in node_list:
if node.swap:
node_to_output_grad[node].swap = True
grad_node_list = [node_to_output_grad[node] for node in node_list]
# grad_node_list = [distributed_communicate_op(node_to_output_grad[node]) for node in node_list]
return grad_node_list
def distributed_gradients(output_node, node_list, scheduler_policy=None):
"""Take gradient of output node with respect to each node in node_list.
Parameters
----------
output_node: output node that we are taking derivative of.
node_list: list of nodes that we are taking derivative wrt.
Returns
-------
A list of gradient values, one for each node in node_list respectively.
"""
from .OnesLike import oneslike_op
node_to_output_grads_list = {}
node_to_output_grads_list[output_node] = [oneslike_op(output_node)]
node_to_output_grad = {}
# Traverse forward graph in reverse topological order
reverse_topo_order = reversed(find_topo_sort([output_node]))
for node in reverse_topo_order:
output_grad = sum_node_list(node_to_output_grads_list[node])
node_to_output_grad[node] = output_grad
input_grads_list = node.op.gradient(node, output_grad)
#print len(node.name)
#print len(node.inputs)
#raw_input("\n\nPress the enter key to exit.")
for i in range(len(node.inputs)):
if node.inputs[i] not in node_to_output_grads_list:
node_to_output_grads_list[node.inputs[i]] = []
# Calculate partial adjoint for input nodes.
node_to_output_grads_list[node.inputs[i]].append(
input_grads_list[i])
if scheduler_policy == 'swap':
for node in node_list:
if node.swap:
node_to_output_grad[node].swap = True
# grad_node_list = [node_to_output_grad[node] for node in node_list]
grad_node_list = [distributed_communicate_op(
node_to_output_grad[node]) for node in node_list]
return grad_node_list
##################
# Helper Methods #
##################
def find_topo_sort(node_list):
"""Given a list of nodes, return a topo ordering of nodes ending in them.
A simple algorithm is to do a post-order DFS traversal on the given nodes,
going backwards based on input edges. Since a node is added to the ordering
after all its predecessors are traversed due to post-order DFS, we get a
topological sort.
"""
visited = set()
topo_order = []
for node in node_list:
topo_sort_dfs(node, visited, topo_order)
return topo_order
def topo_sort_dfs(node, visited, topo_order):
"""Post-order DFS"""
if node in visited:
return
visited.add(node)
for n in node.inputs:
topo_sort_dfs(n, visited, topo_order)
topo_order.append(node)
def sum_node_list(node_list):
"""Custom sum func to avoid creating redundant nodes in Python sum func."""
from operator import add
from functools import reduce
return reduce(add, node_list)
def broadcast_rule(shape_a, shape_b):
"""Return output shape of broadcast shape_a, shape_b.
e.g. broadcast_rule((3,2), (4,3,2))
returns output_shape = (4,3,2)
Check out explanations and more examples at
https://docs.scipy.org/doc/numpy-1.10.0/user/basics.broadcasting.html
http://eli.thegreenplace.net/2015/broadcasting-arrays-in-numpy/
"""
assert(isinstance(shape_a, tuple))
assert(isinstance(shape_b, tuple))
if len(shape_a) > len(shape_b):
longer_shape, shorter_shape = shape_a, shape_b
else:
longer_shape, shorter_shape = shape_b, shape_a
len_diff = len(longer_shape) - len(shorter_shape)
for i in range(len_diff):
# pad with leading 1s
shorter_shape = (1,) + shorter_shape
assert len(shorter_shape) == len(longer_shape)
output_shape = list(longer_shape)
for i in range(len(output_shape)):
assert (shorter_shape[i] == longer_shape[i]) \
or (shorter_shape[i] == 1) \
or (longer_shape[i] == 1)
output_shape[i] = max(shorter_shape[i], longer_shape[i])
return tuple(output_shape)
| 40.091973 | 116 | 0.591074 | ["Apache-2.0"] | DMALab/TSplit | python/athena/gpu_ops/StreamExecutor.py | 23,975 | Python |
"""
TickerHandler
This implements an efficient Ticker which uses a subscription
model to 'tick' subscribed objects at regular intervals.
The ticker mechanism is used by importing and accessing
the instantiated TICKER_HANDLER instance in this module. This
instance is run by the server; it will save its status across
server reloads and be started automatically on boot.
Example:
```python
from evennia.scripts.tickerhandler import TICKER_HANDLER
# call tick myobj.at_tick(*args, **kwargs) every 15 seconds
TICKER_HANDLER.add(15, myobj.at_tick, *args, **kwargs)
```
You supply the interval to tick and a callable to call regularly
with any extra args/kwargs. The handler will transparently set
up and add new timers behind the scenes to tick at given intervals,
using a TickerPool - all callables with the same interval will share
the interval ticker.
To remove:
```python
TICKER_HANDLER.remove(15, myobj.at_tick)
```
Both interval and callable must be given since a single object can be subscribed
to many different tickers at the same time. You can also supply `idstring`
as an identifying string if you ever want to tick the callable at the same interval
but with different arguments (args/kwargs are not used for identifying the ticker). There
is also `persistent=False` if you don't want to make a ticker that don't survive a reload.
If either or both `idstring` or `persistent` has been changed from their defaults, they
must be supplied to the `TICKER_HANDLER.remove` call to properly identify the ticker
to remove.
The TickerHandler's functionality can be overloaded by modifying the
Ticker class and then changing TickerPool and TickerHandler to use the
custom classes
```python
class MyTicker(Ticker):
# [doing custom stuff]
class MyTickerPool(TickerPool):
ticker_class = MyTicker
class MyTickerHandler(TickerHandler):
ticker_pool_class = MyTickerPool
```
If one wants to duplicate TICKER_HANDLER's auto-saving feature in
a custom handler one can make a custom `AT_STARTSTOP_MODULE` entry to
call the handler's `save()` and `restore()` methods when the server reboots.
"""
import inspect
from builtins import object
from twisted.internet.defer import inlineCallbacks
from django.core.exceptions import ObjectDoesNotExist
from evennia.scripts.scripts import ExtendedLoopingCall
from evennia.server.models import ServerConfig
from evennia.utils.logger import log_trace, log_err
from evennia.utils.dbserialize import dbserialize, dbunserialize, pack_dbobj, unpack_dbobj
from evennia.utils import variable_from_module
_GA = object.__getattribute__
_SA = object.__setattr__
_ERROR_ADD_TICKER = \
"""TickerHandler: Tried to add an invalid ticker:
{storekey}
Ticker was not added."""
class Ticker(object):
"""
Represents a repeatedly running task that calls
hooks repeatedly. Overload `_callback` to change the
way it operates.
"""
@inlineCallbacks
def _callback(self):
"""
This will be called repeatedly every `self.interval` seconds.
`self.subscriptions` contain tuples of (obj, args, kwargs) for
each subscribing object.
If overloading, this callback is expected to handle all
subscriptions when it is triggered. It should not return
anything and should not traceback on poorly designed hooks.
The callback should ideally work under @inlineCallbacks so it
can yield appropriately.
The _hook_key, which is passed down through the handler via
kwargs is used here to identify which hook method to call.
"""
self._to_add = []
self._to_remove = []
self._is_ticking = True
for store_key, (args, kwargs) in self.subscriptions.iteritems():
callback = yield kwargs.pop("_callback", "at_tick")
obj = yield kwargs.pop("_obj", None)
try:
if callable(callback):
# call directly
yield callback(*args, **kwargs)
continue
# try object method
if not obj or not obj.pk:
# object was deleted between calls
self._to_remove.append(store_key)
continue
else:
yield _GA(obj, callback)(*args, **kwargs)
except ObjectDoesNotExist:
log_trace("Removing ticker.")
self._to_remove.append(store_key)
except Exception:
log_trace()
finally:
# make sure to re-store
kwargs["_callback"] = callback
kwargs["_obj"] = obj
# cleanup - we do this here to avoid changing the subscription dict while it loops
self._is_ticking = False
for store_key in self._to_remove:
self.remove(store_key)
for store_key, (args, kwargs) in self._to_add:
self.add(store_key, *args, **kwargs)
self._to_remove = []
self._to_add = []
def __init__(self, interval):
"""
Set up the ticker
Args:
interval (int): The stepping interval.
"""
self.interval = interval
self.subscriptions = {}
self._is_ticking = False
self._to_remove = []
self._to_add = []
# set up a twisted asynchronous repeat call
self.task = ExtendedLoopingCall(self._callback)
def validate(self, start_delay=None):
"""
Start/stop the task depending on how many subscribers we have
using it.
Args:
start_delay (int): Time to way before starting.
"""
subs = self.subscriptions
if self.task.running:
if not subs:
self.task.stop()
elif subs:
self.task.start(self.interval, now=False, start_delay=start_delay)
def add(self, store_key, *args, **kwargs):
"""
Sign up a subscriber to this ticker.
Args:
store_key (str): Unique storage hash for this ticker subscription.
args (any, optional): Arguments to call the hook method with.
Kwargs:
_start_delay (int): If set, this will be
used to delay the start of the trigger instead of
`interval`.
"""
if self._is_ticking:
# protects the subscription dict from
# updating while it is looping
            self._to_add.append((store_key, (args, kwargs)))
else:
start_delay = kwargs.pop("_start_delay", None)
self.subscriptions[store_key] = (args, kwargs)
self.validate(start_delay=start_delay)
def remove(self, store_key):
"""
Unsubscribe object from this ticker
Args:
store_key (str): Unique store key.
"""
if self._is_ticking:
# this protects the subscription dict from
# updating while it is looping
self._to_remove.append(store_key)
else:
self.subscriptions.pop(store_key, False)
self.validate()
def stop(self):
"""
Kill the Task, regardless of subscriptions.
"""
self.subscriptions = {}
self.validate()
class TickerPool(object):
"""
This maintains a pool of
`evennia.scripts.scripts.ExtendedLoopingCall` tasks for calling
subscribed objects at given times.
"""
ticker_class = Ticker
def __init__(self):
"""
Initialize the pool.
"""
self.tickers = {}
def add(self, store_key, *args, **kwargs):
"""
Add new ticker subscriber.
Args:
store_key (str): Unique storage hash.
args (any, optional): Arguments to send to the hook method.
"""
_, _, _, interval, _, _ = store_key
if not interval:
log_err(_ERROR_ADD_TICKER.format(store_key=store_key))
return
if interval not in self.tickers:
self.tickers[interval] = self.ticker_class(interval)
self.tickers[interval].add(store_key, *args, **kwargs)
def remove(self, store_key):
"""
Remove subscription from pool.
Args:
store_key (str): Unique storage hash to remove
"""
_, _, _, interval, _, _ = store_key
if interval in self.tickers:
self.tickers[interval].remove(store_key)
if not self.tickers[interval]:
del self.tickers[interval]
def stop(self, interval=None):
"""
        Stop all tickers in the pool. This is done at server reload since
        restoring the pool will automatically re-populate the pool.
Args:
interval (int, optional): Only stop tickers with this
interval.
"""
if interval and interval in self.tickers:
self.tickers[interval].stop()
else:
for ticker in self.tickers.values():
ticker.stop()
class TickerHandler(object):
"""
The Tickerhandler maintains a pool of tasks for subscribing
objects to various tick rates. The pool maintains creation
    instructions and re-applies them at a server restart.
"""
ticker_pool_class = TickerPool
def __init__(self, save_name="ticker_storage"):
"""
        Initialize handler
        Args:
            save_name (str, optional): The name of the ServerConfig
                instance to store the handler state persistently.
"""
self.ticker_storage = {}
self.save_name = save_name
self.ticker_pool = self.ticker_pool_class()
def _get_callback(self, callback):
"""
        Analyze callback and determine its constituents
Args:
callback (function or method): This is either a stand-alone
                function or class method on a typeclassed entity (that is,
an entity that can be saved to the database).
Returns:
ret (tuple): This is a tuple of the form `(obj, path, callfunc)`,
where `obj` is the database object the callback is defined on
                if it's a method (otherwise `None`), while `path` is
the python-path to the stand-alone function (`None` if a method).
The `callfunc` is either the name of the method to call or the
callable function object itself.
"""
outobj, outpath, outcallfunc = None, None, None
if callable(callback):
if inspect.ismethod(callback):
outobj = callback.im_self
outcallfunc = callback.im_func.func_name
elif inspect.isfunction(callback):
outpath = "%s.%s" % (callback.__module__, callback.func_name)
outcallfunc = callback
else:
raise TypeError("%s is not a callable function or method." % callback)
return outobj, outpath, outcallfunc
def _store_key(self, obj, path, interval, callfunc, idstring="", persistent=True):
"""
Tries to create a store_key for the object.
Args:
obj (Object, tuple or None): Subscribing object if any. If a tuple, this is
a packed_obj tuple from dbserialize.
path (str or None): Python-path to callable, if any.
interval (int): Ticker interval.
callfunc (callable or str): This is either the callable function or
the name of the method to call. Note that the callable is never
stored in the key; that is uniquely identified with the python-path.
idstring (str, optional): Additional separator between
different subscription types.
persistent (bool, optional): If this ticker should survive a system
shutdown or not.
Returns:
store_key (tuple): A tuple `(packed_obj, methodname, outpath, interval,
idstring, persistent)` that uniquely identifies the
ticker. Here, `packed_obj` is the unique string representation of the
object or `None`. The `methodname` is the string name of the method on
`packed_obj` to call, or `None` if `packed_obj` is unset. `path` is
the Python-path to a non-method callable, or `None`. Finally, `interval`
`idstring` and `persistent` are integers, strings and bools respectively.
"""
interval = int(interval)
persistent = bool(persistent)
packed_obj = pack_dbobj(obj)
methodname = callfunc if callfunc and isinstance(callfunc, basestring) else None
outpath = path if path and isinstance(path, basestring) else None
return (packed_obj, methodname, outpath, interval, idstring, persistent)
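    # Example shape of a store_key for a method subscription (values below are
    # illustrative only): (<packed obj>, "at_tick", None, 30, "worldclock", True)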
def save(self):
"""
        Save ticker_storage as a serialized string into a temporary
        ServerConfig field. Saving is done on the fly; when called by the
        server at shutdown, the current timer of each ticker is also
        stored so it can start over from that point.
"""
if self.ticker_storage:
# get the current times so the tickers can be restarted with a delay later
start_delays = dict((interval, ticker.task.next_call_time())
for interval, ticker in self.ticker_pool.tickers.items())
# remove any subscriptions that lost its object in the interim
to_save = {store_key: (args, kwargs) for store_key, (args, kwargs) in self.ticker_storage.items()
if ((store_key[1] and ("_obj" in kwargs and kwargs["_obj"].pk) and
hasattr(kwargs["_obj"], store_key[1])) or # a valid method with existing obj
store_key[2])} # a path given
# update the timers for the tickers
for store_key, (args, kwargs) in to_save.items():
                interval = store_key[3]
# this is a mutable, so it's updated in-place in ticker_storage
kwargs["_start_delay"] = start_delays.get(interval, None)
ServerConfig.objects.conf(key=self.save_name, value=dbserialize(to_save))
else:
# make sure we have nothing lingering in the database
ServerConfig.objects.conf(key=self.save_name, delete=True)
def restore(self, server_reload=True):
"""
Restore ticker_storage from database and re-initialize the
handler from storage. This is triggered by the server at
restart.
Args:
server_reload (bool, optional): If this is False, it means
the server went through a cold reboot and all
non-persistent tickers must be killed.
"""
# load stored command instructions and use them to re-initialize handler
restored_tickers = ServerConfig.objects.conf(key=self.save_name)
if restored_tickers:
# the dbunserialize will convert all serialized dbobjs to real objects
restored_tickers = dbunserialize(restored_tickers)
self.ticker_storage = {}
for store_key, (args, kwargs) in restored_tickers.iteritems():
try:
# at this point obj is the actual object (or None) due to how
# the dbunserialize works
obj, callfunc, path, interval, idstring, persistent = store_key
if not persistent and not server_reload:
# this ticker will not be restarted
continue
if isinstance(callfunc, basestring) and not obj:
# methods must have an existing object
continue
# we must rebuild the store_key here since obj must not be
# stored as the object itself for the store_key to be hashable.
store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent)
if obj and callfunc:
kwargs["_callback"] = callfunc
kwargs["_obj"] = obj
elif path:
modname, varname = path.rsplit(".", 1)
callback = variable_from_module(modname, varname)
kwargs["_callback"] = callback
kwargs["_obj"] = None
else:
# Neither object nor path - discard this ticker
log_err("Tickerhandler: Removing malformed ticker: %s" % str(store_key))
continue
except Exception:
# this suggests a malformed save or missing objects
log_trace("Tickerhandler: Removing malformed ticker: %s" % str(store_key))
continue
# if we get here we should create a new ticker
self.ticker_storage[store_key] = (args, kwargs)
self.ticker_pool.add(store_key, *args, **kwargs)
def add(self, interval=60, callback=None, idstring="", persistent=True, *args, **kwargs):
"""
Add subscription to tickerhandler
Args:
interval (int, optional): Interval in seconds between calling
`callable(*args, **kwargs)`
            callback (callable function or method, optional): This
should either be a stand-alone function or a method on a
typeclassed entity (that is, one that can be saved to the
database).
idstring (str, optional): Identifier for separating
this ticker-subscription from others with the same
interval. Allows for managing multiple calls with
the same time interval and callback.
persistent (bool, optional): A ticker will always survive
a server reload. If this is unset, the ticker will be
deleted by a server shutdown.
args, kwargs (optional): These will be passed into the
callback every time it is called.
Notes:
The callback will be identified by type and stored either as
            a combination of serialized database object + methodname or
            as a python-path to the module + funcname. These strings will
            be combined with `interval` and `idstring` to define a
unique storage key for saving. These must thus all be supplied
when wanting to modify/remove the ticker later.
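        Example:
            A minimal usage sketch (hypothetical; `obj` is assumed to be a
            typeclassed object defining an `at_tick` method)::
                from evennia.scripts.tickerhandler import TICKER_HANDLER
                TICKER_HANDLER.add(30, obj.at_tick, idstring="worldclock")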
"""
if isinstance(callback, int):
raise RuntimeError("TICKER_HANDLER.add has changed: "
"the interval is now the first argument, callback the second.")
obj, path, callfunc = self._get_callback(callback)
store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent)
kwargs["_obj"] = obj
kwargs["_callback"] = callfunc # either method-name or callable
self.ticker_storage[store_key] = (args, kwargs)
self.ticker_pool.add(store_key, *args, **kwargs)
self.save()
def remove(self, interval=60, callback=None, idstring="", persistent=True):
"""
Remove object from ticker or only remove it from tickers with
a given interval.
Args:
interval (int, optional): Interval of ticker to remove.
callback (callable function or method): Either a function or
the method of a typeclassed object.
idstring (str, optional): Identifier id of ticker to remove.
"""
if isinstance(callback, int):
raise RuntimeError("TICKER_HANDLER.remove has changed: "
"the interval is now the first argument, callback the second.")
obj, path, callfunc = self._get_callback(callback)
store_key = self._store_key(obj, path, interval, callfunc, idstring, persistent)
to_remove = self.ticker_storage.pop(store_key, None)
if to_remove:
self.ticker_pool.remove(store_key)
self.save()
def clear(self, interval=None):
"""
Stop/remove tickers from handler.
Args:
interval (int): Only stop tickers with this interval.
Notes:
This is the only supported way to kill tickers related to
non-db objects.
"""
self.ticker_pool.stop(interval)
if interval:
            self.ticker_storage = dict((store_key, subscription)
                                       for store_key, subscription in self.ticker_storage.items()
                                       if store_key[3] != interval)
else:
self.ticker_storage = {}
self.save()
def all(self, interval=None):
"""
Get all subscriptions.
Args:
interval (int): Limit match to tickers with this interval.
Returns:
tickers (list): If `interval` was given, this is a list of
tickers using that interval.
tickerpool_layout (dict): If `interval` was *not* given,
this is a dict {interval1: [ticker1, ticker2, ...], ...}
"""
if interval is None:
# return dict of all, ordered by interval
return dict((interval, ticker.subscriptions)
for interval, ticker in self.ticker_pool.tickers.iteritems())
else:
# get individual interval
ticker = self.ticker_pool.tickers.get(interval, None)
if ticker:
return {interval: ticker.subscriptions}
def all_display(self):
"""
        Get all tickers in an easily displayable form.
Returns:
            store_keys (list): A list of all subscription store_keys.
"""
store_keys = []
for ticker in self.ticker_pool.tickers.itervalues():
for (objtup, callfunc, path, interval, idstring, persistent), (args, kwargs) in ticker.subscriptions.iteritems():
store_keys.append((kwargs.get("_obj", None), callfunc, path, interval, idstring, persistent))
return store_keys
# main tickerhandler
TICKER_HANDLER = TickerHandler()
| 38.568966 | 125 | 0.60903 | [
"BSD-3-Clause"
] | orkim/evennia | evennia/scripts/tickerhandler.py | 22,370 | Python |
# ======================================================================
# The Stars Align
# Advent of Code 2018 Day 10 -- Eric Wastl -- https://adventofcode.com
#
# Python implementation by Dr. Dean Earl Wright III
# ======================================================================
# ======================================================================
# a o c _ 1 0 . p y
# ======================================================================
"Solve the puzzles for Advent of Code 2018 day 10"
# ----------------------------------------------------------------------
# import
# ----------------------------------------------------------------------
import argparse
import sys
import lights
# ----------------------------------------------------------------------
# constants
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# parse_command_line
# ----------------------------------------------------------------------
def parse_command_line():
"Parse the command line options"
# 1. Create the command line parser
desc = 'The Stars Align - Day 10 of Advent of Code 2018'
sample = 'sample: python aoc_10.py input.txt'
parser = argparse.ArgumentParser(description=desc,
epilog=sample)
parser.add_argument('-v', '--verbose', action='store_true', default=False,
dest='verbose', help='Print status messages to stdout')
parser.add_argument('-p', '--part', action='store', default=1, type=int,
dest='part', help='Puzzle Part (1 or 2)')
parser.add_argument('-l', '--limit', action='store', default=0, type=int,
dest='limit',
help='Maximum limit (e.g., time, size, recursion) before stopping')
parser.add_argument('filepath', metavar='FILENAME', action='store', type=str,
help="Location of puzzle input")
# 2. Get the options and arguments
return parser.parse_args()
# ----------------------------------------------------------------------
# part_one
# ----------------------------------------------------------------------
def part_one(args, input_lines):
"Process part one of the puzzle"
# 1. Create the puzzle solver
solver = lights.Lights(part2=False, text=input_lines)
# 2. Determine the solution for part one
solution = solver.part_one(verbose=args.verbose, limit=args.limit)
if solution is None:
print("There is no solution")
else:
print("The solution for part one is %s" % (solution))
# 3. Return result
return solution is not None
# ----------------------------------------------------------------------
# part_two
# ----------------------------------------------------------------------
def part_two(args, input_lines):
"Process part two of the puzzle"
# 1. Create the puzzle solver
solver = lights.Lights(part2=True, text=input_lines)
# 2. Determine the solution for part two
solution = solver.part_two(verbose=args.verbose, limit=args.limit)
if solution is None:
print("There is no solution")
else:
print("The solution for part two is %s" % (solution))
# 3. Return result
return solution is not None
# ----------------------------------------------------------------------
# from_file
# ----------------------------------------------------------------------
def from_file(filepath):
"Read the file"
    with open(filepath) as puzzle_file:
        return from_text(puzzle_file.read())
# ----------------------------------------------------------------------
# from_text
# ----------------------------------------------------------------------
def from_text(text):
"Break the text into trimed, non-comment lines"
# 1. We start with no lines
lines = []
# 2. Loop for lines in the text
for line in text.split('\n'):
        # 3. But ignore blank lines and comment lines
line = line.rstrip(' \r')
if not line:
continue
if line.startswith('!'):
continue
# 4. Add the line
lines.append(line)
# 5. Return a list of clean lines
return lines
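# Illustrative example (input is hypothetical): from_text("! note\n\nposition=<9, 1>\n")
# returns ['position=<9, 1>'] -- blank and '!'-prefixed comment lines are dropped.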
# ----------------------------------------------------------------------
# main
# ----------------------------------------------------------------------
def main():
"Read the Advent of Code problem and solve it"
# 1. Get the command line options
args = parse_command_line()
# 2. Read the puzzle file
input_text = from_file(args.filepath)
    # 3. Process the appropriate part of the puzzle
if args.part == 1:
result = part_one(args, input_text)
else:
result = part_two(args, input_text)
    # 4. Set return code (0 if solution found, 2 if not)
if result:
sys.exit(0)
sys.exit(2)
# ----------------------------------------------------------------------
# module initialization
# ----------------------------------------------------------------------
if __name__ == '__main__':
main()
# ======================================================================
# end a o c _ 1 0 . p y end
# ======================================================================
| 35.179641 | 91 | 0.372936 | [
"MIT"
] | deanearlwright/AdventOfCode | 2018/10_TheStarsAlign/aoc_10.py | 5,875 | Python |
from pyecharts import options as opts
from pyecharts.charts import Map
import pandas as pd
import namemap
def read_country_code():
"""
    Get the Chinese-to-English dictionary of country names.
:return:
"""
country_dict = {}
    for key, val in namemap.nameMap.items():  # swap the keys and values of the nameMap mapping
country_dict[val] = key
return country_dict
def read_csv():
"""
    Read the data and return a list of English country names and a list of cumulative confirmed counts.
:return:
"""
country_dict = read_country_code()
data = pd.read_csv("2019-nCoV.csv", index_col=False)
countrys_names = list()
confirmed_count = list()
for x in range(len(data.index)):
if data['name'].iloc[x] in country_dict.keys():
countrys_names.append(country_dict[data['name'].iloc[x]])
confirmed_count.append(data['confirm'].iloc[x])
else:
print(data['name'].iloc[x])
return countrys_names, confirmed_count
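# Assumed layout of 2019-nCoV.csv (only the 'name' and 'confirm' columns are
# used by read_csv(); the sample rows below are illustrative):
#
#     name,confirm
#     美国,1666828
#     巴西,347398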
def draw_map():
"""
    Draw the world map.
    A puzzling issue was hit here at one point:
    the map only rendered data when the two lists were hard-coded; when the data came from the helper method, the map did not render it.
:return:
"""
    # Fix for the issue noted in the docstring: the ints in confirmed_count are
    # numpy ints and must be converted to plain Python ints.
    # Thanks to @李康伟 for pointing this out.
countrys_names, confirmed_count = read_csv()
confirmed_count_list = []
for item in confirmed_count:
confirmed_count_list.append(int(item))
# countrys_names = ['United States', 'Brazil', 'Russia', 'Spain', 'United Kingdom', 'Italy', 'France', 'Germany', 'Turkey', 'Iran', 'India', 'Peru', 'Canada', 'Saudi Arabia', 'Mexico', 'Chile', 'Belgium', 'Pakistan', 'Netherlands', 'Qatar', 'Ecuador', 'Belarus', 'Sweden', 'Bangladesh', 'Singapore Rep.', 'Switzerland', 'Portugal', 'United Arab Emirates', 'Ireland', 'Indonesia', 'South Africa', 'Poland', 'Ukraine', 'Kuwait', 'Colombia', 'Romania', 'Israel', 'Japan', 'Egypt', 'Austria', 'Dominican Rep.', 'Philippines', 'Denmark', 'Argentina', 'Korea', 'Serbia', 'Panama', 'Afghanistan', 'Czech Rep.', 'Norway', 'Kazakhstan', 'Algeria', 'Nigeria', 'Morocco', 'Oman', 'Malaysia', 'Australia', 'Moldova', 'Ghana', 'Finland', 'Armenia', 'Bolivia', 'Cameroon', 'Iraq', 'Luxembourg', 'Azerbaijan', 'Honduras', 'Hungary', 'Sudan', 'Guinea', 'Uzbekistan', 'Guatemala', 'Thailand', 'Senegal', 'Greece', 'Tajikistan', 'Bulgaria', "Côte d'Ivoire", 'Djibouti', 'Croatia', 'Gabon', 'Cuba', 'Estonia', 'El Salvador', 'Iceland', 'Lithuania', 'Somalia', 'New Zealand', 'Slovakia', 'Slovenia', 'Kyrgyzstan', 'Kenya', 'Guinea Bissau', 'Lebanon', 'Sri Lanka', 'Tunisia', 'Latvia', 'Mali', 'Venezuela', 'Albania', 'Eq. Guinea', 'Niger', 'Cyprus', 'Zambia', 'Costa Rica', 'Haiti', 'Paraguay', 'Burkina Faso', 'Uruguay', 'Georgia', 'Jordan', 'Chad', 'Sierra Leone', 'Nepal', 'Jamaica', 'Tanzania', 'Ethiopia', 'Madagascar', 'Palestine', 'Togo', 'Vietnam', 'Rwanda', 'Montenegro', 'Nicaragua', 'Liberia', 'Swaziland', 'Mauritania', 'Yemen', 'Myanmar', 'Uganda', 'Mozambique', 'Mongolia', 'Brunei', 'Benin', 'Guyana', 'Cambodia', 'The Bahamas', 'Malawi', 'Libya', 'Syria', 'Angola', 'Zimbabwe', 'Burundi', 'Eritrea', 'Botswana', 'Gambia', 'Bhutan', 'East Timor', 'Namibia', 'Lao PDR', 'Fiji', 'Belize', 'Suriname', 'Papua New Guinea', 'Lesotho']
#
# confirmed_count = [1666828, 347398, 335882, 281904, 258504, 229327, 182036, 179986, 155686, 133521, 131920, 115754, 85151, 70161, 65856, 65393, 56810, 54601, 45265, 42213, 36258, 35244, 33188, 32078, 31068, 30725, 30471, 28704, 24582, 21745, 21343, 20931, 20580, 20464, 20177, 17857, 16712, 16536, 16513, 16486, 14422, 13777, 11487, 11353, 11190, 11092, 10577, 9998, 8890, 8346, 8322, 8113, 7526, 7406, 7257, 7185, 7114, 6994, 6617, 6568, 6302, 5915, 4400, 4272, 3990, 3982, 3743, 3741, 3628, 3176, 3132, 3054, 3040, 2976, 2876, 2738, 2427, 2366, 2270, 2243, 1934, 1931, 1821, 1819, 1804, 1616, 1594, 1504, 1504, 1468, 1403, 1192, 1114, 1097, 1089, 1048, 1046, 1015, 1010, 989, 960, 943, 927, 920, 918, 865, 850, 814, 764, 728, 704, 648, 621, 584, 550, 509, 494, 488, 423, 373, 325, 325, 324, 279, 255, 238, 227, 212, 201, 198, 168, 141, 141, 135, 127, 124, 100, 82, 75, 70, 61, 56, 42, 39, 30, 25, 24, 24, 20, 19, 18, 18, 11, 8, 2]
c = (
Map()
.add(
"确诊人数",
[list(z) for z in zip(countrys_names, confirmed_count_list)],
is_map_symbol_show=False,
maptype="world",
label_opts=opts.LabelOpts(is_show=False),
itemstyle_opts=opts.ItemStyleOpts(color="rgb(49,60,72)")
)
.set_series_opts(label_opts=opts.LabelOpts(is_show=False))
.set_global_opts(
title_opts=opts.TitleOpts(title="全球 2019-nCoV 地图"),
visualmap_opts=opts.VisualMapOpts(max_=1700000),
)
.render("map_world.html")
)
if __name__ == '__main__':
draw_map()
| 60.101266 | 1,829 | 0.624684 | [
"MIT"
] | DearCasper/python-learning | python-data-analysis/2019-nCoV-global/global_map.py | 5,031 | Python |
"""
2. Categorical Predictors
=========================
"""
###############################################################################
# The syntax for handling categorical predictors is **different** between standard regression models/two-stage-models (i.e. :code:`Lm` and :code:`Lm2`) and multi-level models (:code:`Lmer`) in :code:`pymer4`. This is because formula parsing is passed to R for :code:`Lmer` models, but handled by Python for other models.
###############################################################################
# Lm and Lm2 Models
# -----------------
# :code:`Lm` and :code:`Lm2` models use `patsy <https://patsy.readthedocs.io/en/latest/>`_ to parse model formulae. Patsy is very powerful and has built-in support for handling categorical coding schemes by wrapping a predictor in then :code:`C()` *within* the module formula. Patsy can also perform some pre-processing such as scaling and standardization using special functions like :code:`center()`. Here are some examples.
# import basic libraries and sample data
import os
import pandas as pd
from pymer4.utils import get_resource_path
from pymer4.models import Lm
# IV3 is a categorical predictors with 3 levels in the sample data
df = pd.read_csv(os.path.join(get_resource_path(), "sample_data.csv"))
###############################################################################
# Dummy-coded/Treatment contrasts
# +++++++++++++++++++++++++++++++
# Estimate a model using Treatment contrasts (dummy-coding)
# with '1.0' as the reference level
# This is the default of the C() function
model = Lm("DV ~ C(IV3, levels=[1.0, 0.5, 1.5])", data=df)
print(model.fit())
###############################################################################
# Orthogonal Polynomial Contrasts
# +++++++++++++++++++++++++++++++
# Patsy can do this using the Poly argument to the
# C() function
model = Lm("DV ~ C(IV3, Poly)", data=df)
print(model.fit())
###############################################################################
# Sum-to-zero contrasts
# +++++++++++++++++++++
# Similar to before but with the Sum argument
model = Lm("DV ~ C(IV3, Sum)", data=df)
print(model.fit())
###############################################################################
# Scaling/Centering
# +++++++++++++++++
# Moderation with IV2, but centering IV2 first
model = Lm("DV ~ center(IV2) * C(IV3, Sum)", data=df)
print(model.fit())
###############################################################################
# Please refer to the `patsy documentation <https://patsy.readthedocs.io/en/latest/categorical-coding.html>`_ for more details when working categorical predictors in :code:`Lm` or :code:`Lm2` models.
###############################################################################
# Lmer Models
# -----------
# :code:`Lmer()` models currently have support for handling categorical predictors in one of three ways based on how R's :code:`factor()` works (see the note at the end of this tutorial):
#
# - Dummy-coded factor levels (treatment contrasts) in which each model term is the difference between a factor level and a selected reference level
# - Orthogonal polynomial contrasts in which each model term is a polynomial contrast across factor levels (e.g. linear, quadratic, cubic, etc)
# - Custom contrasts for each level of a factor, which should be provided in the manner expected by R.
#
# To make re-parameterizing models easier, factor codings are passed as a dictionary to the :code:`factors` argument of a model's :code:`.fit()`. This obviates the need for adjusting data-frame properties as in R. Note that this is **different** from :code:`Lm` and :code:`Lm2` models above which expect factor codings in their formulae (because patsy does).
#
# Each of these ways also enables you to easily compute post-hoc comparisons between factor levels, as well as interactions between continuous predictors and each factor level. See tutorial 3 for more on post-hoc tests.
from pymer4.models import Lmer
# We're going to fit a multi-level logistic regression using the
# dichotomous DV_l variable and the same categorical predictor (IV3)
# as before
model = Lmer("DV_l ~ IV3 + (IV3|Group)", data=df, family="binomial")
###############################################################################
# Dummy-coding factors
# ++++++++++++++++++++
# First we'll use dummy-coding/treatment contrasts with 1.0 as the reference level. This will compute two coefficients: 0.5 > 1.0 and 1.5 > 1.0.
print(model.fit(factors={"IV3": ["1.0", "0.5", "1.5"]}))
###############################################################################
# Polynomial contrast coding
# ++++++++++++++++++++++++++
# Second we'll use orthogonal polynomial contrasts. This is accomplished using the :code:`ordered=True` argument and specifying the order of the *linear* contrast in increasing order. R will automatically compute higher order polynomial contrats that are orthogonal to this linear contrast. In this example, since there are 3 factor levels this will result in two polynomial terms: a linear contrast we specify below corresponding to 0.5 < 1.0 < 1.5 and an orthogonal quadratic contrast automatically determined by R, corresponding to 0.5 > 1 < 1.5
print(model.fit(factors={"IV3": ["0.5", "1.0", "1.5"]}, ordered=True))
###############################################################################
# Custom contrasts
# ++++++++++++++++
# :code:`Lmer` models can also take custom factor contrasts based on how they are expected by R (see the note at the end of this tutorial for how contrasts work in R). Remember that there can be at most k-1 model terms representing any k level factor without over-parameterizing a model. If you specify a custom contrast, R will generate set of orthogonal contrasts for the rest of your model terms.
# Compare level '1.0' to the mean of levels '0.5' and '1.5'
# and let R determine the second contrast orthogonal to it
print(model.fit(factors={"IV3": {"1.0": 1, "0.5": -0.5, "1.5": -0.5}}))
###############################################################################
# User-created contrasts (without R)
# ++++++++++++++++++++++++++++++++++
# Another option available to you is fitting a model with *only* your desired contrast(s) rather than a full set of k-1 contrasts. Contrary to how statistics is usually taught, you don't ever *have to* include a full set of k-1 contrasts for a k level factor! The upside to doing this is that you won't need to rely on R to compute anything for you (aside from the model fit), and you will have a model with exactly the number of terms as contrasts you desire, giving you complete control. The downside is that post-hoc tests will no longer be available (see tutorial 3 for more information on post-hoc tests), but it's unlikely you're doing post-hoc tests if you are computing a subset of specific contrasts anyway. This is also a useful approach if you don't want to use patsy's formula syntax with :code:`Lm` and :code:`Lm2` as noted above.
#
# This can be accomplished by creating new columns in your dataframe to test specific hypotheses and is trivial to do with pandas `map <https://pandas.pydata.org/pandas-docs/version/0.25/reference/api/pandas.Series.map.html/>`_ and `assign <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.assign.html/>`_ methods. For example, here we manually compute a linear contrast by creating a new column in our dataframe and treating it as a continuous variable.
# Create a new column in the dataframe with a custom (linear) contrast
df = df.assign(IV3_custom_lin=df["IV3"].map({0.5: -1, 1.0: 0, 1.5: 1}))
print(df.head())
###############################################################################
# Now we can use this variable as a continuous predictor without the need for the :code:`factors` argument. Notice how the z-stat and p-value of the estimate are the same as the linear polynomial contrast estimated above. The coefficients differ in scale only because R uses [~-0.707, ~0, ~0.707] for its polynomial contrasts rather than [-1, 0, 1] like we did.
# Estimate model
model = Lmer(
"DV_l ~ IV3_custom_lin + (IV3_custom_lin|Group)", data=df, family="binomial"
)
print(model.fit())
###############################################################################
# A note on how contrasts in R work
# ---------------------------------
# .. note::
# This is just for folks curious about how contrasts in R work
#
# Specifying multiple custom contrasts in R has always been a point of confusion amongst users. This because the :code:`contrasts()` command in R doesn't actually expect contrast weights (i.e. a design matrix) as one would intuit. Rather, it is made for generating contrast coding schemes which are the inverse of the contrast weight matrix. For a longer explanation with examples see `this reference <https://rstudio-pubs-static.s3.amazonaws.com/65059_586f394d8eb84f84b1baaf56ffb6b47f.html>`_ and `this reference <https://github.com/ejolly/R/blob/master/Guides/Contrasts_in_R.md>`_. For these situations pymer4 offers a few utility functions to convert between these matrix types if desired in :code:`pymer4.utils`: :code:`R2con()` and :code:`con2R()`.
| 69.398496 | 843 | 0.638245 | [
"MIT"
] | Shotgunosine/pymer4 | docs/auto_examples/example_02_categorical.py | 9,230 | Python |
# print('Reading templates/__init__.py')
from .errors import *
import logging
logging.debug('Reading src/templates/__init__.py')
| 18.714286 | 50 | 0.770992 | [
"MIT"
] | honzatomek/pythonFEA | src/pythonFEA/templates/__init__.py | 131 | Python |
from raw.ndfd import *
| 11.5 | 22 | 0.73913 | [
"Apache-2.0"
] | EliteUSA/pyxb | examples/ndfd/ndfd.py | 23 | Python |
import multiprocessing
import warnings
import six
from chainer.backends import cuda
from chainer.dataset import convert
from chainer import reporter
from chainer.training.updaters import standard_updater
try:
from cupy.cuda import nccl
_available = True
except Exception:
_available = False
import numpy
class _Worker(multiprocessing.Process):
def __init__(self, proc_id, pipe, master):
super(_Worker, self).__init__()
self.proc_id = proc_id
self.pipe = pipe
self.converter = master.converter
self.model = master._master
self.device = master._devices[proc_id]
self.iterator = master._mpu_iterators[proc_id]
self.n_devices = len(master._devices)
def setup(self):
_, comm_id = self.pipe.recv()
self.comm = nccl.NcclCommunicator(self.n_devices, comm_id,
self.proc_id)
self.model.to_gpu(self.device)
self.reporter = reporter.Reporter()
self.reporter.add_observer('main', self.model)
self.reporter.add_observers('main',
self.model.namedlinks(skipself=True))
def run(self):
dev = cuda.Device(self.device)
dev.use()
self.setup()
while True:
job, data = self.pipe.recv()
if job == 'finalize':
dev.synchronize()
break
if job == 'update':
# For reducing memory
self.model.cleargrads()
batch = self.converter(self.iterator.next(), self.device)
with self.reporter.scope({}): # pass dummy observation
loss = _calc_loss(self.model, batch)
self.model.cleargrads()
loss.backward()
del loss
gg = gather_grads(self.model)
nccl_data_type = _get_nccl_data_type(gg.dtype)
null_stream = cuda.Stream.null
self.comm.reduce(gg.data.ptr, gg.data.ptr, gg.size,
nccl_data_type, nccl.NCCL_SUM, 0,
null_stream.ptr)
del gg
self.model.cleargrads()
gp = gather_params(self.model)
nccl_data_type = _get_nccl_data_type(gp.dtype)
self.comm.bcast(gp.data.ptr, gp.size, nccl_data_type, 0,
null_stream.ptr)
scatter_params(self.model, gp)
del gp
class MultiprocessParallelUpdater(standard_updater.StandardUpdater):
"""Implementation of a multiprocess parallel GPU Updater.
This is an implementation of :class:`Updater` that uses multiple GPUs
with multi-process data parallelism. It uses Nvidia NCCL for communication
between multiple GPUs.
It behaves similarly to
:class:`~chainer.training.updaters.StandardUpdater`.
The update routine is modified to support data-parallel
computation on multiple GPUs in one machine.
It is based on synchronous parallel SGD: it
parallelizes the gradient computation over a mini-batch, and updates the
parameters only in the main device.
It does not transfer the values collected by :class:`Reporter` in the sub
devices to the main device. So you can only see the reported values in
the main device.
Args:
        iterators: List of dataset iterators for the training dataset. The
            number of iterators must be the same as the number of GPUs you use.
optimizer: Optimizer to update parameters. The model should be attached
to the optimizer.
converter: Converter function to build input arrays. Each batch
extracted by the iterator is split equally between the devices and
then passed with corresponding ``device`` option to this function.
:func:`~chainer.dataset.concat_examples` is used by default.
devices: Dictionary or list of devices to which the training data is
sent. The master device will be the first one in the list or the
value attached to the key ``'main'``.
auto_new_epoch (bool): If ``True``,
:meth:`~chainer.Optimizer.new_epoch` of the main optimizer is
automatically called when the ``is_new_epoch`` attribute of the
main iterator is ``True``.
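    Example:
        A minimal sketch (the model, dataset and device ids are illustrative
        assumptions; the two dataset splits must have equal length)::
            train_a, train_b = chainer.datasets.split_dataset_n_random(train, 2)
            iterators = [chainer.iterators.MultiprocessIterator(d, 32)
                         for d in (train_a, train_b)]
            optimizer = chainer.optimizers.MomentumSGD()
            optimizer.setup(model)
            updater = MultiprocessParallelUpdater(
                iterators, optimizer, devices={'main': 0, 'second': 1})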
"""
def __init__(self, iterators, optimizer, converter=convert.concat_examples,
devices=None, auto_new_epoch=True):
if not MultiprocessParallelUpdater.available():
raise Exception(
'NCCL is not enabled. MultiprocessParallelUpdater '
'requires NCCL.\n'
'Please reinstall CuPy after you install NCCL.\n'
'(see https://docs-cupy.chainer.org/en/latest/install.html)')
try:
cuda.cupy.cuda.driver.ctxGetCurrent()
_cuda_initialized = True
except cuda.cupy.cuda.driver.CUDADriverError:
# The context is not initialized, it will be fine.
_cuda_initialized = False
if _cuda_initialized:
raise RuntimeError(
'The CUDA context has been already initialized. '
'MultiprocessParallelUpdater assumes the context is '
'uninitialized. Please do not call CUDA API before '
'MultiprocessParallelUpdater creates processes.')
assert len(iterators) == len(devices)
for iterator in iterators[1:]:
assert len(iterator.dataset) == len(iterators[0].dataset)
# Correct optimizer parameters for new minibatch size
optim = optimizer.__class__.__name__
if optim in ('Adam', 'AdaGrad', 'RMSprop'):
optimizer.eps *= len(devices)
warnings.warn('optimizer.eps is changed to {} '
'by MultiprocessParallelUpdater for new batch size.'.
format(optimizer.eps))
elif optim in ('RMSpropGraves', 'AdaDelta'):
optimizer.eps *= len(devices) ** 2 # not quite right for AdaDelta
warnings.warn('optimizer.eps is changed to {} '
'by MultiprocessParallelUpdater for new batch size.'.
format(optimizer.eps))
elif hasattr(optimizer, 'lr'):
optimizer.lr /= len(devices)
warnings.warn('optimizer.lr is changed to {} '
'by MultiprocessParallelUpdater for new batch size.'.
format(optimizer.lr))
super(MultiprocessParallelUpdater, self).__init__(
iterator=iterators[0],
optimizer=optimizer,
converter=converter,
auto_new_epoch=auto_new_epoch,
)
if isinstance(devices, dict):
devices = devices.copy()
main = devices.pop('main')
devices = list(six.itervalues(devices))
devices = [main] + devices
elif isinstance(devices, (list, tuple)):
devices = list(devices)
else:
raise ValueError(
'devices argument should be either dict, list or tuple,'
' but {} was given.'.format(type(devices)))
if devices is None or any(device is None for device in devices):
raise ValueError('must specify GPU devices')
self._master = optimizer.target
self._devices = devices
self._mpu_iterators = iterators
self._initialized = False
self._pipes = []
self._workers = []
self.comm = None
@staticmethod
def available():
return _available
def _send_message(self, message):
for pipe in self._pipes:
pipe.send(message)
def setup_workers(self):
if self._initialized:
return
self._initialized = True
self._master.cleargrads()
for i in six.moves.range(1, len(self._devices)):
pipe, worker_end = multiprocessing.Pipe()
worker = _Worker(i, worker_end, self)
worker.start()
self._workers.append(worker)
self._pipes.append(pipe)
with cuda.Device(self._devices[0]):
self._master.to_gpu(self._devices[0])
if len(self._devices) > 1:
comm_id = nccl.get_unique_id()
self._send_message(('set comm_id', comm_id))
self.comm = nccl.NcclCommunicator(len(self._devices),
comm_id, 0)
def update_core(self):
self.setup_workers()
self._send_message(('update', None))
with cuda.Device(self._devices[0]):
# For reducing memory
self._master.cleargrads()
optimizer = self.get_optimizer('main')
iterator = self.get_iterator('main')
batch = iterator.next()
batch = self.converter(batch, self._devices[0])
loss = _calc_loss(self._master, batch)
self._master.cleargrads()
loss.backward()
# NCCL: reduce grads
null_stream = cuda.Stream.null
if self.comm is not None:
gg = gather_grads(self._master)
nccl_data_type = _get_nccl_data_type(gg.dtype)
self.comm.reduce(gg.data.ptr, gg.data.ptr, gg.size,
nccl_data_type, nccl.NCCL_SUM,
0, null_stream.ptr)
scatter_grads(self._master, gg)
del gg
optimizer.update()
if self.comm is not None:
gp = gather_params(self._master)
nccl_data_type = _get_nccl_data_type(gp.dtype)
self.comm.bcast(gp.data.ptr, gp.size, nccl_data_type,
0, null_stream.ptr)
if self.auto_new_epoch and iterator.is_new_epoch:
optimizer.new_epoch(auto=True)
def finalize(self):
self._send_message(('finalize', None))
for worker in self._workers:
worker.join()
def _calc_loss(model, in_arrays):
if isinstance(in_arrays, tuple):
return model(*in_arrays)
elif isinstance(in_arrays, dict):
return model(**in_arrays)
else:
return model(in_arrays)
def size_num_grads(link):
"""Count total size of all gradient arrays of a given link
Args:
link (chainer.link.Link): Target link object.
"""
size = 0
num = 0
for param in link.params():
if param.size == 0:
continue
size += param.size
num += 1
return size, num
def _memcpy_gather():
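    # Elementwise CUDA kernel that copies many parameter/gradient arrays
    # (raw device pointers in `ptrs`, cumulative element offsets in `info`
    # with info[0] holding the number of arrays, fp32/fp16 flags in `dtypes`)
    # into one flat float32 buffer `dst`. A binary search over `info` finds
    # which source array each output element belongs to; fp16 values are upcast.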
return cuda.elementwise(
'raw T ptrs, raw X dtypes, raw Y info',
'raw float32 dst',
'''
int id_min = id_pre;
int id_max = num_src;
while (id_max - id_min > 1) {
int id = (id_max + id_min) / 2;
if (i < info[id]) id_max = id;
else id_min = id;
}
int id = id_min;
int i_dst = i;
int i_src = i;
if (id > 0) i_src -= info[id];
dst[i_dst] = 0;
if (ptrs[id] != NULL) {
if (dtypes[id] == 0) { // fp32
float *src = reinterpret_cast<float *>(ptrs[id]);
dst[i_dst] = src[i_src];
}
else { // fp16
float16 *src = reinterpret_cast<float16 *>(ptrs[id]);
dst[i_dst] = static_cast<float>(src[i_src]);
}
}
id_pre = id;
''',
'_memcpy_gather',
loop_prep='''
int num_src = info[0];
int id_pre = 0;
''')
def _gather(link, target):
size, num = size_num_grads(link)
ptrs = numpy.empty(num, dtype=numpy.uint64)
dtypes = numpy.empty(num, dtype=numpy.int8)
info = numpy.empty(num + 1, dtype=numpy.int32)
info[0] = 0
i = 0
for _, param in sorted(link.namedparams()):
if param.size == 0:
continue
ptrs[i] = 0 # NULL pointer
d = getattr(param, target)
if d is not None:
ptrs[i] = d.data.ptr
dtypes[i] = 0 # fp32
if param.dtype == numpy.float16:
dtypes[i] = 1 # fp16
info[i + 1] = info[i] + param.size
i += 1
info[0] = num
ptrs = cuda.to_gpu(ptrs)
dtypes = cuda.to_gpu(dtypes)
info = cuda.to_gpu(info)
return _memcpy_gather()(ptrs, dtypes, info, size=size)
def gather_grads(link):
"""Put together all gradient arrays and make a single array
Args:
link (chainer.link.Link): Target link object.
Return:
cupy.ndarray
"""
if link.xp is numpy:
raise RuntimeError('gather_grads works only on GPU.')
return _gather(link, 'grad')
def gather_params(link):
"""Put together all gradient arrays and make a single array
Args:
link (chainer.link.Link): Target link object.
Return:
cupy.ndarray
"""
if link.xp is numpy:
raise RuntimeError('Link.gather_params works only on GPU.')
return _gather(link, 'data')
def _memcpy_scatter():
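    # Inverse of _memcpy_gather: scatters the flat float32 `array` back into
    # the individual parameter/gradient arrays addressed by `ptrs`, using the
    # same `info`/`dtypes` layout and downcasting to fp16 where needed.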
return cuda.elementwise(
'raw T ptrs, raw X dtypes, raw Y info, raw float32 array',
'',
'''
int id_min = id_pre;
int id_max = num_src;
while (id_max - id_min > 1) {
int id = (id_max + id_min) / 2;
if (i < info[id]) id_max = id;
else id_min = id;
}
int id = id_min;
int i_src = i;
int i_dst = i;
if (id > 0) i_dst -= info[id];
if (ptrs[id] != NULL) {
if (dtypes[id] == 0) { // fp32
float *dst = reinterpret_cast<float *>(ptrs[id]);
dst[i_dst] = array[i_src];
}
else { // fp16
float16 *dst = reinterpret_cast<float16 *>(ptrs[id]);
dst[i_dst] = static_cast<float16>(array[i_src]);
}
}
id_pre = id;
''',
'_memcpy_scatter',
loop_prep='''
int num_src = info[0];
int id_pre = 0;
''')
def _scatter(link, array, target):
size, num = size_num_grads(link)
ptrs = numpy.zeros(num, dtype=numpy.uint64)
dtypes = numpy.zeros(num, dtype=numpy.int8)
info = numpy.zeros(num + 1, dtype=numpy.int32)
info[0] = 0
i = 0
for _, param in sorted(link.namedparams()):
if param.size == 0:
continue
ptrs[i] = 0 # NULL pointer
d = getattr(param, target)
if d is None:
d = cuda.cupy.zeros(param.shape, dtype=param.dtype)
setattr(param, target, d)
ptrs[i] = d.data.ptr
dtypes[i] = 0 # fp32
if param.dtype == numpy.float16:
dtypes[i] = 1 # fp16
info[i + 1] = info[i] + param.size
i += 1
if i != num:
        raise ValueError("_scatter: number of copied arrays does not match the link's parameters")
info[0] = num
ptrs = cuda.to_gpu(ptrs)
dtypes = cuda.to_gpu(dtypes)
info = cuda.to_gpu(info)
return _memcpy_scatter()(ptrs, dtypes, info, array, size=size)
def scatter_grads(link, array):
"""Put back contents of the specified array to the related gradient arrays
Args:
link (chainer.link.Link): Target link object.
array (cupy.ndarray): gathered array created by gather_grads()
"""
return _scatter(link, array, 'grad')
def scatter_params(link, array):
"""Put back contents of the specified array to the related gradient arrays
Args:
link (chainer.link.Link): Target link object.
array (cupy.ndarray): gathered array created by gather_params()
"""
return _scatter(link, array, 'data')
def _get_nccl_data_type(dtype):
"""Get data type for NCCL"""
if dtype == numpy.float32:
nccl_data_type = nccl.NCCL_FLOAT
elif dtype == numpy.float16:
nccl_data_type = nccl.NCCL_HALF
elif dtype == numpy.float64:
nccl_data_type = nccl.NCCL_DOUBLE
else:
raise RuntimeError('Unexpected data type:{}'.format(dtype))
return nccl_data_type
| 33.567623 | 79 | 0.561687 | [
"MIT"
] | Lynkzhang/Chainer-UM | chainer/training/updaters/multiprocess_parallel_updater.py | 16,381 | Python |
import libtmux
def ensure_server() -> libtmux.Server:
'''
Either create new or return existing server
'''
return libtmux.Server()
def spawn_session(name: str, kubeconfig_location: str, server: libtmux.Server):
if server.has_session(name):
return
else:
session = server.new_session(name)
session.set_environment("KUBECONFIG", kubeconfig_location)
        # new_session creates a default window and pane that do not have KUBECONFIG set, so export it there manually
session.attached_window.attached_pane.send_keys("export KUBECONFIG={}".format(kubeconfig_location))
| 32.421053 | 109 | 0.719156 | [
"MIT"
] | kiemlicz/kmux | kmux/tmux.py | 616 | Python |
import numpy as np
import cv2
from PIL import Image
img_form = "jpg"
img_out_dir = "./output_images"
vid_form = "mp4"
vid_out_dir = "./test_videos_output"
class array_image:
def __init__(self):
self.image = None
self.binary_image = None
def store(self, name):
name = img_out_dir + "/" + name + "." + img_form
print("Saving image: " + name)
im = Image.fromarray(self.binary_image)
im.save(name)
class color(array_image):
def __init__(self, caller=None, color = "Gr"):
threshold = {'R':(200, 255), 'G':(200, 255), 'B':(200, 255), 'H':(15, 100), 'L':(0,255), 'S':(90, 255), 'Gr':(200, 255)}
self.available = False
self.binary_available = False
self.image = None
self.binary_image = None
self.caller = caller
self.color = color
self.threshold = threshold[self.color]
def get(self, binary=False, masked=False, thresh=None):
ret = 0
if (self.available) & (thresh==None):
if binary:
if self.binary_available:
ret = self.binary_image
else:
self.binary_image = self.color_select(color=self.color, binary=True)
self.binary_available = True
ret = self.binary_image
else:
ret = self.image
else:
self.image = self.color_select(color=self.color, binary=False)
self.available = True
if binary:
self.binary_image = self.color_select(color=self.color, binary=True, thresh=None)
self.binary_available = True
ret = self.binary_image
else:
ret = self.image
if masked:
ret = self.caller.region_of_interest(ret)
return ret
def grayscale(self):
"""Applies the Grayscale transform
This will return an image with only one color channel
but NOTE: to see the returned image as grayscale
(assuming your grayscaled image is called 'gray')
you should call plt.imshow(gray, cmap='gray')"""
return cv2.cvtColor(self.caller.image, cv2.COLOR_RGB2GRAY)
# Or use BGR2GRAY if you read an image with cv2.imread()
# return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
def color_select(self, color='R', binary = True, thresh=None):
#image received is RGB mpimg.imread
img = np.copy(self.caller.image)
RGB_colors = {'R':0, 'G':1, 'B':2}
HLS_colors = {'H':0, 'L':1, 'S':2}
if color in RGB_colors:
channel = img[:,:,RGB_colors[color]]
elif color in HLS_colors:
img = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
channel = img[:, :, HLS_colors[color]]
else:
channel = self.grayscale()
if binary:
if not thresh:
thresh = self.threshold
binary_output = np.zeros_like(img[:,:,0])
binary_output[(channel > thresh[0]) & (channel <= thresh[1])] = 1
return binary_output
else:
return channel
| 34.615385 | 128 | 0.563492 | [
"MIT"
] | mhhm2005eg/CarND-Advanced-Lane-Lines | color.py | 3,150 | Python |
import numpy as np
from network.activation import Activation
from network.layer import Layer
from network.utils.im2col_cython import im2col_cython, col2im_cython
class Convolution(Layer):
def __init__(self, filter_shape, stride, padding, dropout_rate: float = 0, activation: Activation = None,
last_layer=False, weight_initializer=None, fb_weight_initializer=None) -> None:
assert len(filter_shape) == 4, \
"invalid filter shape: 4-tuple required, {}-tuple given".format(len(filter_shape))
super().__init__()
self.filter_shape = filter_shape
self.stride = stride
self.padding = padding
self.dropout_rate = dropout_rate
self.activation = activation
self.last_layer = last_layer
self.weight_initializer = weight_initializer
self.fb_weight_initializer = fb_weight_initializer
def initialize(self, input_size, num_classes, train_method) -> tuple:
assert np.size(input_size) == 3, \
"invalid input size: 3-tuple required for convolution layer"
c_in, h_in, w_in = input_size
f, c_f, h_f, w_f = self.filter_shape
assert c_in == c_f, \
"input channel dimension ({}) not compatible with filter channel dimension ({})".format(c_in, c_f)
assert (h_in - h_f + 2 * self.padding) % self.stride == 0, \
"filter width ({}) not compatible with input width ({})".format(h_f, h_in)
assert (w_in - w_f + 2 * self.padding) % self.stride == 0, \
"filter height ({}) not compatible with input height ({})".format(h_f, h_in)
self.h_out = ((h_in - h_f + 2 * self.padding) // self.stride) + 1
self.w_out = ((w_in - w_f + 2 * self.padding) // self.stride) + 1
# initialize weights
if self.weight_initializer is None:
sqrt_fan_in = np.sqrt(c_in * h_in * w_in)
self.W = np.random.uniform(low=-1 / sqrt_fan_in, high=1 / sqrt_fan_in, size=self.filter_shape)
else:
self.W = self.weight_initializer.init(dim=(f, c_f, h_f, w_f))
# initialize feedback weights
if self.fb_weight_initializer is None:
sqrt_fan_out = np.sqrt(f * self.h_out * self.w_out)
# self.B = np.random.uniform(low=-1 / sqrt_fan_out, high=1 / sqrt_fan_out, size=(num_classes, f, self.h_out, self.w_out))
self.B = np.random.uniform(low=-1 / sqrt_fan_out, high=1 / sqrt_fan_out, size=(num_classes, f * self.h_out * self.w_out))
else:
# self.B = self.fb_weight_initializer.init(dim=(num_classes, f, self.h_out, self.w_out))
self.B = self.fb_weight_initializer.init(dim=(num_classes, f * self.h_out * self.w_out))
# initialize bias units
self.b = np.zeros(f)
return f, self.h_out, self.w_out
def forward(self, X, mode='predict') -> np.ndarray:
n_in, c, h_in, w_in = X.shape
n_f, c, h_f, w_f = self.W.shape
self.x_cols = im2col_cython(X, h_f, w_f, self.padding, self.stride) # <->
z = self.W.reshape((n_f, -1)).dot(self.x_cols)
z += self.b.reshape(-1, 1) # +
z = z.reshape(n_f, self.h_out, self.w_out, n_in).transpose(3, 0, 1, 2)
self.a_in = X
if self.activation is None:
self.a_out = z
else:
self.a_out = self.activation.forward(z)
if mode == 'train' and self.dropout_rate > 0:
# self.dropout_mask = np.random.binomial(size=self.a_out.shape, n=1, p=1 - self.dropout_rate)
self.dropout_mask = (np.random.rand(*self.a_out.shape) > self.dropout_rate).astype(int)
self.a_out *= self.dropout_mask
return self.a_out
def dfa(self, E: np.ndarray) -> tuple:
# E = np.einsum('ij,jklm->iklm', E, self.B)
n_f, c_f, h_f, w_f = self.W.shape
E = np.dot(E, self.B).reshape((-1, n_f, self.h_out, self.w_out))
if self.dropout_rate > 0:
E *= self.dropout_mask
if self.activation is None:
E *= self.a_out
else:
E *= self.activation.gradient(self.a_out)
dW = E.transpose((1, 2, 3, 0)).reshape(n_f, -1).dot(self.x_cols.T).reshape(self.W.shape)
db = np.sum(E, axis=(0, 2, 3))
return dW, db
def back_prob(self, E: np.ndarray) -> tuple:
if self.dropout_rate > 0:
E *= self.dropout_mask
n_in, c_in, h_in, w_in = self.a_in.shape
n_f, c_f, h_f, w_f = self.W.shape
if self.activation is None:
E *= self.a_out
else:
E *= self.activation.gradient(self.a_out)
delta_reshaped = E.transpose((1, 2, 3, 0)).reshape(n_f, -1)
dX_cols = self.W.reshape(n_f, -1).T.dot(delta_reshaped)
dX = col2im_cython(dX_cols, n_in, c_in, h_in, w_in, h_f, w_f, self.padding, self.stride)
dW = delta_reshaped.dot(self.x_cols.T).reshape(self.W.shape)
db = np.sum(E, axis=(0, 2, 3))
return dX, dW, db
def has_weights(self) -> bool:
return True
| 40.384 | 133 | 0.602813 | [
"MIT"
] | metataro/DirectFeedbackAlignment | network/layers/convolution_im2col.py | 5,048 | Python |
#!/usr/bin/python
#
# Copyright 2018-2022 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from mock import MagicMock, patch
from polyaxon.cli.artifacts import artifacts
from polyaxon_sdk import V1ProjectVersionKind
from tests.test_cli.utils import BaseCommandTestCase
@pytest.mark.cli_mark
class TestCliArtifacts(BaseCommandTestCase):
@patch("polyaxon_sdk.ProjectsV1Api.create_version")
@patch("polyaxon_sdk.ProjectsV1Api.patch_version")
@patch("polyaxon_sdk.ProjectsV1Api.get_version")
def test_create_artifact(self, get_version, patch_version, create_version):
self.runner.invoke(artifacts, ["register"])
assert create_version.call_count == 0
assert patch_version.call_count == 0
assert get_version.call_count == 0
get_version.return_value = None
self.runner.invoke(artifacts, ["register", "--project=owner/foo"])
assert get_version.call_count == 1
assert patch_version.call_count == 0
assert create_version.call_count == 1
get_version.return_value = MagicMock(
kind=V1ProjectVersionKind.ARTIFACT,
)
self.runner.invoke(artifacts, ["register", "--project=owner/foo"])
assert get_version.call_count == 2
assert patch_version.call_count == 0
assert create_version.call_count == 1
self.runner.invoke(artifacts, ["register", "--project=owner/foo", "--force"])
assert get_version.call_count == 3
assert patch_version.call_count == 1
assert create_version.call_count == 1
@patch("polyaxon_sdk.ProjectsV1Api.list_versions")
def test_list_artifacts(self, list_artifacts):
self.runner.invoke(artifacts, ["ls", "--project=owner/foo"])
assert list_artifacts.call_count == 1
@patch("polyaxon_sdk.ProjectsV1Api.get_version")
def test_get_artifact(self, get_artifact):
self.runner.invoke(artifacts, ["get", "-p", "admin/foo"])
assert get_artifact.call_count == 1
@patch("polyaxon_sdk.ProjectsV1Api.patch_version")
def test_update_artifact(self, update_artifact):
self.runner.invoke(
artifacts, ["update", "-p", "admin/foo", "--description=foo"]
)
assert update_artifact.call_count == 1
@patch("polyaxon_sdk.ProjectsV1Api.create_version_stage")
def test_update_artifact_stage(self, stage_artifact):
self.runner.invoke(
artifacts, ["stage", "-p", "admin/foo", "-to", "production", "--reason=foo"]
)
assert stage_artifact.call_count == 1
| 39.448718 | 88 | 0.702632 | [
"Apache-2.0"
] | polyaxon/cli | cli/tests/test_cli/test_artifacts.py | 3,077 | Python |
#encoding:utf-8
subreddit = 'rainbow6'
t_channel = '@r_rainbow6'
def send_post(submission, r2t):
return r2t.send_simple(submission)
| 15.444444 | 38 | 0.741007 | [
"MIT"
] | AliannejadiPourya/reddit2telegram | reddit2telegram/channels/r_rainbow6/app.py | 139 | Python |
version = '2.9.0'
| 9.5 | 18 | 0.526316 | [
"MIT"
] | pozi/PoziConnect | app/PoziConnect/version.py | 19 | Python |
import asyncio
import logging
from typing import Dict, List, Optional, Set, Tuple
from seno.types.blockchain_format.sized_bytes import bytes32
from seno.util.ints import uint32, uint128
log = logging.getLogger(__name__)
class SyncStore:
# Whether or not we are syncing
sync_mode: bool
long_sync: bool
peak_to_peer: Dict[bytes32, Set[bytes32]] # Header hash : peer node id
peer_to_peak: Dict[bytes32, Tuple[bytes32, uint32, uint128]] # peer node id : [header_hash, height, weight]
sync_target_header_hash: Optional[bytes32] # Peak hash we are syncing towards
sync_target_height: Optional[uint32] # Peak height we are syncing towards
peers_changed: asyncio.Event
batch_syncing: Set[bytes32] # Set of nodes which we are batch syncing from
backtrack_syncing: Dict[bytes32, int] # Set of nodes which we are backtrack syncing from, and how many threads
@classmethod
async def create(cls):
self = cls()
self.sync_mode = False
self.long_sync = False
self.sync_target_header_hash = None
self.sync_target_height = None
self.peak_fork_point = {}
self.peak_to_peer = {}
self.peer_to_peak = {}
self.peers_changed = asyncio.Event()
self.batch_syncing = set()
self.backtrack_syncing = {}
return self
def set_peak_target(self, peak_hash: bytes32, target_height: uint32):
self.sync_target_header_hash = peak_hash
self.sync_target_height = target_height
def get_sync_target_hash(self) -> Optional[bytes32]:
return self.sync_target_header_hash
    def get_sync_target_height(self) -> Optional[uint32]:
return self.sync_target_height
def set_sync_mode(self, sync_mode: bool):
self.sync_mode = sync_mode
def get_sync_mode(self) -> bool:
return self.sync_mode
def set_long_sync(self, long_sync: bool):
self.long_sync = long_sync
def get_long_sync(self) -> bool:
return self.long_sync
def peer_has_block(self, header_hash: bytes32, peer_id: bytes32, weight: uint128, height: uint32, new_peak: bool):
"""
Adds a record that a certain peer has a block.
"""
if header_hash == self.sync_target_header_hash:
self.peers_changed.set()
if header_hash in self.peak_to_peer:
self.peak_to_peer[header_hash].add(peer_id)
else:
self.peak_to_peer[header_hash] = {peer_id}
if new_peak:
self.peer_to_peak[peer_id] = (header_hash, height, weight)
def get_peers_that_have_peak(self, header_hashes: List[bytes32]) -> Set[bytes32]:
"""
Returns: peer ids of peers that have at least one of the header hashes.
"""
node_ids: Set[bytes32] = set()
for header_hash in header_hashes:
if header_hash in self.peak_to_peer:
for node_id in self.peak_to_peer[header_hash]:
node_ids.add(node_id)
return node_ids
def get_peak_of_each_peer(self) -> Dict[bytes32, Tuple[bytes32, uint32, uint128]]:
"""
Returns: dictionary of peer id to peak information.
"""
ret = {}
for peer_id, v in self.peer_to_peak.items():
if v[0] not in self.peak_to_peer:
continue
ret[peer_id] = v
return ret
def get_heaviest_peak(self) -> Optional[Tuple[bytes32, uint32, uint128]]:
"""
Returns: the header_hash, height, and weight of the heaviest block that one of our peers has notified
us of.
"""
if len(self.peer_to_peak) == 0:
return None
heaviest_peak_hash: Optional[bytes32] = None
heaviest_peak_weight: uint128 = uint128(0)
heaviest_peak_height: Optional[uint32] = None
for peer_id, (peak_hash, height, weight) in self.peer_to_peak.items():
if peak_hash not in self.peak_to_peer:
continue
if heaviest_peak_hash is None or weight > heaviest_peak_weight:
heaviest_peak_hash = peak_hash
heaviest_peak_weight = weight
heaviest_peak_height = height
assert heaviest_peak_hash is not None and heaviest_peak_weight is not None and heaviest_peak_height is not None
return heaviest_peak_hash, heaviest_peak_height, heaviest_peak_weight
async def clear_sync_info(self):
"""
Clears the peak_to_peer info which can get quite large.
"""
self.peak_to_peer = {}
def peer_disconnected(self, node_id: bytes32):
if node_id in self.peer_to_peak:
del self.peer_to_peak[node_id]
for peak, peers in self.peak_to_peer.items():
if node_id in peers:
self.peak_to_peer[peak].remove(node_id)
assert node_id not in self.peak_to_peer[peak]
self.peers_changed.set()
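# Minimal usage sketch (the 32-byte ids below are placeholder values, not real
# network data): register a single peer's peak and read it back as the
# heaviest known peak.
async def _example_sync_store_usage() -> None:
    store = await SyncStore.create()
    peer_id = bytes32(b"\x01" * 32)
    peak_hash = bytes32(b"\x02" * 32)
    store.peer_has_block(peak_hash, peer_id, uint128(1000), uint32(42), new_peak=True)
    assert store.get_heaviest_peak() == (peak_hash, uint32(42), uint128(1000))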
| 35.992701 | 119 | 0.653012 | [
"Apache-2.0"
] | AcidBurnSB/seno-blockchain | seno/full_node/sync_store.py | 4,931 | Python |
# coding: utf-8
"""
Mailchimp Marketing API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 3.0.74
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AddListMembers1(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'email_address': 'str',
'email_type': 'str',
'status': 'str',
'merge_fields': 'dict(str, object)',
'interests': 'dict(str, bool)',
'language': 'str',
'vip': 'bool',
'location': 'Location',
'marketing_permissions': 'list[MarketingPermission1]',
'ip_signup': 'str',
'timestamp_signup': 'datetime',
'ip_opt': 'str',
'timestamp_opt': 'datetime',
'tags': 'list[str]'
}
attribute_map = {
'email_address': 'email_address',
'email_type': 'email_type',
'status': 'status',
'merge_fields': 'merge_fields',
'interests': 'interests',
'language': 'language',
'vip': 'vip',
'location': 'location',
'marketing_permissions': 'marketing_permissions',
'ip_signup': 'ip_signup',
'timestamp_signup': 'timestamp_signup',
'ip_opt': 'ip_opt',
'timestamp_opt': 'timestamp_opt',
'tags': 'tags'
}
def __init__(self, email_address=None, email_type=None, status=None, merge_fields=None, interests=None, language=None, vip=None, location=None, marketing_permissions=None, ip_signup=None, timestamp_signup=None, ip_opt=None, timestamp_opt=None, tags=None): # noqa: E501
"""AddListMembers1 - a model defined in Swagger""" # noqa: E501
self._email_address = None
self._email_type = None
self._status = None
self._merge_fields = None
self._interests = None
self._language = None
self._vip = None
self._location = None
self._marketing_permissions = None
self._ip_signup = None
self._timestamp_signup = None
self._ip_opt = None
self._timestamp_opt = None
self._tags = None
self.discriminator = None
self.email_address = email_address
if email_type is not None:
self.email_type = email_type
self.status = status
if merge_fields is not None:
self.merge_fields = merge_fields
if interests is not None:
self.interests = interests
if language is not None:
self.language = language
if vip is not None:
self.vip = vip
if location is not None:
self.location = location
if marketing_permissions is not None:
self.marketing_permissions = marketing_permissions
if ip_signup is not None:
self.ip_signup = ip_signup
if timestamp_signup is not None:
self.timestamp_signup = timestamp_signup
if ip_opt is not None:
self.ip_opt = ip_opt
if timestamp_opt is not None:
self.timestamp_opt = timestamp_opt
if tags is not None:
self.tags = tags
@property
def email_address(self):
"""Gets the email_address of this AddListMembers1. # noqa: E501
Email address for a subscriber. # noqa: E501
:return: The email_address of this AddListMembers1. # noqa: E501
:rtype: str
"""
return self._email_address
@email_address.setter
def email_address(self, email_address):
"""Sets the email_address of this AddListMembers1.
Email address for a subscriber. # noqa: E501
:param email_address: The email_address of this AddListMembers1. # noqa: E501
:type: str
"""
if email_address is None:
raise ValueError("Invalid value for `email_address`, must not be `None`") # noqa: E501
self._email_address = email_address
@property
def email_type(self):
"""Gets the email_type of this AddListMembers1. # noqa: E501
Type of email this member asked to get ('html' or 'text'). # noqa: E501
:return: The email_type of this AddListMembers1. # noqa: E501
:rtype: str
"""
return self._email_type
@email_type.setter
def email_type(self, email_type):
"""Sets the email_type of this AddListMembers1.
Type of email this member asked to get ('html' or 'text'). # noqa: E501
:param email_type: The email_type of this AddListMembers1. # noqa: E501
:type: str
"""
self._email_type = email_type
@property
def status(self):
"""Gets the status of this AddListMembers1. # noqa: E501
Subscriber's current status. # noqa: E501
:return: The status of this AddListMembers1. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this AddListMembers1.
Subscriber's current status. # noqa: E501
:param status: The status of this AddListMembers1. # noqa: E501
:type: str
"""
if status is None:
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
allowed_values = ["subscribed", "unsubscribed", "cleaned", "pending", "transactional"] # noqa: E501
if status not in allowed_values:
raise ValueError(
"Invalid value for `status` ({0}), must be one of {1}" # noqa: E501
.format(status, allowed_values)
)
self._status = status
@property
def merge_fields(self):
"""Gets the merge_fields of this AddListMembers1. # noqa: E501
A dictionary of merge fields where the keys are the merge tags. See the [Merge Fields documentation](https://mailchimp.com/developer/marketing/docs/merge-fields/#structure) for more about the structure. # noqa: E501
:return: The merge_fields of this AddListMembers1. # noqa: E501
:rtype: dict(str, object)
"""
return self._merge_fields
@merge_fields.setter
def merge_fields(self, merge_fields):
"""Sets the merge_fields of this AddListMembers1.
A dictionary of merge fields where the keys are the merge tags. See the [Merge Fields documentation](https://mailchimp.com/developer/marketing/docs/merge-fields/#structure) for more about the structure. # noqa: E501
:param merge_fields: The merge_fields of this AddListMembers1. # noqa: E501
:type: dict(str, object)
"""
self._merge_fields = merge_fields
@property
def interests(self):
"""Gets the interests of this AddListMembers1. # noqa: E501
The key of this object's properties is the ID of the interest in question. # noqa: E501
:return: The interests of this AddListMembers1. # noqa: E501
:rtype: dict(str, bool)
"""
return self._interests
@interests.setter
def interests(self, interests):
"""Sets the interests of this AddListMembers1.
The key of this object's properties is the ID of the interest in question. # noqa: E501
:param interests: The interests of this AddListMembers1. # noqa: E501
:type: dict(str, bool)
"""
self._interests = interests
@property
def language(self):
"""Gets the language of this AddListMembers1. # noqa: E501
If set/detected, the [subscriber's language](https://mailchimp.com/help/view-and-edit-contact-languages/). # noqa: E501
:return: The language of this AddListMembers1. # noqa: E501
:rtype: str
"""
return self._language
@language.setter
def language(self, language):
"""Sets the language of this AddListMembers1.
If set/detected, the [subscriber's language](https://mailchimp.com/help/view-and-edit-contact-languages/). # noqa: E501
:param language: The language of this AddListMembers1. # noqa: E501
:type: str
"""
self._language = language
@property
def vip(self):
"""Gets the vip of this AddListMembers1. # noqa: E501
[VIP status](https://mailchimp.com/help/designate-and-send-to-vip-contacts/) for subscriber. # noqa: E501
:return: The vip of this AddListMembers1. # noqa: E501
:rtype: bool
"""
return self._vip
@vip.setter
def vip(self, vip):
"""Sets the vip of this AddListMembers1.
[VIP status](https://mailchimp.com/help/designate-and-send-to-vip-contacts/) for subscriber. # noqa: E501
:param vip: The vip of this AddListMembers1. # noqa: E501
:type: bool
"""
self._vip = vip
@property
def location(self):
"""Gets the location of this AddListMembers1. # noqa: E501
:return: The location of this AddListMembers1. # noqa: E501
:rtype: Location
"""
return self._location
@location.setter
def location(self, location):
"""Sets the location of this AddListMembers1.
:param location: The location of this AddListMembers1. # noqa: E501
:type: Location
"""
self._location = location
@property
def marketing_permissions(self):
"""Gets the marketing_permissions of this AddListMembers1. # noqa: E501
The marketing permissions for the subscriber. # noqa: E501
:return: The marketing_permissions of this AddListMembers1. # noqa: E501
:rtype: list[MarketingPermission1]
"""
return self._marketing_permissions
@marketing_permissions.setter
def marketing_permissions(self, marketing_permissions):
"""Sets the marketing_permissions of this AddListMembers1.
The marketing permissions for the subscriber. # noqa: E501
:param marketing_permissions: The marketing_permissions of this AddListMembers1. # noqa: E501
:type: list[MarketingPermission1]
"""
self._marketing_permissions = marketing_permissions
@property
def ip_signup(self):
"""Gets the ip_signup of this AddListMembers1. # noqa: E501
IP address the subscriber signed up from. # noqa: E501
:return: The ip_signup of this AddListMembers1. # noqa: E501
:rtype: str
"""
return self._ip_signup
@ip_signup.setter
def ip_signup(self, ip_signup):
"""Sets the ip_signup of this AddListMembers1.
IP address the subscriber signed up from. # noqa: E501
:param ip_signup: The ip_signup of this AddListMembers1. # noqa: E501
:type: str
"""
self._ip_signup = ip_signup
@property
def timestamp_signup(self):
"""Gets the timestamp_signup of this AddListMembers1. # noqa: E501
The date and time the subscriber signed up for the list in ISO 8601 format. # noqa: E501
:return: The timestamp_signup of this AddListMembers1. # noqa: E501
:rtype: datetime
"""
return self._timestamp_signup
@timestamp_signup.setter
def timestamp_signup(self, timestamp_signup):
"""Sets the timestamp_signup of this AddListMembers1.
The date and time the subscriber signed up for the list in ISO 8601 format. # noqa: E501
:param timestamp_signup: The timestamp_signup of this AddListMembers1. # noqa: E501
:type: datetime
"""
self._timestamp_signup = timestamp_signup
@property
def ip_opt(self):
"""Gets the ip_opt of this AddListMembers1. # noqa: E501
The IP address the subscriber used to confirm their opt-in status. # noqa: E501
:return: The ip_opt of this AddListMembers1. # noqa: E501
:rtype: str
"""
return self._ip_opt
@ip_opt.setter
def ip_opt(self, ip_opt):
"""Sets the ip_opt of this AddListMembers1.
The IP address the subscriber used to confirm their opt-in status. # noqa: E501
:param ip_opt: The ip_opt of this AddListMembers1. # noqa: E501
:type: str
"""
self._ip_opt = ip_opt
@property
def timestamp_opt(self):
"""Gets the timestamp_opt of this AddListMembers1. # noqa: E501
        The date and time the subscriber confirmed their opt-in status in ISO 8601 format. # noqa: E501
:return: The timestamp_opt of this AddListMembers1. # noqa: E501
:rtype: datetime
"""
return self._timestamp_opt
@timestamp_opt.setter
def timestamp_opt(self, timestamp_opt):
"""Sets the timestamp_opt of this AddListMembers1.
        The date and time the subscriber confirmed their opt-in status in ISO 8601 format. # noqa: E501
:param timestamp_opt: The timestamp_opt of this AddListMembers1. # noqa: E501
:type: datetime
"""
self._timestamp_opt = timestamp_opt
@property
def tags(self):
"""Gets the tags of this AddListMembers1. # noqa: E501
The tags that are associated with a member. # noqa: E501
:return: The tags of this AddListMembers1. # noqa: E501
:rtype: list[str]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""Sets the tags of this AddListMembers1.
The tags that are associated with a member. # noqa: E501
:param tags: The tags of this AddListMembers1. # noqa: E501
:type: list[str]
"""
self._tags = tags
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AddListMembers1, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AddListMembers1):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
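# Minimal usage sketch: build a member payload and serialize it for the API.
# The email address, merge-field name and tag below are placeholder values.
def _example_add_list_members1():
    member = AddListMembers1(
        email_address="[email protected]",
        status="subscribed",
        merge_fields={"FNAME": "Jane"},
        tags=["newsletter"],
    )
    return member.to_dict()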
| 31.934426 | 273 | 0.61345 | [
"Apache-2.0"
] | RWiggers/mailchimp-marketing-asyncio | mailchimp_marketing_asyncio/models/add_list_members1.py | 15,584 | Python |
#Python Course #06 - Nested Conditions
#First Example
#nome = str(input('What is your name: '))
#if nome == 'Jefferson':
#    print('What a beautiful name')
#else:
#    print('Your name is pretty ordinary.')
#print('Have a good day, {}'.format(nome))
#Second Example
nome = str(input('What is your name: '))
if nome == 'Jefferson':
    print('What a beautiful name')
elif nome == 'Pedro' or nome == 'Marcos' or nome == 'Paulo':
    print('Your name is quite popular in Brazil.')
elif nome in 'Jennifer Vitoria Mariana Deborah':
    print('What a lovely name you have!')
else:
    print('Your name is pretty ordinary.')
print('Have a good day, {}'.format(nome)) | 27.478261 | 60 | 0.648734 | [
"MIT"
] | ElHa07/Python | Curso Python/Aula06/CondicoesAinhada.py | 640 | Python |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_space_comm_station_talus.iff"
result.attribute_template_id = 9
result.stfName("npc_name","selonian_base_male")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | 26.588235 | 70 | 0.734513 | [
"MIT"
] | SWGANHServices/GameServer_Legacy | data/scripts/templates/object/mobile/shared_space_comm_station_talus.py | 452 | Python |
"""Config flow to configure Xiaomi Miio."""
import logging
from re import search
from micloud import MiCloud
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import SOURCE_REAUTH
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_TOKEN
from homeassistant.core import callback
from homeassistant.helpers.device_registry import format_mac
from .const import (
CONF_CLOUD_COUNTRY,
CONF_CLOUD_PASSWORD,
CONF_CLOUD_SUBDEVICES,
CONF_CLOUD_USERNAME,
CONF_DEVICE,
CONF_FLOW_TYPE,
CONF_GATEWAY,
CONF_MAC,
CONF_MANUAL,
CONF_MODEL,
DEFAULT_CLOUD_COUNTRY,
DOMAIN,
MODELS_ALL,
MODELS_ALL_DEVICES,
MODELS_GATEWAY,
SERVER_COUNTRY_CODES,
)
from .device import ConnectXiaomiDevice
_LOGGER = logging.getLogger(__name__)
DEVICE_SETTINGS = {
vol.Required(CONF_TOKEN): vol.All(str, vol.Length(min=32, max=32)),
}
DEVICE_CONFIG = vol.Schema({vol.Required(CONF_HOST): str}).extend(DEVICE_SETTINGS)
DEVICE_MODEL_CONFIG = vol.Schema({vol.Required(CONF_MODEL): vol.In(MODELS_ALL)})
DEVICE_CLOUD_CONFIG = vol.Schema(
{
vol.Optional(CONF_CLOUD_USERNAME): str,
vol.Optional(CONF_CLOUD_PASSWORD): str,
vol.Optional(CONF_CLOUD_COUNTRY, default=DEFAULT_CLOUD_COUNTRY): vol.In(
SERVER_COUNTRY_CODES
),
vol.Optional(CONF_MANUAL, default=False): bool,
}
)
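# Minimal sketch of how the cloud-step schema normalizes input: optional keys
# with defaults (country, manual) are filled in by voluptuous when the user
# leaves them untouched. The credentials below are placeholder values.
def _example_validate_cloud_input():
    return DEVICE_CLOUD_CONFIG(
        {CONF_CLOUD_USERNAME: "[email protected]", CONF_CLOUD_PASSWORD: "hunter2"}
    )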
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Options for the component."""
def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
"""Init object."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Manage the options."""
errors = {}
if user_input is not None:
use_cloud = user_input.get(CONF_CLOUD_SUBDEVICES, False)
cloud_username = self.config_entry.data.get(CONF_CLOUD_USERNAME)
cloud_password = self.config_entry.data.get(CONF_CLOUD_PASSWORD)
cloud_country = self.config_entry.data.get(CONF_CLOUD_COUNTRY)
if use_cloud and (
not cloud_username or not cloud_password or not cloud_country
):
errors["base"] = "cloud_credentials_incomplete"
# trigger re-auth flow
self.hass.async_create_task(
self.hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_REAUTH},
data=self.config_entry.data,
)
)
if not errors:
return self.async_create_entry(title="", data=user_input)
settings_schema = vol.Schema(
{
vol.Optional(
CONF_CLOUD_SUBDEVICES,
default=self.config_entry.options.get(CONF_CLOUD_SUBDEVICES, False),
): bool
}
)
return self.async_show_form(
step_id="init", data_schema=settings_schema, errors=errors
)
class XiaomiMiioFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a Xiaomi Miio config flow."""
VERSION = 1
def __init__(self):
"""Initialize."""
self.host = None
self.mac = None
self.token = None
self.model = None
self.name = None
self.cloud_username = None
self.cloud_password = None
self.cloud_country = None
self.cloud_devices = {}
@staticmethod
@callback
def async_get_options_flow(config_entry) -> OptionsFlowHandler:
"""Get the options flow."""
return OptionsFlowHandler(config_entry)
async def async_step_reauth(self, user_input=None):
"""Perform reauth upon an authentication error or missing cloud credentials."""
self.host = user_input[CONF_HOST]
self.token = user_input[CONF_TOKEN]
self.mac = user_input[CONF_MAC]
self.model = user_input.get(CONF_MODEL)
return await self.async_step_reauth_confirm()
async def async_step_reauth_confirm(self, user_input=None):
"""Dialog that informs the user that reauth is required."""
if user_input is not None:
return await self.async_step_cloud()
return self.async_show_form(
step_id="reauth_confirm", data_schema=vol.Schema({})
)
async def async_step_import(self, conf: dict):
"""Import a configuration from config.yaml."""
self.host = conf[CONF_HOST]
self.token = conf[CONF_TOKEN]
self.name = conf.get(CONF_NAME)
self.model = conf.get(CONF_MODEL)
self.context.update(
{"title_placeholders": {"name": f"YAML import {self.host}"}}
)
return await self.async_step_connect()
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
return await self.async_step_cloud()
async def async_step_zeroconf(self, discovery_info):
"""Handle zeroconf discovery."""
name = discovery_info.get("name")
self.host = discovery_info.get("host")
self.mac = discovery_info.get("properties", {}).get("mac")
if self.mac is None:
poch = discovery_info.get("properties", {}).get("poch", "")
result = search(r"mac=\w+", poch)
if result is not None:
self.mac = result.group(0).split("=")[1]
if not name or not self.host or not self.mac:
return self.async_abort(reason="not_xiaomi_miio")
self.mac = format_mac(self.mac)
# Check which device is discovered.
for gateway_model in MODELS_GATEWAY:
if name.startswith(gateway_model.replace(".", "-")):
unique_id = self.mac
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured({CONF_HOST: self.host})
self.context.update(
{"title_placeholders": {"name": f"Gateway {self.host}"}}
)
return await self.async_step_cloud()
for device_model in MODELS_ALL_DEVICES:
if name.startswith(device_model.replace(".", "-")):
unique_id = self.mac
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured({CONF_HOST: self.host})
self.context.update(
{"title_placeholders": {"name": f"{device_model} {self.host}"}}
)
return await self.async_step_cloud()
# Discovered device is not yet supported
_LOGGER.debug(
"Not yet supported Xiaomi Miio device '%s' discovered with host %s",
name,
self.host,
)
return self.async_abort(reason="not_xiaomi_miio")
def extract_cloud_info(self, cloud_device_info):
"""Extract the cloud info."""
if self.host is None:
self.host = cloud_device_info["localip"]
if self.mac is None:
self.mac = format_mac(cloud_device_info["mac"])
if self.model is None:
self.model = cloud_device_info["model"]
if self.name is None:
self.name = cloud_device_info["name"]
self.token = cloud_device_info["token"]
async def async_step_cloud(self, user_input=None):
"""Configure a xiaomi miio device through the Miio Cloud."""
errors = {}
if user_input is not None:
if user_input[CONF_MANUAL]:
return await self.async_step_manual()
cloud_username = user_input.get(CONF_CLOUD_USERNAME)
cloud_password = user_input.get(CONF_CLOUD_PASSWORD)
cloud_country = user_input.get(CONF_CLOUD_COUNTRY)
if not cloud_username or not cloud_password or not cloud_country:
errors["base"] = "cloud_credentials_incomplete"
return self.async_show_form(
step_id="cloud", data_schema=DEVICE_CLOUD_CONFIG, errors=errors
)
miio_cloud = MiCloud(cloud_username, cloud_password)
if not await self.hass.async_add_executor_job(miio_cloud.login):
errors["base"] = "cloud_login_error"
return self.async_show_form(
step_id="cloud", data_schema=DEVICE_CLOUD_CONFIG, errors=errors
)
devices_raw = await self.hass.async_add_executor_job(
miio_cloud.get_devices, cloud_country
)
if not devices_raw:
errors["base"] = "cloud_no_devices"
return self.async_show_form(
step_id="cloud", data_schema=DEVICE_CLOUD_CONFIG, errors=errors
)
self.cloud_devices = {}
for device in devices_raw:
parent_id = device.get("parent_id")
if not parent_id:
name = device["name"]
model = device["model"]
list_name = f"{name} - {model}"
self.cloud_devices[list_name] = device
self.cloud_username = cloud_username
self.cloud_password = cloud_password
self.cloud_country = cloud_country
if self.host is not None:
for device in self.cloud_devices.values():
cloud_host = device.get("localip")
if cloud_host == self.host:
self.extract_cloud_info(device)
return await self.async_step_connect()
if len(self.cloud_devices) == 1:
self.extract_cloud_info(list(self.cloud_devices.values())[0])
return await self.async_step_connect()
return await self.async_step_select()
return self.async_show_form(
step_id="cloud", data_schema=DEVICE_CLOUD_CONFIG, errors=errors
)
async def async_step_select(self, user_input=None):
"""Handle multiple cloud devices found."""
errors = {}
if user_input is not None:
cloud_device = self.cloud_devices[user_input["select_device"]]
self.extract_cloud_info(cloud_device)
return await self.async_step_connect()
select_schema = vol.Schema(
{vol.Required("select_device"): vol.In(list(self.cloud_devices))}
)
return self.async_show_form(
step_id="select", data_schema=select_schema, errors=errors
)
async def async_step_manual(self, user_input=None):
"""Configure a xiaomi miio device Manually."""
errors = {}
if user_input is not None:
self.token = user_input[CONF_TOKEN]
if user_input.get(CONF_HOST):
self.host = user_input[CONF_HOST]
return await self.async_step_connect()
if self.host:
schema = vol.Schema(DEVICE_SETTINGS)
else:
schema = DEVICE_CONFIG
return self.async_show_form(step_id="manual", data_schema=schema, errors=errors)
async def async_step_connect(self, user_input=None):
"""Connect to a xiaomi miio device."""
errors = {}
if self.host is None or self.token is None:
return self.async_abort(reason="incomplete_info")
if user_input is not None:
self.model = user_input[CONF_MODEL]
# Try to connect to a Xiaomi Device.
connect_device_class = ConnectXiaomiDevice(self.hass)
await connect_device_class.async_connect_device(self.host, self.token)
device_info = connect_device_class.device_info
if self.model is None and device_info is not None:
self.model = device_info.model
if self.model is None:
errors["base"] = "cannot_connect"
return self.async_show_form(
step_id="connect", data_schema=DEVICE_MODEL_CONFIG, errors=errors
)
if self.mac is None and device_info is not None:
self.mac = format_mac(device_info.mac_address)
unique_id = self.mac
existing_entry = await self.async_set_unique_id(
unique_id, raise_on_progress=False
)
if existing_entry:
data = existing_entry.data.copy()
data[CONF_HOST] = self.host
data[CONF_TOKEN] = self.token
if (
self.cloud_username is not None
and self.cloud_password is not None
and self.cloud_country is not None
):
data[CONF_CLOUD_USERNAME] = self.cloud_username
data[CONF_CLOUD_PASSWORD] = self.cloud_password
data[CONF_CLOUD_COUNTRY] = self.cloud_country
self.hass.config_entries.async_update_entry(existing_entry, data=data)
await self.hass.config_entries.async_reload(existing_entry.entry_id)
return self.async_abort(reason="reauth_successful")
if self.name is None:
self.name = self.model
flow_type = None
for gateway_model in MODELS_GATEWAY:
if self.model.startswith(gateway_model):
flow_type = CONF_GATEWAY
if flow_type is None:
for device_model in MODELS_ALL_DEVICES:
if self.model.startswith(device_model):
flow_type = CONF_DEVICE
if flow_type is not None:
return self.async_create_entry(
title=self.name,
data={
CONF_FLOW_TYPE: flow_type,
CONF_HOST: self.host,
CONF_TOKEN: self.token,
CONF_MODEL: self.model,
CONF_MAC: self.mac,
CONF_CLOUD_USERNAME: self.cloud_username,
CONF_CLOUD_PASSWORD: self.cloud_password,
CONF_CLOUD_COUNTRY: self.cloud_country,
},
)
errors["base"] = "unknown_device"
return self.async_show_form(
step_id="connect", data_schema=DEVICE_MODEL_CONFIG, errors=errors
)
| 36.502564 | 88 | 0.606631 | [
"Apache-2.0"
] | 0xFEEDC0DE64/homeassistant-core | homeassistant/components/xiaomi_miio/config_flow.py | 14,236 | Python |
#!/usr/bin/env python
import os
import re
import pickle
import json
import glob
import numpy as np
from abc import ABC, abstractmethod
from concurrent.futures import ProcessPoolExecutor
from contextlib import contextmanager
from collections import namedtuple, OrderedDict
from tqdm import tqdm
from .utils import img_to_jpeg_bytes, jpeg_bytes_to_img, _DEFAULT_JPEG_QUALITY
from pathlib import Path
#from simplejpeg import is_jpeg
def is_jpeg(data):
"""
Check whether a bytes object (or similar) contains JPEG (JFIF) data.
    Taken from simplejpeg.is_jpeg, but less strict: only the leading SOI marker
    is checked (not the EOI), since most JPEG viewers tolerate a missing EOI, so
    truncated data that starts with a valid SOI still passes.
:param data: JPEG (JFIF) data
:return: True if JPEG
"""
return data[:2] == b'\xFF\xD8'
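# Quick sketch of the check above: only the two-byte SOI marker 0xFFD8 at the
# start of the buffer is inspected, so anything that begins like a JPEG passes.
def _example_is_jpeg():
    assert is_jpeg(b"\xFF\xD8\xFF\xE0" + b"\x00" * 16)
    assert not is_jpeg(b"\x89PNG\r\n\x1a\n")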
ImgInfo = namedtuple('ImgInfo', ['loc',
'pad',
'length'])
class FileFormatException(Exception):
pass
class AbstractSerializer(ABC): # pragma: no cover
@abstractmethod
def load(self, file_name):
pass
@abstractmethod
def dump(self, thing, file_name):
pass
class PickleSerializer(AbstractSerializer):
def load(self, file_name):
with open(file_name, 'rb') as file_pointer:
return pickle.load(file_pointer)
def dump(self, thing, file_name):
with open(file_name, 'wb') as file_pointer:
pickle.dump(thing, file_pointer)
class JSONSerializer(AbstractSerializer):
def load(self, file_name):
with open(file_name, 'r') as file_pointer:
return json.load(file_pointer, object_pairs_hook=OrderedDict)
def dump(self, thing, file_name):
with open(file_name, 'w') as file_pointer:
json.dump(thing, file_pointer)
pickle_serializer = PickleSerializer()
json_serializer = JSONSerializer()
def extract_input_for_getitem(element):
if isinstance(element, tuple) and len(element) == 2:
id_, slice_ = element
elif isinstance(element, (int, str)):
id_, slice_ = element, None
else:
raise TypeError("Undefined input type! id or (id, slice) expected")
id_ = str(id_)
return id_, slice_
class GulpDirectory(object):
""" Represents a directory containing *.gulp and *.gmeta files.
Parameters
----------
output_dir: str
Path to the directory containing the files.
jpeg_decoder: callable that takes a JPEG stored as :py:class:`bytes` and returns
the desired decoded image format (e.g. np.ndarray)
Attributes
----------
all_meta_dicts: list of dicts
All meta dicts from all chunks as a list.
    chunk_lookup: dict: str -> int
        Mapping element id to chunk id.
    chunk_objs_lookup: dict: int -> GulpChunk
        Mapping chunk id to the corresponding GulpChunk object.
    merged_meta_dict: dict: id -> meta dict
        All meta dicts from all chunks merged into one.
"""
def __init__(self, output_dir, jpeg_decoder=jpeg_bytes_to_img):
self.output_dir = output_dir
self.jpeg_decoder = jpeg_decoder
self.chunk_objs_lookup = OrderedDict(zip(self._chunk_ids(), self._chunks()))
self.all_meta_dicts = [c.meta_dict for c in self.chunk_objs_lookup.values()]
self.num_chunks = len(self.chunk_objs_lookup)
self.chunk_lookup = {}
for chunk_id, chunk in self.chunk_objs_lookup.items():
for id_ in chunk.meta_dict:
self.chunk_lookup[id_] = chunk_id
self.merged_meta_dict = {}
for d in self.all_meta_dicts:
for k in d.keys():
assert k not in self.merged_meta_dict,\
"Duplicate id detected {}".format(k)
else:
self.merged_meta_dict.update(d)
def __iter__(self):
return iter(self.chunk_objs_lookup.values())
def chunks(self):
""" Return a generator over existing GulpChunk objects which are ready
to be opened and read from. """
return self.__iter__()
def _chunks(self):
return (GulpChunk(*paths, jpeg_decoder=self.jpeg_decoder) for paths in
self._existing_file_paths())
def new_chunks(self, total_new_chunks):
""" Return a generator over freshly setup GulpChunk objects which are ready
to be opened and written to.
Parameters
----------
total_new_chunks: int
The total number of new chunks to initialize.
"""
return ((GulpChunk(*paths, jpeg_decoder=self.jpeg_decoder) for paths in
self._allocate_new_file_paths(total_new_chunks)))
def __getitem__(self, element):
id_, _ = extract_input_for_getitem(element)
chunk_id = self.chunk_lookup[id_]
gulp_chunk = self.chunk_objs_lookup[chunk_id]
with gulp_chunk.open():
return gulp_chunk[element]
def _find_existing_data_paths(self):
return sorted(glob.glob(os.path.join(self.output_dir, 'data*.gulp')))
def _find_existing_meta_paths(self):
return sorted(glob.glob(os.path.join(self.output_dir, 'meta*.gmeta')))
def _load_label_dict(self):
return json.load(open(os.path.join(self.output_dir, 'label2idx.json'),
'rb'))
def _existing_file_paths(self):
data_paths = self._find_existing_data_paths()
meta_paths = self._find_existing_meta_paths()
assert len(data_paths) == len(meta_paths)
return zip(data_paths, meta_paths)
def _find_ids_from_paths(self, paths):
return [int(re.findall(r'\d+', os.path.basename(p))[0]) for p in paths]
def _chunk_ids(self):
data_paths = self._find_existing_data_paths()
meta_paths = self._find_existing_meta_paths()
data_ids = self._find_ids_from_paths(data_paths)
meta_ids = self._find_ids_from_paths(meta_paths)
assert data_ids == meta_ids
return data_ids
def _next_chunk_id(self):
existing_chunk_ids = self._chunk_ids()
next_chunk_id = 0
if len(existing_chunk_ids) > 0:
next_chunk_id = max([int(i) for i in existing_chunk_ids]) + 1
return next_chunk_id
def _allocate_new_file_paths(self, total_new_chunks):
next_chunk_id = self._next_chunk_id()
return [self._initialize_filenames(i)
for i in range(next_chunk_id,
next_chunk_id + total_new_chunks)]
def _initialize_filenames(self, chunk_id):
data_file_path = os.path.join(
self.output_dir, 'data_{}.gulp'.format(chunk_id))
meta_file_path = os.path.join(
self.output_dir, 'meta_{}.gmeta'.format(chunk_id))
return data_file_path, meta_file_path
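# Minimal random-access sketch for an already-gulped dataset; the directory
# path and item id below are placeholders.
def _example_gulp_directory_read(output_dir="/path/to/gulps", item_id="some_id"):
    gulp_directory = GulpDirectory(output_dir)
    frames, meta = gulp_directory[item_id]  # all frames of one item
    first_ten, _ = gulp_directory[(item_id, slice(0, 10))]  # or just a sub-range
    return len(frames), len(first_ten), meta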
class GulpChunk(object):
""" Represents a gulp chunk on disk.
Parameters
----------
data_file_path: str
Path to the *.gulp file.
meta_file_path: str
Path to the *.gmeta file.
serializer: subclass of AbstractSerializer
The type of serializer to use.
jpeg_decoder: callable that takes a JPEG stored as :py:class:`bytes` and returns
the desired decoded image format (e.g. np.ndarray)
"""
def __init__(self, data_file_path, meta_file_path,
serializer=json_serializer, jpeg_decoder=jpeg_bytes_to_img):
self.jpeg_decoder = jpeg_decoder
self.serializer = serializer
self.data_file_path = data_file_path
self.meta_file_path = meta_file_path
self.meta_dict = self._get_or_create_dict()
self._img_info = {}
self.fp = None
def __contains__(self, id_):
return str(id_) in self.meta_dict
def __getitem__(self, element):
id_, slice_ = extract_input_for_getitem(element)
return self.read_frames(id_, slice_)
def __iter__(self):
return self.iter_all()
def _get_frame_infos(self, id_):
id_ = str(id_)
if id_ in self.meta_dict:
return (self._get_or_create_img_info(id_),
self._copy_meta_data(id_))
def _copy_meta_data(self, id_):
return dict(self.meta_dict[id_]['meta_data'][0])
def _get_or_create_img_info(self, id_):
if id_ not in self._img_info:
self._img_info[id_] = [ImgInfo(*info) for info in self.meta_dict[id_]['frame_info']]
return self._img_info[id_]
def _get_or_create_dict(self):
if os.path.exists(self.meta_file_path):
return self.serializer.load(self.meta_file_path)
else:
return OrderedDict()
@staticmethod
def _default_factory():
return OrderedDict([('frame_info', []), ('meta_data', [])])
@staticmethod
def _pad_image(number):
return (4 - (number % 4)) % 4
def _append_meta(self, id_, meta_data):
id_ = str(id_)
if id_ not in self.meta_dict: # implements an OrderedDefaultDict
self.meta_dict[id_] = self._default_factory()
self.meta_dict[id_]['meta_data'].append(meta_data)
def _write_frame(self, id_, image, jpeg_encode_quality=_DEFAULT_JPEG_QUALITY):
loc = self.fp.tell()
if isinstance(image, (str, Path)):
# If image is a string or pathlib Path, assume that it is a path to a jpeg file
# and add it directly without decoding and encoding it.
with open(str(image), 'rb') as image_file:
img_str = image_file.read()
if not is_jpeg(img_str):
raise FileFormatException(f'Image file from path {image} does not appear to be a JPEG file.')
else: # np.array
img_str = img_to_jpeg_bytes(image, jpeg_encode_quality)
assert len(img_str) > 0
pad = self._pad_image(len(img_str))
record = img_str.ljust(len(img_str) + pad, b'\0')
assert len(record) > 0
img_info = ImgInfo(loc=loc,
length=len(record),
pad=pad)
id_ = str(id_)
if id_ not in self.meta_dict: # implements an OrderedDefaultDict
self.meta_dict[id_] = self._default_factory()
self.meta_dict[id_]['frame_info'].append(img_info)
self.fp.write(record)
def _write_frames(self, id_, frames, jpeg_encode_quality=_DEFAULT_JPEG_QUALITY):
for frame in frames:
self._write_frame(id_, frame, jpeg_encode_quality)
@contextmanager
def open(self, flag='rb'):
"""Open the gulp chunk for reading.
Parameters
----------
flag: str
'rb': Read binary
'wb': Write binary
'ab': Append to binary
Notes
-----
Works as a context manager but returns None.
"""
if flag in ['wb', 'rb', 'ab']:
self.fp = open(self.data_file_path, flag)
else:
m = "This file does not support the mode: '{}'".format(flag)
raise NotImplementedError(m)
yield
if flag in ['wb', 'ab']:
self.flush()
self.fp.close()
def flush(self):
"""Flush all buffers and write the meta file."""
self.fp.flush()
self.serializer.dump(self.meta_dict, self.meta_file_path)
def append(self, id_, meta_data, frames, jpeg_encode_quality=_DEFAULT_JPEG_QUALITY):
""" Append an item to the gulp.
Parameters
----------
id_ : str
The ID of the item
meta_data: dict
The meta-data associated with the item.
frames: list of numpy arrays
            The frames of the item as a list of numpy arrays consisting
of image pixel values.
"""
self._append_meta(id_, meta_data)
self._write_frames(id_, frames, jpeg_encode_quality=jpeg_encode_quality)
def read_frames(self, id_, slice_=None):
""" Read frames for a single item.
Parameters
----------
id_: str
The ID of the item
slice_: slice or list of ints:
A slice or list of indices with which to select frames.
Returns
-------
        frames (list of numpy arrays), meta (dict)
            The frames of the item as a list of numpy arrays of image pixel
            values, and the item's metadata.
"""
frame_infos, meta_data = self._get_frame_infos(id_)
slice_element = slice_ if slice_ is not None else slice(0, len(frame_infos))
def extract_frame(frame_info):
self.fp.seek(frame_info.loc)
record = self.fp.read(frame_info.length)
img_str = record[:len(record)-frame_info.pad]
img = self.jpeg_decoder(img_str)
return img
if isinstance(slice_element, (list, np.ndarray)):
selected_frame_infos = [frame_infos[idx] for idx in slice_element]
else:
selected_frame_infos = frame_infos[slice_element]
frames = [extract_frame(frame_info)
for frame_info in selected_frame_infos]
return frames, meta_data
def iter_all(self, accepted_ids=None, shuffle=False):
""" Iterate over all frames in the gulp.
Parameters
----------
accepted_ids: list of str
A filter for accepted ids.
shuffle: bool
Shuffle the items or not.
Returns
-------
iterator
            An iterator that yields a series of (frames, meta) tuples. See
`read_frames` for details.
"""
ids = self.meta_dict.keys()
if accepted_ids is not None:
intersection = list(set(ids) & set(accepted_ids))
ids = [id_ for id_ in ids if id_ in intersection]
if shuffle:
ids = list(ids)
np.random.shuffle(ids)
with self.open('rb'):
for id_ in ids:
frames, meta = self.read_frames(id_)
yield frames, meta
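# Minimal write/read round trip for a single chunk; the file names, the item
# id and the black dummy frames below are illustrative only.
def _example_gulp_chunk_roundtrip(data_path="data_0.gulp", meta_path="meta_0.gmeta"):
    chunk = GulpChunk(data_path, meta_path)
    frames = [np.zeros((64, 64, 3), dtype=np.uint8) for _ in range(4)]
    with chunk.open("wb"):
        chunk.append("item_0", {"label": "example"}, frames)
    with chunk.open("rb"):
        decoded, meta = chunk.read_frames("item_0")
    return len(decoded), meta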
class ChunkWriter(object):
"""Can write from an adapter to a gulp chunk.
Parameters
----------
adapter: subclass of AbstractDatasetAdapter
The adapter to get items from.
"""
def __init__(self, adapter):
self.adapter = adapter
def write_chunk(self, output_chunk, input_slice):
"""Write from an input slice in the adapter to an output chunk.
Parameters
----------
output_chunk: GulpChunk
The chunk to write to
input_slice: slice
The slice to use from the adapter.
"""
with output_chunk.open('wb'):
for video in self.adapter.iter_data(input_slice):
id_ = video['id']
meta_data = video['meta']
frames = video['frames']
if len(frames) > 0:
output_chunk.append(id_, meta_data, frames, self.adapter.jpeg_encode_quality())
else:
print("Failed to write video with id: {}; no frames"
.format(id_))
def calculate_chunk_slices(items_per_chunk, num_items):
"""Calculate slices for indexing an adapter.
Parameters
----------
items_per_chunk: int
Approximate number of items per chunk.
num_items: int
Total number of items.
Returns
-------
list of slices
"""
assert items_per_chunk > 0
assert num_items > 0
return [slice(i, min(i + items_per_chunk, num_items))
for i in range(0, num_items, items_per_chunk)]
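# For example, 10 items at roughly 4 items per chunk produce three slices,
# with the last chunk simply being smaller:
def _example_calculate_chunk_slices():
    assert calculate_chunk_slices(4, 10) == [slice(0, 4), slice(4, 8), slice(8, 10)]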
class GulpIngestor(object):
"""Ingest items from an adapter into an gulp chunks.
Parameters
----------
adapter: subclass of AbstractDatasetAdapter
The adapter to ingest from.
output_folder: str
The folder/directory to write to.
videos_per_chunk: int
The total number of items per chunk.
num_workers: int
The level of parallelism.
"""
def __init__(self, adapter, output_folder, videos_per_chunk, num_workers):
assert int(num_workers) > 0
self.adapter = adapter
self.output_folder = output_folder
self.videos_per_chunk = int(videos_per_chunk)
self.num_workers = int(num_workers)
def __call__(self):
os.makedirs(self.output_folder, exist_ok=True)
chunk_slices = calculate_chunk_slices(self.videos_per_chunk,
len(self.adapter))
gulp_directory = GulpDirectory(self.output_folder)
new_chunks = gulp_directory.new_chunks(len(chunk_slices))
chunk_writer = ChunkWriter(self.adapter)
with ProcessPoolExecutor(max_workers=self.num_workers) as executor:
result = executor.map(chunk_writer.write_chunk,
new_chunks,
chunk_slices)
for r in tqdm(result,
desc='Chunks finished',
unit='chunk',
dynamic_ncols=True,
total=len(chunk_slices)):
pass
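# Minimal ingestion sketch. ``_ListAdapter`` is a hypothetical stand-in for a
# gulpio2 adapter; ChunkWriter only needs __len__, iter_data and
# jpeg_encode_quality from it. The output folder, id and frames are placeholders.
class _ListAdapter:
    def __init__(self, items):
        self.items = items  # each item: {'id': str, 'meta': dict, 'frames': list}
    def __len__(self):
        return len(self.items)
    def iter_data(self, input_slice):
        return iter(self.items[input_slice])
    def jpeg_encode_quality(self):
        return _DEFAULT_JPEG_QUALITY
def _example_ingest(output_folder="/tmp/gulp_out"):
    items = [{"id": "vid_0",
              "meta": {"label": "example"},
              "frames": [np.zeros((32, 32, 3), dtype=np.uint8)]}]
    GulpIngestor(_ListAdapter(items), output_folder,
                 videos_per_chunk=1, num_workers=1)()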
| 32.685824 | 143 | 0.614582 | [
"MIT"
] | kiyoon/GulpIO2 | src/gulpio2/fileio.py | 17,062 | Python |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from extensions.rich_text_components import base
NONNEGATIVE_INT_SCHEMA = {
'type': 'int',
'validators': [{
'id': 'is_at_least',
'min_value': 0
}],
}
class Video(base.BaseRichTextComponent):
"""A rich-text component representing a YouTube video."""
name = 'Video'
category = 'Basic Input'
description = 'A YouTube video.'
frontend_name = 'video'
tooltip = 'Insert video'
_customization_arg_specs = [{
'name': 'video_id',
'description': (
'The YouTube id for this video. This is the 11-character string '
'after \'v=\' in the video URL.'),
'schema': {
'type': 'unicode',
},
'default_value': '',
}, {
'name': 'start',
'description': (
'Video start time in seconds: (leave at 0 to start at the '
'beginning.)'),
'schema': NONNEGATIVE_INT_SCHEMA,
'default_value': 0
}, {
'name': 'end',
'description': (
'Video end time in seconds: (leave at 0 to play until the end.)'),
'schema': NONNEGATIVE_INT_SCHEMA,
'default_value': 0
}, {
'name': 'autoplay',
'description': (
'Autoplay this video once the question has loaded?'),
'schema': {
'type': 'bool'
},
'default_value': False,
}]
icon_data_url = (
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAA'
'ABGdBTUEAAK/INwWK6QAAABl0RVh0%0AU29mdHdhcmUAQWRvYmUgSW1hZ2VSZWFkeXHJZ'
'TwAAAIfSURBVDjLpZNPaBNBGMXfbrubzBqbg4kL%0A0lJLgiVKE/AP6Kl6UUFQNAeDIAj'
'VS08aELx59GQPAREV/4BeiqcqROpRD4pUNCJSS21OgloISWME%0AZ/aPb6ARdNeTCz92m'
'O%2B9N9/w7RphGOJ/nsH%2Bolqtvg%2BCYJR8q9VquThxuVz%2BoJTKeZ63Uq/XC38E%0'
'A0Jj3ff8%2BOVupVGLbolkzQw5HOqAxQU4wXWWnZrykmYD0QsgAOJe9hpEUcPr8i0GaJ8'
'n2vs/sL2h8%0AR66TpVfWTdETHWE6GRGKjGiiKNLii5BSLpN7pBHpgMYhMkm8tPUWz3sL'
'2D1wFaY/jvnWcTTaE5Dy%0AjMfTT5J0XIAiTRYn3ASwZ1MKbTmN7z%2BKaHUOYqmb1fcP'
'iNa4kQBuyvWAHYfcHGzDgYcx9NKrwJYH%0ACAyF21JiPWBnXMAQOea6bmn%2B4ueYGZi8'
'gtymNVobF7BG5prNpjd%2BeW6X4BSUD0gOdCpzA8MpA/v2%0Av15kl4%2BpK0emwHSbjJ'
'GBlz%2BvYM1fQeDrYOBTdzOGvDf6EFNr%2BLYjHbBgsaCLxr%2BmoNQjU2vYhRXp%0AgI'
'UOmSWWnsJRfjlOZhrexgtYDZ/gWbetNRbNs6QT10GJglNk64HMaGgbAkoMo5fiFNy7CKD'
'QUGqE%0A5r38YktxAfSqW7Zt33l66WtkAkACjuNsaLVaDxlw5HdJ/86aYrG4WCgUZD6fX'
'%2Bjv/U0ymfxoWVZo%0AmuZyf%2B8XqfGP49CCrBUAAAAASUVORK5CYII%3D%0A'
)
| 36.686047 | 79 | 0.675436 | [
"Apache-2.0"
] | Atlas-Sailed-Co/oppia | extensions/rich_text_components/Video/Video.py | 3,155 | Python |
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mocktest import *
from pyqcy import *
import neat.locals.underload.trivial as trivial
import logging
logging.disable(logging.CRITICAL)
class Trivial(TestCase):
@qc(10)
def always_underloaded_factory(
time_step=int_(min=0, max=10),
migration_time=float_(min=0, max=10),
utilization=list_(of=float)
):
alg = trivial.always_underloaded_factory(time_step, migration_time, {})
assert alg(utilization) == (True, {})
def test_threshold_factory(self):
alg = trivial.threshold_factory(300, 20., {'threshold': 0.5})
self.assertEqual(alg([]), (False, {}))
self.assertEqual(alg([0.0, 0.0]), (True, {}))
self.assertEqual(alg([0.0, 0.4]), (True, {}))
self.assertEqual(alg([0.0, 0.5]), (True, {}))
self.assertEqual(alg([0.0, 0.6]), (False, {}))
self.assertEqual(alg([0.0, 1.0]), (False, {}))
def test_last_n_average_threshold_factory(self):
alg = trivial.last_n_average_threshold_factory(
300, 20., {'threshold': 0.5,
'n': 2})
self.assertEqual(alg([]), (False, {}))
self.assertEqual(alg([0.0, 0.0]), (True, {}))
self.assertEqual(alg([0.0, 0.4]), (True, {}))
self.assertEqual(alg([0.0, 0.5]), (True, {}))
self.assertEqual(alg([0.0, 0.6]), (True, {}))
self.assertEqual(alg([0.0, 1.0]), (True, {}))
self.assertEqual(alg([0.2, 1.0]), (False, {}))
self.assertEqual(alg([0.0, 0.2, 1.0]), (False, {}))
self.assertEqual(alg([0.0, 1.0, 1.0]), (False, {}))
self.assertEqual(alg([0.0, 0.6, 0.6]), (False, {}))
alg = trivial.last_n_average_threshold_factory(
300, 20., {'threshold': 0.5,
'n': 3})
self.assertEqual(alg([0.0, 0.6, 0.6]), (True, {}))
def test_threshold(self):
self.assertEqual(trivial.threshold(0.5, []), False)
self.assertEqual(trivial.threshold(0.5, [0.0, 0.0]), True)
self.assertEqual(trivial.threshold(0.5, [0.0, 0.4]), True)
self.assertEqual(trivial.threshold(0.5, [0.0, 0.5]), True)
self.assertEqual(trivial.threshold(0.5, [0.0, 0.6]), False)
self.assertEqual(trivial.threshold(0.5, [0.0, 1.0]), False)
def test_last_n_average_threshold(self):
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, []), False)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 0.0]), True)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 0.4]), True)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 0.5]), True)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 0.6]), True)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 1.0]), True)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.2, 1.0]), False)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 0.2, 1.0]), False)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 1.0, 1.0]), False)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 2, [0.0, 0.6, 0.6]), False)
self.assertEqual(trivial.last_n_average_threshold(
0.5, 3, [0.0, 0.6, 0.6]), True)
| 42.294737 | 79 | 0.595819 | [
"Apache-2.0"
] | MisterPup/OpenStack-Neat-Ceilometer | tests/locals/underload/test_trivial.py | 4,018 | Python |
# coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NSynth Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import numpy as np
import tensorflow as tf
import tensorflow_datasets.public_api as tfds
_DESCRIPTION = """\
The NSynth Dataset is an audio dataset containing ~300k musical notes, each
with a unique pitch, timbre, and envelope. Each note is annotated with three
additional pieces of information based on a combination of human evaluation
and heuristic algorithms: Source, Family, and Qualities.
"""
_FULL_DESCRIPTION = """\
Full NSynth Dataset is split into train, valid, and test sets, with no
instruments overlapping between the train set and the valid/test sets.
"""
_GANSYNTH_DESCRIPTION = """\
NSynth Dataset limited to acoustic instruments in the MIDI pitch interval
[24, 84]. Uses alternate splits that have overlap in instruments (but not exact
notes) between the train set and valid/test sets. This variant was originally
introduced in the ICLR 2019 GANSynth paper (https://arxiv.org/abs/1902.08710).
"""
_F0_AND_LOUDNESS_ADDENDUM = """\
This version additionally contains estimates for F0 using CREPE
(Kim et al., 2018) and A-weighted perceptual loudness. Both signals are provided
at a frame rate of 250Hz.
"""
# From http://proceedings.mlr.press/v70/engel17a.html
_CITATION = """\
@InProceedings{pmlr-v70-engel17a,
title = {Neural Audio Synthesis of Musical Notes with {W}ave{N}et Autoencoders},
author = {Jesse Engel and Cinjon Resnick and Adam Roberts and Sander Dieleman and Mohammad Norouzi and Douglas Eck and Karen Simonyan},
booktitle = {Proceedings of the 34th International Conference on Machine Learning},
pages = {1068--1077},
year = {2017},
editor = {Doina Precup and Yee Whye Teh},
volume = {70},
series = {Proceedings of Machine Learning Research},
address = {International Convention Centre, Sydney, Australia},
month = {06--11 Aug},
publisher = {PMLR},
pdf = {http://proceedings.mlr.press/v70/engel17a/engel17a.pdf},
url = {http://proceedings.mlr.press/v70/engel17a.html},
}
"""
_NUM_SECS = 4
_AUDIO_RATE = 16000 # 16 kHz
_F0_AND_LOUDNESS_RATE = 250 # 250 Hz
_INSTRUMENT_FAMILIES = [
"bass", "brass", "flute", "guitar", "keyboard", "mallet", "organ", "reed",
"string", "synth_lead", "vocal"]
_INSTRUMENT_SOURCES = ["acoustic", "electronic", "synthetic"]
_QUALITIES = [
"bright",
"dark",
"distortion",
"fast_decay",
"long_release",
"multiphonic",
"nonlinear_env",
"percussive",
"reverb",
"tempo-synced"]
_BASE_DOWNLOAD_PATH = "http://download.magenta.tensorflow.org/datasets/nsynth/nsynth-"
_SPLITS = ["train", "valid", "test"]
_SPLIT_SHARDS = {
"train": 512,
"valid": 32,
"test": 8,
}
class NsynthConfig(tfds.core.BuilderConfig):
"""BuilderConfig for NSynth Dataset."""
def __init__(self,
gansynth_subset=False,
estimate_f0_and_loudness=False,
**kwargs):
"""Constructs a NsynthConfig.
Args:
gansynth_subset: bool, whether to use the subset of the dataset introduced
in the ICLR 2019 GANSynth paper (Engel, et al. 2018). This subset uses
acoustic-only instrument sources and limits the pitches to the interval
[24, 84]. The train and test splits are also modified so that
instruments (but not specific notes) overlap between them. See
https://arxiv.org/abs/1902.08710 for more details.
estimate_f0_and_loudness: bool, whether to estimate fundamental frequency
(F0) and loudness for the audio (at 250 Hz) and add them to the set of
features.
**kwargs: keyword arguments forwarded to super.
"""
name_parts = []
if gansynth_subset:
name_parts.append("gansynth_subset")
else:
name_parts.append("full")
if estimate_f0_and_loudness:
name_parts.append("f0_and_loudness")
super(NsynthConfig, self).__init__(
name=".".join(name_parts),
version=tfds.core.Version(
"1.1.0", experiments={tfds.core.Experiment.S3: False}),
**kwargs)
self.gansynth_subset = gansynth_subset
self.estimate_f0_and_loudness = estimate_f0_and_loudness
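# Quick sketch of how the BUILDER_CONFIGS below get their names: the
# flag-derived parts are joined with "." in NsynthConfig.__init__; the
# description value here is a placeholder.
def _example_nsynth_config_names():
  assert NsynthConfig(description="d").name == "full"
  assert NsynthConfig(
      gansynth_subset=True, description="d").name == "gansynth_subset"
  assert NsynthConfig(
      gansynth_subset=True, estimate_f0_and_loudness=True,
      description="d").name == "gansynth_subset.f0_and_loudness"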
class Nsynth(tfds.core.BeamBasedBuilder):
"""A large-scale and high-quality dataset of annotated musical notes."""
BUILDER_CONFIGS = [
NsynthConfig(description=_FULL_DESCRIPTION),
NsynthConfig(
gansynth_subset=True,
description=_GANSYNTH_DESCRIPTION),
NsynthConfig(
gansynth_subset=True,
estimate_f0_and_loudness=True,
description=_GANSYNTH_DESCRIPTION + _F0_AND_LOUDNESS_ADDENDUM),
]
def _info(self):
features = {
"id":
tf.string,
"audio":
tfds.features.Tensor(
shape=(_AUDIO_RATE * _NUM_SECS,), dtype=tf.float32),
"pitch":
tfds.features.ClassLabel(num_classes=128),
"velocity":
tfds.features.ClassLabel(num_classes=128),
"instrument": {
# We read the list of labels in _split_generators.
"label": tfds.features.ClassLabel(num_classes=1006),
"family": tfds.features.ClassLabel(names=_INSTRUMENT_FAMILIES),
"source": tfds.features.ClassLabel(names=_INSTRUMENT_SOURCES),
},
"qualities": {quality: tf.bool for quality in _QUALITIES},
}
if self.builder_config.estimate_f0_and_loudness:
f0_and_ld_shape = (_F0_AND_LOUDNESS_RATE * _NUM_SECS + 1,)
features["f0"] = {
"hz":
tfds.features.Tensor(shape=f0_and_ld_shape, dtype=tf.float32),
"midi":
tfds.features.Tensor(shape=f0_and_ld_shape, dtype=tf.float32),
"confidence":
tfds.features.Tensor(shape=f0_and_ld_shape, dtype=tf.float32)
}
features["loudness"] = {
"db":
tfds.features.Tensor(shape=f0_and_ld_shape, dtype=tf.float32)
}
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict(features),
homepage="https://g.co/magenta/nsynth-dataset",
citation=_CITATION,
metadata=tfds.core.BeamMetadataDict(),
)
def _split_generators(self, dl_manager):
"""Returns splits."""
dl_urls = {}
dl_urls["examples"] = {
split: _BASE_DOWNLOAD_PATH + "%s.tfrecord.tar" % split
for split in _SPLITS
}
dl_urls["instrument_labels"] = (
_BASE_DOWNLOAD_PATH + "instrument_labels.txt")
if self.builder_config.gansynth_subset:
dl_urls["gansynth_splits"] = (
_BASE_DOWNLOAD_PATH + "gansynth_splits.csv")
dl_paths = dl_manager.download_and_extract(dl_urls)
with tf.io.gfile.GFile(dl_paths["instrument_labels"]) as f:
instrument_labels = f.read().strip().splitlines()
self.info.features["instrument"]["label"].names = instrument_labels
split_ids = {s: set() for s in _SPLITS}
split_dirs = {s: [dl_paths["examples"][s]] for s in _SPLITS}
if self.builder_config.gansynth_subset:
# Generator needs to see all original splits for each new split.
split_dirs = {s: dl_paths["examples"].values() for s in _SPLITS}
with tf.io.gfile.GFile(dl_paths["gansynth_splits"]) as f:
reader = csv.DictReader(f)
for row in reader:
split_ids[row["split"]].add(row["id"])
return [
tfds.core.SplitGenerator( # pylint: disable=g-complex-comprehension
name=split,
num_shards=_SPLIT_SHARDS[split],
gen_kwargs={
"tfrecord_dirs": split_dirs[split],
"ids": split_ids[split],
"split": split,
})
for split in _SPLITS
]
def _build_pcollection(self, pipeline, tfrecord_dirs, ids, split):
"""Build PCollection of examples for split."""
beam = tfds.core.lazy_imports.apache_beam
def _emit_base_example(ex):
"""Maps an input example to a TFDS example."""
beam.metrics.Metrics.counter(split, "base-examples").inc()
features = ex.features.feature
return {
"id": features["note_str"].bytes_list.value[0],
"audio":
np.array(features["audio"].float_list.value, dtype=np.float32),
"pitch":
features["pitch"].int64_list.value[0],
"velocity":
features["velocity"].int64_list.value[0],
"instrument": {
"label":
tf.compat.as_text(
features["instrument_str"].bytes_list.value[0]),
"family":
tf.compat.as_text(
features["instrument_family_str"].bytes_list.value[0]),
"source":
tf.compat.as_text(
features["instrument_source_str"].bytes_list.value[0])
},
"qualities": {
q: features["qualities"].int64_list.value[i]
for (i, q) in enumerate(_QUALITIES)
}
}
def _in_split(ex, split_ids):
if not split_ids or tf.compat.as_text(ex["id"]) in split_ids:
beam.metrics.Metrics.counter(split, "in-split").inc()
return True
return False
def _estimate_f0(ex):
"""Estimate the fundamental frequency using CREPE and add to example."""
ex = ex.copy()
beam.metrics.Metrics.counter(split, "estimate-f0").inc()
_, f0_hz, f0_confidence, _ = tfds.core.lazy_imports.crepe.predict(
ex["audio"],
sr=_AUDIO_RATE,
viterbi=True,
step_size=1000 / _F0_AND_LOUDNESS_RATE,
verbose=0)
f0_midi = tfds.core.lazy_imports.librosa.core.hz_to_midi(f0_hz)
# Set -infs introduced by hz_to_midi to 0.
f0_midi[f0_midi == -np.inf] = 0
# Set nans to 0 in confidence.
f0_confidence = np.nan_to_num(f0_confidence)
ex["f0"] = {
"hz": f0_hz.astype(np.float32),
"midi": f0_midi.astype(np.float32),
"confidence": f0_confidence.astype(np.float32),
}
return ex
def _compute_loudness(ex):
"""Compute loudness and add to example."""
ex = ex.copy()
beam.metrics.Metrics.counter(split, "compute-loudness").inc()
librosa = tfds.core.lazy_imports.librosa
n_fft = 2048
amin = 1e-15
top_db = 200.0
stft = librosa.stft(
ex["audio"],
n_fft=n_fft,
hop_length=int(_AUDIO_RATE // _F0_AND_LOUDNESS_RATE))
loudness_db = librosa.perceptual_weighting(
np.abs(stft)**2,
librosa.fft_frequencies(_AUDIO_RATE, n_fft=n_fft),
amin=amin,
top_db=top_db)
# Average across freq in linear scale.
mean_loudness_amp = np.mean(librosa.db_to_amplitude(loudness_db), axis=0)
mean_loudness_db = librosa.amplitude_to_db(
mean_loudness_amp,
amin=amin,
top_db=top_db)
ex["loudness"] = {"db": mean_loudness_db.astype(np.float32)}
return ex
examples = (
pipeline
| beam.Create([os.path.join(dir_, "*") for dir_ in tfrecord_dirs])
| beam.io.tfrecordio.ReadAllFromTFRecord(
coder=beam.coders.ProtoCoder(tf.train.Example))
| beam.Map(_emit_base_example)
| beam.Filter(_in_split, split_ids=ids))
if self.builder_config.estimate_f0_and_loudness:
examples = (
examples
| beam.Reshuffle()
| beam.Map(_estimate_f0)
| beam.Map(_compute_loudness))
if split == tfds.Split.TRAIN:
# Output mean and variance of loudness for TRAIN split.
loudness = examples | beam.Map(lambda x: np.mean(x["loudness"]["db"]))
loudness_mean = (
loudness
| "loudness_mean" >> beam.combiners.Mean.Globally())
loudness_variance = (
loudness
| beam.Map(lambda ld, ld_mean: (ld - ld_mean)**2,
ld_mean=beam.pvalue.AsSingleton(loudness_mean))
| "loudness_variance" >> beam.combiners.Mean.Globally())
self.info.metadata["loudness_db_mean"] = loudness_mean
self.info.metadata["loudness_db_variance"] = loudness_variance
return examples
| 36.573864 | 139 | 0.645332 | [
"Apache-2.0"
] | Alex-Fabbri/datasets | tensorflow_datasets/audio/nsynth.py | 12,874 | Python |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
# pylint: disable=unused-argument
from azure.cli.core.util import sdk_no_wait
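# Note: sdk_no_wait(no_wait, func, ...) is azure-cli's helper for long-running
# operations; when no_wait is True it starts the operation and returns without
# polling, otherwise it behaves like a normal blocking call.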
def databoxedge_device_create(client,
device_name,
resource_group_name,
location,
tags=None,
sku=None,
etag=None,
data_box_edge_device_status=None,
description=None,
model_description=None,
friendly_name=None,
no_wait=False):
data_box_edge_device = {}
data_box_edge_device['location'] = location
data_box_edge_device['tags'] = tags
data_box_edge_device['etag'] = etag
data_box_edge_device['data_box_edge_device_status'] = data_box_edge_device_status
data_box_edge_device['description'] = description
data_box_edge_device['model_description'] = model_description
data_box_edge_device['friendly_name'] = friendly_name
if sku:
data_box_edge_device['sku'] = {}
data_box_edge_device['sku']['name'] = sku
return sdk_no_wait(no_wait,
client.create_or_update,
device_name=device_name,
resource_group_name=resource_group_name,
data_box_edge_device=data_box_edge_device)
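# Illustrative CLI usage for the create handler above (flag names are a plausible
# mapping only and are not taken from this module's _params.py):
#   az databoxedge device create --device-name mydevice \
#       --resource-group mygroup --location westus --sku Edge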
def databoxedge_device_update(client,
device_name,
resource_group_name,
tags=None):
if tags is None:
return client.get(device_name=device_name,
resource_group_name=resource_group_name)
parameters = {'tags': tags}
return client.update(device_name=device_name,
resource_group_name=resource_group_name,
parameters=parameters)
def databoxedge_bandwidth_schedule_update(instance,
device_name,
name,
resource_group_name,
start=None,
stop=None,
rate_in_mbps=None,
days=None,
no_wait=False):
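    # This follows azure-cli's generic-update pattern: the framework fetches the
    # existing bandwidth schedule, passes it in as `instance`, and persists the
    # mutated object after this function returns (which is presumably why
    # device_name/name/resource_group_name/no_wait are unused in the body).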
if start is not None:
instance.start = start
if stop is not None:
instance.stop = stop
if rate_in_mbps is not None:
instance.rate_in_mbps = rate_in_mbps
if days is not None:
instance.days = days
return instance
def databoxedge_order_create(client,
device_name,
resource_group_name,
address_line1,
postal_code,
city,
state,
country,
contact_person,
company_name,
phone,
email_list,
status=None,
comments=None,
address_line2=None,
address_line3=None,
no_wait=False):
order = {}
if status:
order['current_status'] = {}
order['current_status']['status'] = status
order['current_status']['comments'] = comments
order['shipping_address'] = {}
order['shipping_address']['address_line1'] = address_line1
order['shipping_address']['address_line2'] = address_line2
order['shipping_address']['address_line3'] = address_line3
order['shipping_address']['postal_code'] = postal_code
order['shipping_address']['city'] = city
order['shipping_address']['state'] = state
order['shipping_address']['country'] = country
order['contact_information'] = {}
order['contact_information']['contact_person'] = contact_person
order['contact_information']['company_name'] = company_name
order['contact_information']['phone'] = phone
order['contact_information']['email_list'] = email_list
return sdk_no_wait(no_wait,
client.create_or_update,
device_name=device_name,
resource_group_name=resource_group_name,
order=order)
| 42.109244 | 85 | 0.50908 | [
"MIT"
] | 00Kai0/azure-cli | src/azure-cli/azure/cli/command_modules/databoxedge/manual/custom.py | 5,011 | Python |
# -*- coding:utf-8 -*-
import csv
import fileinput
import sys
import numpy
from pynm.feature.metric.itml import learn_metric, convert_data
class ItmlCommand:
name = 'itml'
help = 'Information Theoretic Metric Learning'
@classmethod
def build_arg_parser(cls, parser):
parser.add_argument('-i',
'--input_data',
default='-',
type=str,
metavar='FILE',
help='input data file (default: stdin)')
label_or_pair = parser.add_mutually_exclusive_group(required=True)
label_or_pair.add_argument('-l',
'--input_labels',
default=None,
type=str,
metavar='FILE',
help='input labels file')
label_or_pair.add_argument('-p',
'--input_pairs',
default=None,
type=str,
metavar='FILE',
help='input pairs file')
parser.add_argument('-o',
'--output_data',
default=None,
type=str,
metavar='FILE',
help='output data file')
parser.add_argument('-m',
'--output_metric',
default=None,
type=str,
metavar='FILE',
help='output metric file')
parser.add_argument('-w',
'--output_weights',
default=None,
type=str,
metavar='FILE',
help='output weights file')
parser.add_argument('-d',
'--delimiter',
default='\t',
type=str,
metavar='DELIM',
help='delimiter (default: "\\t")')
parser.add_argument('-s',
'--sparse',
action='store_true',
help='sparse format (not implemented yet)')
parser.add_argument('--header',
action='store_true',
help='has header')
parser.add_argument('-U',
'--u_param',
default=1.0,
type=float,
metavar='DISTANCE',
help='U parameter (max distance for same labels, default: 1.0)')
parser.add_argument('-L',
'--l_param',
default=1.0,
type=float,
metavar='DISTANCE',
help='L parameter (min distance for different labels, default: 1.0)')
parser.add_argument('-S',
'--slack',
default=1.0,
type=float,
metavar='SLACK',
help='slack variable (default: 1.0)')
parser.add_argument('-N',
'--max_iteration_number',
default=1000,
type=int,
metavar='MAX',
help='max iteration (default: 1000)')
def run(self, args):
with fileinput.input(args.input_data) as in_:
header, data = self.load_data(in_,
delimiter=args.delimiter,
has_header=args.header)
if args.input_labels is not None:
with fileinput.input(args.input_labels) as in_:
labels = self.load_labels(in_)
pairs = None
elif args.input_pairs is not None:
with fileinput.input(args.input_pairs) as in_:
pairs = self.load_pairs(in_)
labels = None
metric = learn_metric(data,
labels=labels,
pairs=pairs,
u=args.u_param,
l=args.l_param,
slack=args.slack,
max_iter=args.max_iteration_number,
is_sparse=args.sparse)
if args.output_metric is not None:
if args.output_metric == '-':
self.export_metric(sys.stdout, metric, header)
else:
with open(args.output_metric, 'w') as o_:
self.export_metric(o_, metric, header)
if args.output_weights is not None:
weights = numpy.diag(metric)
if args.output_weights == '-':
self.export_weights(sys.stdout, weights, header)
else:
with open(args.output_weights, 'w') as o_:
self.export_weights(o_, weights, header)
if args.output_data is not None:
converted_data = convert_data(metric, data)
if args.output_data == '-':
self.export_data(sys.stdout, converted_data, header)
else:
with open(args.output_data, 'w') as o_:
self.export_data(o_, converted_data, header)
return 0
def load_data(self,
input_data,
delimiter='\t',
has_header=False):
reader = csv.reader(input_data, delimiter=delimiter)
if has_header:
            header = {value: key for key, value in enumerate(next(reader))}
else:
header = None
data = []
for row in reader:
data.append(numpy.array(list(map(lambda x: float(x), row))))
return header, data
def load_labels(self, input_labels):
return list(map(lambda x: int(x), input_labels))
def load_pairs(self, input_pairs, delimiter='\t', header=None):
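        # Each input line is expected to be "idx1<delim>idx2<delim>similar", where
        # similar > 0 marks a same-class pair; when a header map is given, the first
        # two fields are column names that are translated to indices via the header.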
pairs = []
if header is None:
for line in input_pairs:
row = line.split(delimiter)
idx1 = int(row[0])
idx2 = int(row[1])
similar = int(row[2]) > 0
pairs.append((idx1, idx2, similar))
else:
for line in input_pairs:
row = line.split(delimiter)
idx1 = header[row[0]]
idx2 = header[row[1]]
similar = int(row[2]) > 0
pairs.append((idx1, idx2, similar))
return pairs
def export_metric(self,
output,
metric,
header=None,
sparse=False):
if sparse:
raise NotImplementedError('sparse is not supported yet.')
writer = csv.writer(output)
if header is not None:
writer.writerow(header)
for row in metric:
writer.writerow(row)
def export_weights(self,
output,
weights,
header=None):
writer = csv.writer(output)
if header is not None:
writer.writerow(header)
writer.writerow(weights)
def export_data(self,
output,
data,
header=None,
sparse=False):
if sparse:
raise NotImplementedError('sparse is not supported yet.')
writer = csv.writer(output)
if header is not None:
writer.writerow(header)
for row in data:
writer.writerow(row)
class MetricCommand:
name = 'metric'
help = 'Metric Learning'
sub_commands = [ItmlCommand]
default_command = sub_commands[0]
def build_arg_parser(self, parser):
self.default_command.build_arg_parser(parser)
subparsers = parser.add_subparsers(title='algorithm', dest='algorithm')
for command in self.sub_commands:
subparser = subparsers.add_parser(command.name, help=command.help)
command.build_arg_parser(subparser)
def run(self, args):
sub_command = self._get_sub_command(args.algorithm)
return sub_command.run(args)
def _get_sub_command(self, algorithm):
if algorithm is None:
return self.default_command()
return next(filter(lambda x: x.name == algorithm, self.sub_commands))()
| 37.270042 | 97 | 0.452621 | [
"MIT"
] | ohtaman/pynm | pynm/commands/metric.py | 8,833 | Python |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import os.path
import re
from math import ceil
from ipaddress import ip_network
from knack.log import get_logger
from azure.cli.core.util import CLIError
import azure.cli.core.keys as keys
logger = get_logger(__name__)
def validate_ssh_key(namespace):
if hasattr(namespace, 'no_ssh_key') and namespace.no_ssh_key:
return
string_or_file = (namespace.ssh_key_value or
os.path.join(os.path.expanduser('~'), '.ssh', 'id_rsa.pub'))
content = string_or_file
if os.path.exists(string_or_file):
logger.info('Use existing SSH public key file: %s', string_or_file)
with open(string_or_file, 'r') as f:
content = f.read()
elif not keys.is_valid_ssh_rsa_public_key(content):
if namespace.generate_ssh_keys:
# figure out appropriate file names:
# 'base_name'(with private keys), and 'base_name.pub'(with public keys)
public_key_filepath = string_or_file
if public_key_filepath[-4:].lower() == '.pub':
private_key_filepath = public_key_filepath[:-4]
else:
private_key_filepath = public_key_filepath + '.private'
content = keys.generate_ssh_keys(private_key_filepath, public_key_filepath)
logger.warning("SSH key files '%s' and '%s' have been generated under ~/.ssh to "
"allow SSH access to the VM. If using machines without "
"permanent storage like Azure Cloud Shell without an attached "
"file share, back up your keys to a safe location",
private_key_filepath, public_key_filepath)
else:
raise CLIError('An RSA key file or key value must be supplied to SSH Key Value. '
'You can use --generate-ssh-keys to let CLI generate one for you')
namespace.ssh_key_value = content
def validate_create_parameters(namespace):
if not namespace.name:
raise CLIError('--name has no value')
if namespace.dns_name_prefix is not None and not namespace.dns_name_prefix:
raise CLIError('--dns-prefix has no value')
def validate_k8s_version(namespace):
"""Validates a string as a possible Kubernetes version. An empty string is also valid, which tells the server
to use its default version."""
if namespace.kubernetes_version:
k8s_release_regex = re.compile(r'^[v|V]?(\d+\.\d+\.\d+.*)$')
found = k8s_release_regex.findall(namespace.kubernetes_version)
if found:
namespace.kubernetes_version = found[0]
else:
raise CLIError('--kubernetes-version should be the full version number, '
'such as "1.7.12" or "1.8.7"')
def validate_linux_host_name(namespace):
"""Validates a string as a legal host name component.
This validation will also occur server-side in the ARM API, but that may take
a minute or two before the user sees it. So it's more user-friendly to validate
in the CLI pre-flight.
"""
# https://stackoverflow.com/questions/106179/regular-expression-to-match-dns-hostname-or-ip-address
rfc1123_regex = re.compile(r'^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$') # pylint:disable=line-too-long
found = rfc1123_regex.findall(namespace.name)
if not found:
raise CLIError('--name cannot exceed 63 characters and can only contain '
'letters, numbers, or dashes (-).')
def validate_max_pods(namespace):
"""Validates that max_pods is set to a reasonable minimum number."""
# kube-proxy and kube-svc reside each nodes,
# 2 kube-proxy pods, 1 azureproxy/heapster/dashboard/tunnelfront are in kube-system
minimum_pods_required = ceil((namespace.node_count * 2 + 6 + 1) / namespace.node_count)
if namespace.max_pods != 0 and namespace.max_pods < minimum_pods_required:
raise CLIError('--max-pods must be at least {} for a managed Kubernetes cluster to function.'
.format(minimum_pods_required))
def validate_nodes_count(namespace):
"""Validate that min_count and max_count is set to 1-100"""
if namespace.min_count is not None:
if namespace.min_count < 1 or namespace.min_count > 100:
raise CLIError('--min-count must be in the range [1,100]')
if namespace.max_count is not None:
if namespace.max_count < 1 or namespace.max_count > 100:
raise CLIError('--max-count must be in the range [1,100]')
def validate_ip_ranges(namespace):
if namespace.api_server_authorized_ip_ranges is not None:
if namespace.api_server_authorized_ip_ranges == '':
return
for ip in namespace.api_server_authorized_ip_ranges.split(','):
try:
ip_network(ip)
except ValueError:
raise CLIError("--api-server-authorized-ip-ranges should be list of IPv4 addresses or CIDRs")
def validate_nodepool_name(namespace):
"""Validates a nodepool name to be at most 12 characters, alphanumeric only."""
if namespace.nodepool_name != "":
if len(namespace.nodepool_name) > 12:
            raise CLIError('--nodepool-name can contain at most 12 characters')
if not namespace.nodepool_name.isalnum():
raise CLIError('--nodepool-name should only contain alphanumeric characters')
def validate_vm_set_type(namespace):
"""Validates the vm set type string."""
if namespace.vm_set_type is not None:
if namespace.vm_set_type == '':
return
if namespace.vm_set_type.lower() != "availabilityset" and \
namespace.vm_set_type.lower() != "virtualmachinescalesets":
raise CLIError("--vm-set-type can only be VirtualMachineScaleSets or AvailabilitySet")
def validate_load_balancer_sku(namespace):
"""Validates the load balancer sku string."""
if namespace.load_balancer_sku is not None:
if namespace.load_balancer_sku == '':
return
if namespace.load_balancer_sku.lower() != "basic" and namespace.load_balancer_sku.lower() != "standard":
raise CLIError("--load-balancer-sku can only be standard or basic")
def validate_load_balancer_outbound_ips(namespace):
"""validate load balancer profile outbound IP ids"""
if namespace.load_balancer_outbound_ips is not None:
ip_id_list = [x.strip() for x in namespace.load_balancer_outbound_ips.split(',')]
if not all(ip_id_list):
raise CLIError("--load-balancer-outbound-ips cannot contain whitespace")
def validate_load_balancer_outbound_ip_prefixes(namespace):
"""validate load balancer profile outbound IP prefix ids"""
if namespace.load_balancer_outbound_ip_prefixes is not None:
ip_prefix_id_list = [x.strip() for x in namespace.load_balancer_outbound_ip_prefixes.split(',')]
if not all(ip_prefix_id_list):
raise CLIError("--load-balancer-outbound-ip-prefixes cannot contain whitespace")
def validate_taints(namespace):
"""Validates that provided taint is a valid format"""
regex = re.compile(r"^[a-zA-Z\d][\w\-\.\/]{0,252}=[a-zA-Z\d][\w\-\.]{0,62}:(NoSchedule|PreferNoSchedule|NoExecute)$") # pylint: disable=line-too-long
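    # A valid taint has the form key=value:Effect, e.g. "sku=gpu:NoSchedule".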
if namespace.node_taints is not None and namespace.node_taints != '':
for taint in namespace.node_taints.split(','):
if taint == "":
continue
found = regex.findall(taint)
if not found:
raise CLIError('Invalid node taint: %s' % taint)
def validate_priority(namespace):
"""Validates the node pool priority string."""
if namespace.priority is not None:
if namespace.priority == '':
return
if namespace.priority != "Low" and \
namespace.priority != "Regular":
raise CLIError("--priority can only be Low or Regular")
def validate_eviction_policy(namespace):
"""Validates the node pool priority string."""
if namespace.eviction_policy is not None:
if namespace.eviction_policy == '':
return
if namespace.eviction_policy != "Delete" and \
namespace.eviction_policy != "Deallocate":
raise CLIError("--eviction-policy can only be Delete or Deallocate")
| 45.388601 | 184 | 0.649201 | [
"MIT"
] | andyzhangx/azure-cli-extensions | src/aks-preview/azext_aks_preview/_validators.py | 8,760 | Python |
import os
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
import argparse
import cv2
import config
from utils import Mesh
from models import CMR
from models.smpl_from_lib import SMPL
from utils.pose_utils import compute_similarity_transform_batch, \
scale_and_translation_transform_batch
from utils.cam_utils import orthographic_project_torch, undo_keypoint_normalisation
from datasets.my_3dpw_eval_dataset import PW3DEvalDataset
def evaluate_3dpw(model,
eval_dataset,
metrics,
device,
vis_save_path,
num_workers=4,
pin_memory=True,
vis_every_n_batches=1000):
eval_dataloader = DataLoader(eval_dataset,
batch_size=1,
shuffle=False,
drop_last=True,
num_workers=num_workers,
pin_memory=pin_memory)
smpl = SMPL(config.SMPL_MODEL_DIR, batch_size=1)
smpl_male = SMPL(config.SMPL_MODEL_DIR, batch_size=1, gender='male')
smpl_female = SMPL(config.SMPL_MODEL_DIR, batch_size=1, gender='female')
smpl.to(device)
smpl_male.to(device)
smpl_female.to(device)
J_regressor = torch.from_numpy(np.load(config.JOINT_REGRESSOR_H36M)).float()
J_regressor_batch = J_regressor[None, :].to(device)
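    # NOTE: 3DPW provides per-subject gender, so the ground-truth meshes below are
    # built with the matching gendered SMPL model while predictions use the neutral
    # model. J_regressor maps the 6890 SMPL vertices to Human3.6M joints, and
    # config.H36M_TO_J14 then selects the 14-joint (H36M/LSP) subset for evaluation.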
if 'pve' in metrics:
pve_smpl_sum = 0.0
pve_graph_sum = 0.0
pve_smpl_per_frame = []
pve_graph_per_frame = []
if 'pve_scale_corrected' in metrics:
pve_scale_corrected_smpl_sum = 0.0
pve_scale_corrected_graph_sum = 0.0
pve_scale_corrected_smpl_per_frame = []
pve_scale_corrected_graph_per_frame = []
if 'pve_pa' in metrics:
pve_pa_smpl_sum = 0.0
pve_pa_graph_sum = 0.0
pve_pa_smpl_per_frame = []
pve_pa_graph_per_frame = []
if 'pve-t' in metrics:
pvet_sum = 0.0
pvet_per_frame = []
if 'pve-t_scale_corrected' in metrics:
pvet_scale_corrected_sum = 0.0
pvet_scale_corrected_per_frame = []
if 'mpjpe' in metrics:
mpjpe_smpl_sum = 0.0
mpjpe_graph_sum = 0.0
mpjpe_smpl_per_frame = []
mpjpe_graph_per_frame = []
if 'mpjpe_scale_corrected' in metrics:
mpjpe_scale_corrected_smpl_sum = 0.0
mpjpe_scale_corrected_graph_sum = 0.0
mpjpe_scale_corrected_smpl_per_frame = []
mpjpe_scale_corrected_graph_per_frame = []
if 'j3d_rec_err' in metrics:
j3d_rec_err_smpl_sum = 0.0
j3d_rec_err_graph_sum = 0.0
j3d_rec_err_smpl_per_frame = []
j3d_rec_err_graph_per_frame = []
if 'pve_2d' in metrics:
pve_2d_smpl_sum = 0.0
pve_2d_graph_sum = 0.0
if 'pve_2d_scale_corrected' in metrics:
pve_2d_scale_corrected_smpl_sum = 0.0
pve_2d_scale_corrected_graph_sum = 0.0
if 'pve_2d_pa' in metrics:
pve_2d_pa_smpl_sum = 0.0
pve_2d_pa_graph_sum = 0.0
num_samples = 0
num_vertices = 6890
num_joints3d = 14
model.eval()
for batch_num, samples_batch in enumerate(tqdm(eval_dataloader)):
# ------------------------------- TARGETS and INPUTS -------------------------------
input = samples_batch['input']
input = input.to(device)
target_pose = samples_batch['pose'].to(device)
target_shape = samples_batch['shape'].to(device)
target_gender = samples_batch['gender'][0]
if target_gender == 'm':
target_smpl_output = smpl_male(body_pose=target_pose[:, 3:],
global_orient=target_pose[:, :3],
betas=target_shape)
target_vertices = target_smpl_output.vertices
target_reposed_smpl_output = smpl_male(betas=target_shape)
target_reposed_vertices = target_reposed_smpl_output.vertices
target_joints_h36m = torch.matmul(J_regressor_batch, target_vertices)
target_joints_h36mlsp = target_joints_h36m[:, config.H36M_TO_J14, :]
elif target_gender == 'f':
target_smpl_output = smpl_female(body_pose=target_pose[:, 3:],
global_orient=target_pose[:, :3],
betas=target_shape)
target_vertices = target_smpl_output.vertices
target_reposed_smpl_output = smpl_female(betas=target_shape)
target_reposed_vertices = target_reposed_smpl_output.vertices
target_joints_h36m = torch.matmul(J_regressor_batch, target_vertices)
target_joints_h36mlsp = target_joints_h36m[:, config.H36M_TO_J14, :]
# ------------------------------- PREDICTIONS -------------------------------
pred_vertices, pred_vertices_smpl, pred_camera, pred_rotmat, pred_betas = model(input)
pred_vertices_projected2d = orthographic_project_torch(pred_vertices, pred_camera)
pred_vertices_projected2d = undo_keypoint_normalisation(pred_vertices_projected2d, input.shape[-1])
pred_vertices_smpl_projected2d = orthographic_project_torch(pred_vertices_smpl, pred_camera)
pred_vertices_smpl_projected2d = undo_keypoint_normalisation(pred_vertices_smpl_projected2d, input.shape[-1])
pred_reposed_smpl_output = smpl(betas=pred_betas)
pred_reposed_vertices = pred_reposed_smpl_output.vertices
pred_joints_h36m = torch.matmul(J_regressor_batch, pred_vertices)
pred_joints_h36mlsp = pred_joints_h36m[:, config.H36M_TO_J14, :]
pred_joints_smpl_h36m = torch.matmul(J_regressor_batch, pred_vertices_smpl)
pred_joints_smpl_h36mlsp = pred_joints_smpl_h36m[:, config.H36M_TO_J14, :]
# Numpy-fying
target_vertices = target_vertices.cpu().detach().numpy()
target_reposed_vertices = target_reposed_vertices.cpu().detach().numpy()
target_joints_h36mlsp = target_joints_h36mlsp.cpu().detach().numpy()
pred_vertices = pred_vertices.cpu().detach().numpy()
pred_vertices_smpl = pred_vertices_smpl.cpu().detach().numpy()
pred_vertices_projected2d = pred_vertices_projected2d.cpu().detach().numpy()
pred_vertices_smpl_projected2d = pred_vertices_smpl_projected2d.cpu().detach().numpy()
pred_reposed_vertices = pred_reposed_vertices.cpu().detach().numpy()
pred_joints_h36mlsp = pred_joints_h36mlsp.cpu().detach().numpy()
pred_joints_smpl_h36mlsp = pred_joints_smpl_h36mlsp.cpu().detach().numpy()
# ------------------------------- METRICS -------------------------------
if 'pve' in metrics:
pve_smpl_batch = np.linalg.norm(pred_vertices_smpl - target_vertices, axis=-1) # (1, 6890)
pve_graph_batch = np.linalg.norm(pred_vertices - target_vertices, axis=-1)
pve_smpl_sum += np.sum(pve_smpl_batch) # scalar
pve_graph_sum += np.sum(pve_graph_batch)
pve_smpl_per_frame.append(np.mean(pve_smpl_batch, axis=-1))
pve_graph_per_frame.append(np.mean(pve_graph_batch, axis=-1))
# Scale and translation correction
if 'pve_scale_corrected' in metrics:
pred_vertices_smpl_sc = scale_and_translation_transform_batch(pred_vertices_smpl,
target_vertices)
pred_vertices_sc = scale_and_translation_transform_batch(pred_vertices,
target_vertices)
pve_sc_smpl_batch = np.linalg.norm(pred_vertices_smpl_sc - target_vertices,
axis=-1) # (1, 6890)
pve_sc_graph_batch = np.linalg.norm(pred_vertices_sc - target_vertices,
axis=-1) # (1, 6890)
pve_scale_corrected_smpl_sum += np.sum(pve_sc_smpl_batch) # scalar
pve_scale_corrected_graph_sum += np.sum(pve_sc_graph_batch) # scalar
pve_scale_corrected_smpl_per_frame.append(np.mean(pve_sc_smpl_batch, axis=-1))
pve_scale_corrected_graph_per_frame.append(np.mean(pve_sc_graph_batch, axis=-1))
# Procrustes analysis
if 'pve_pa' in metrics:
pred_vertices_smpl_pa = compute_similarity_transform_batch(pred_vertices_smpl, target_vertices)
pred_vertices_pa = compute_similarity_transform_batch(pred_vertices, target_vertices)
pve_pa_smpl_batch = np.linalg.norm(pred_vertices_smpl_pa - target_vertices, axis=-1) # (1, 6890)
pve_pa_graph_batch = np.linalg.norm(pred_vertices_pa - target_vertices, axis=-1) # (1, 6890)
pve_pa_smpl_sum += np.sum(pve_pa_smpl_batch) # scalar
pve_pa_graph_sum += np.sum(pve_pa_graph_batch) # scalar
pve_pa_smpl_per_frame.append(np.mean(pve_pa_smpl_batch, axis=-1))
pve_pa_graph_per_frame.append(np.mean(pve_pa_graph_batch, axis=-1))
if 'pve-t' in metrics:
pvet_batch = np.linalg.norm(pred_reposed_vertices - target_reposed_vertices, axis=-1)
pvet_sum += np.sum(pvet_batch)
pvet_per_frame.append(np.mean(pvet_batch, axis=-1))
# Scale and translation correction
if 'pve-t_scale_corrected' in metrics:
pred_reposed_vertices_sc = scale_and_translation_transform_batch(pred_reposed_vertices,
target_reposed_vertices)
pvet_scale_corrected_batch = np.linalg.norm(pred_reposed_vertices_sc - target_reposed_vertices,
axis=-1) # (bs, 6890)
pvet_scale_corrected_sum += np.sum(pvet_scale_corrected_batch) # scalar
pvet_scale_corrected_per_frame.append(np.mean(pvet_scale_corrected_batch, axis=-1))
if 'mpjpe' in metrics:
mpjpe_smpl_batch = np.linalg.norm(pred_joints_smpl_h36mlsp - target_joints_h36mlsp, axis=-1) # (bs, 14)
mpjpe_graph_batch = np.linalg.norm(pred_joints_h36mlsp - target_joints_h36mlsp, axis=-1) # (bs, 14)
mpjpe_smpl_sum += np.sum(mpjpe_smpl_batch)
mpjpe_graph_sum += np.sum(mpjpe_graph_batch)
mpjpe_smpl_per_frame.append(np.mean(mpjpe_smpl_batch, axis=-1))
mpjpe_graph_per_frame.append(np.mean(mpjpe_graph_batch, axis=-1))
# Scale and translation correction
if 'mpjpe_scale_corrected' in metrics:
pred_joints_smpl_h36mlsp_sc = scale_and_translation_transform_batch(pred_joints_smpl_h36mlsp,
target_joints_h36mlsp)
pred_joints_h36mlsp_sc = scale_and_translation_transform_batch(pred_joints_h36mlsp,
target_joints_h36mlsp)
mpjpe_scale_corrected_smpl_batch = np.linalg.norm(pred_joints_smpl_h36mlsp_sc - target_joints_h36mlsp,
axis=-1) # (bs, 14)
mpjpe_scale_corrected_graph_batch = np.linalg.norm(pred_joints_h36mlsp_sc - target_joints_h36mlsp,
axis=-1) # (bs, 14)
mpjpe_scale_corrected_smpl_sum += np.sum(mpjpe_scale_corrected_smpl_batch)
mpjpe_scale_corrected_graph_sum += np.sum(mpjpe_scale_corrected_graph_batch)
mpjpe_scale_corrected_smpl_per_frame.append(np.mean(mpjpe_scale_corrected_smpl_batch, axis=-1))
mpjpe_scale_corrected_graph_per_frame.append(np.mean(mpjpe_scale_corrected_graph_batch, axis=-1))
# Procrustes analysis
if 'j3d_rec_err' in metrics:
pred_joints_smpl_h36mlsp_pa = compute_similarity_transform_batch(pred_joints_smpl_h36mlsp,
target_joints_h36mlsp)
pred_joints_h36mlsp_pa = compute_similarity_transform_batch(pred_joints_h36mlsp, target_joints_h36mlsp)
j3d_rec_err_smpl_batch = np.linalg.norm(pred_joints_smpl_h36mlsp_pa - target_joints_h36mlsp, axis=-1) # (bs, 14)
j3d_rec_err_graph_batch = np.linalg.norm(pred_joints_h36mlsp_pa - target_joints_h36mlsp, axis=-1) # (bs, 14)
j3d_rec_err_smpl_sum += np.sum(j3d_rec_err_smpl_batch)
j3d_rec_err_graph_sum += np.sum(j3d_rec_err_graph_batch)
j3d_rec_err_smpl_per_frame.append(np.mean(j3d_rec_err_smpl_batch, axis=-1))
j3d_rec_err_graph_per_frame.append(np.mean(j3d_rec_err_graph_batch, axis=-1))
if 'pve_2d' in metrics:
pred_vertices_smpl_2d = pred_vertices_smpl[:, :, :2]
pred_vertices_2d = pred_vertices[:, :, :2]
target_vertices_2d = target_vertices[:, :, :2]
pve_2d_smpl_batch = np.linalg.norm(pred_vertices_smpl_2d - target_vertices_2d, axis=-1) # (bs, 6890)
pve_2d_graph_batch = np.linalg.norm(pred_vertices_2d - target_vertices_2d, axis=-1) # (bs, 6890)
pve_2d_smpl_sum += np.sum(pve_2d_smpl_batch)
pve_2d_graph_sum += np.sum(pve_2d_graph_batch)
# Scale and translation correction
if 'pve_2d_scale_corrected' in metrics:
pred_vertices_smpl_sc = scale_and_translation_transform_batch(pred_vertices_smpl,
target_vertices)
pred_vertices_sc = scale_and_translation_transform_batch(pred_vertices,
target_vertices)
pred_vertices_smpl_2d_sc = pred_vertices_smpl_sc[:, :, :2]
pred_vertices_2d_sc = pred_vertices_sc[:, :, :2]
target_vertices_2d = target_vertices[:, :, :2]
pve_2d_sc_smpl_batch = np.linalg.norm(pred_vertices_smpl_2d_sc - target_vertices_2d,
axis=-1) # (bs, 6890)
pve_2d_sc_graph_batch = np.linalg.norm(pred_vertices_2d_sc - target_vertices_2d,
axis=-1) # (bs, 6890)
pve_2d_scale_corrected_smpl_sum += np.sum(pve_2d_sc_smpl_batch)
pve_2d_scale_corrected_graph_sum += np.sum(pve_2d_sc_graph_batch)
# Procrustes analysis
if 'pve_2d_pa' in metrics:
pred_vertices_smpl_pa = compute_similarity_transform_batch(pred_vertices_smpl, target_vertices)
pred_vertices_pa = compute_similarity_transform_batch(pred_vertices, target_vertices)
pred_vertices_smpl_2d_pa = pred_vertices_smpl_pa[:, :, :2]
pred_vertices_2d_pa = pred_vertices_pa[:, :, :2]
target_vertices_2d = target_vertices[:, :, :2]
pve_2d_pa_smpl_batch = np.linalg.norm(pred_vertices_smpl_2d_pa - target_vertices_2d, axis=-1) # (bs, 6890)
pve_2d_pa_graph_batch = np.linalg.norm(pred_vertices_2d_pa - target_vertices_2d, axis=-1) # (bs, 6890)
pve_2d_pa_smpl_sum += np.sum(pve_2d_pa_smpl_batch)
pve_2d_pa_graph_sum += np.sum(pve_2d_pa_graph_batch)
num_samples += target_pose.shape[0]
# ------------------------------- VISUALISE -------------------------------
if vis_every_n_batches is not None:
if batch_num % vis_every_n_batches == 0:
vis_imgs = samples_batch['vis_img'].numpy()
vis_imgs = np.transpose(vis_imgs, [0, 2, 3, 1])
fnames = samples_batch['fname']
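                # NOTE: several arrays plotted below (pred_vertices_pa, pred_vertices_smpl_pa,
                # pred_reposed_vertices_sc, pred_joints_h36mlsp_pa, pred_joints_smpl_h36mlsp_pa)
                # are only defined when the corresponding metrics ('pve_pa',
                # 'pve-t_scale_corrected', 'j3d_rec_err') are enabled, so visualisation
                # assumes those metrics are present in the `metrics` list.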
plt.figure(figsize=(16, 12))
plt.subplot(341)
plt.imshow(vis_imgs[0])
plt.subplot(342)
plt.imshow(vis_imgs[0])
plt.scatter(pred_vertices_projected2d[0, :, 0], pred_vertices_projected2d[0, :, 1], s=0.1, c='r')
plt.subplot(343)
plt.imshow(vis_imgs[0])
plt.scatter(pred_vertices_smpl_projected2d[0, :, 0], pred_vertices_smpl_projected2d[0, :, 1], s=0.1, c='r')
plt.subplot(345)
plt.scatter(target_vertices[0, :, 0], target_vertices[0, :, 1], s=0.1, c='b')
plt.scatter(pred_vertices[0, :, 0], pred_vertices[0, :, 1], s=0.1, c='r')
plt.gca().invert_yaxis()
plt.gca().set_aspect('equal', adjustable='box')
plt.subplot(346)
plt.scatter(target_vertices[0, :, 0], target_vertices[0, :, 1], s=0.1, c='b')
plt.scatter(pred_vertices_smpl[0, :, 0], pred_vertices_smpl[0, :, 1], s=0.1, c='r')
plt.gca().invert_yaxis()
plt.gca().set_aspect('equal', adjustable='box')
plt.subplot(347)
plt.scatter(target_vertices[0, :, 0], target_vertices[0, :, 1], s=0.1, c='b')
plt.scatter(pred_vertices_pa[0, :, 0], pred_vertices_pa[0, :, 1], s=0.1, c='r')
plt.gca().invert_yaxis()
plt.gca().set_aspect('equal', adjustable='box')
plt.subplot(348)
plt.scatter(target_vertices[0, :, 0], target_vertices[0, :, 1], s=0.1, c='b')
plt.scatter(pred_vertices_smpl_pa[0, :, 0], pred_vertices_smpl_pa[0, :, 1], s=0.1, c='r')
plt.gca().invert_yaxis()
plt.gca().set_aspect('equal', adjustable='box')
plt.subplot(349)
plt.scatter(target_reposed_vertices[0, :, 0], target_reposed_vertices[0, :, 1], s=0.1, c='b')
plt.scatter(pred_reposed_vertices_sc[0, :, 0], pred_reposed_vertices_sc[0, :, 1], s=0.1, c='r')
plt.gca().set_aspect('equal', adjustable='box')
plt.subplot(3, 4, 10)
for j in range(num_joints3d):
plt.scatter(pred_joints_h36mlsp[0, j, 0], pred_joints_h36mlsp[0, j, 1], c='r')
plt.scatter(target_joints_h36mlsp[0, j, 0], target_joints_h36mlsp[0, j, 1], c='b')
plt.text(pred_joints_h36mlsp[0, j, 0], pred_joints_h36mlsp[0, j, 1], s=str(j))
plt.text(target_joints_h36mlsp[0, j, 0], target_joints_h36mlsp[0, j, 1], s=str(j))
plt.gca().invert_yaxis()
plt.gca().set_aspect('equal', adjustable='box')
plt.subplot(3, 4, 11)
for j in range(num_joints3d):
plt.scatter(pred_joints_h36mlsp_pa[0, j, 0], pred_joints_h36mlsp_pa[0, j, 1], c='r')
plt.scatter(target_joints_h36mlsp[0, j, 0], target_joints_h36mlsp[0, j, 1], c='b')
plt.text(pred_joints_h36mlsp_pa[0, j, 0], pred_joints_h36mlsp_pa[0, j, 1], s=str(j))
plt.text(target_joints_h36mlsp[0, j, 0], target_joints_h36mlsp[0, j, 1], s=str(j))
plt.gca().invert_yaxis()
plt.gca().set_aspect('equal', adjustable='box')
plt.subplot(3, 4, 12)
for j in range(num_joints3d):
plt.scatter(pred_joints_smpl_h36mlsp_pa[0, j, 0], pred_joints_smpl_h36mlsp_pa[0, j, 1], c='r')
plt.scatter(target_joints_h36mlsp[0, j, 0], target_joints_h36mlsp[0, j, 1], c='b')
plt.text(pred_joints_smpl_h36mlsp_pa[0, j, 0], pred_joints_smpl_h36mlsp_pa[0, j, 1], s=str(j))
plt.text(target_joints_h36mlsp[0, j, 0], target_joints_h36mlsp[0, j, 1], s=str(j))
plt.gca().invert_yaxis()
plt.gca().set_aspect('equal', adjustable='box')
# plt.show()
save_fig_path = os.path.join(vis_save_path, fnames[0])
plt.savefig(save_fig_path, bbox_inches='tight')
plt.close()
if 'pve' in metrics:
pve_smpl = pve_smpl_sum / (num_samples * num_vertices)
print('PVE SMPL: {:.5f}'.format(pve_smpl))
pve_graph = pve_graph_sum / (num_samples * num_vertices)
print('PVE GRAPH: {:.5f}'.format(pve_graph))
pve_smpl_per_frame = np.concatenate(pve_smpl_per_frame, axis=0)
pve_graph_per_frame = np.concatenate(pve_graph_per_frame, axis=0)
np.save(os.path.join(save_path, 'pve_per_frame.npy'), pve_smpl_per_frame)
np.save(os.path.join(save_path, 'pve_graph_per_frame.npy'), pve_graph_per_frame)
if 'pve_scale_corrected' in metrics:
pve_sc_smpl = pve_scale_corrected_smpl_sum / (num_samples * num_vertices)
print('PVE SC SMPL: {:.5f}'.format(pve_sc_smpl))
pve_sc_graph = pve_scale_corrected_graph_sum / (num_samples * num_vertices)
print('PVE SC GRAPH: {:.5f}'.format(pve_sc_graph))
pve_scale_corrected_smpl_per_frame = np.concatenate(pve_scale_corrected_smpl_per_frame, axis=0)
pve_scale_corrected_graph_per_frame = np.concatenate(pve_scale_corrected_graph_per_frame, axis=0)
np.save(os.path.join(save_path, 'pve_scale_corrected_per_frame.npy'),
pve_scale_corrected_smpl_per_frame)
np.save(os.path.join(save_path, 'pve_scale_corrected_graph_per_frame.npy'),
pve_scale_corrected_graph_per_frame)
if 'pve_pa' in metrics:
pve_pa_smpl = pve_pa_smpl_sum / (num_samples * num_vertices)
print('PVE PA SMPL: {:.5f}'.format(pve_pa_smpl))
pve_pa_graph = pve_pa_graph_sum / (num_samples * num_vertices)
print('PVE PA GRAPH: {:.5f}'.format(pve_pa_graph))
pve_pa_smpl_per_frame = np.concatenate(pve_pa_smpl_per_frame, axis=0)
pve_pa_graph_per_frame = np.concatenate(pve_pa_graph_per_frame, axis=0)
np.save(os.path.join(save_path, 'pve_pa_per_frame.npy'), pve_pa_smpl_per_frame)
np.save(os.path.join(save_path, 'pve_pa_graph_per_frame.npy'), pve_pa_graph_per_frame)
if 'pve-t' in metrics:
pvet = pvet_sum / (num_samples * num_vertices)
print('PVE-T: {:.5f}'.format(pvet))
pvet_per_frame = np.concatenate(pvet_per_frame, axis=0)
np.save(os.path.join(save_path, 'pvet_per_frame.npy'), pvet_per_frame)
if 'pve-t_scale_corrected' in metrics:
pvet_sc = pvet_scale_corrected_sum / (num_samples * num_vertices)
print('PVE-T SC: {:.5f}'.format(pvet_sc))
pvet_scale_corrected_per_frame = np.concatenate(pvet_scale_corrected_per_frame, axis=0)
np.save(os.path.join(save_path, 'pvet_scale_corrected_per_frame.npy'),
pvet_scale_corrected_per_frame)
if 'mpjpe' in metrics:
mpjpe_smpl = mpjpe_smpl_sum / (num_samples * num_joints3d)
print('MPJPE SMPL: {:.5f}'.format(mpjpe_smpl))
mpjpe_graph = mpjpe_graph_sum / (num_samples * num_joints3d)
print('MPJPE GRAPH: {:.5f}'.format(mpjpe_graph))
mpjpe_smpl_per_frame = np.concatenate(mpjpe_smpl_per_frame, axis=0)
mpjpe_graph_per_frame = np.concatenate(mpjpe_graph_per_frame, axis=0)
np.save(os.path.join(save_path, 'mpjpe_per_frame.npy'), mpjpe_smpl_per_frame)
np.save(os.path.join(save_path, 'mpjpe_graph_per_frame.npy'), mpjpe_graph_per_frame)
if 'mpjpe_scale_corrected' in metrics:
mpjpe_sc_smpl = mpjpe_scale_corrected_smpl_sum / (num_samples * num_joints3d)
print('MPJPE SC SMPL: {:.5f}'.format(mpjpe_sc_smpl))
mpjpe_sc_graph = mpjpe_scale_corrected_graph_sum / (num_samples * num_joints3d)
print('MPJPE SC GRAPH: {:.5f}'.format(mpjpe_sc_graph))
mpjpe_scale_corrected_smpl_per_frame = np.concatenate(
mpjpe_scale_corrected_smpl_per_frame, axis=0)
mpjpe_scale_corrected_graph_per_frame = np.concatenate(
mpjpe_scale_corrected_graph_per_frame, axis=0)
np.save(os.path.join(save_path, 'mpjpe_scale_corrected_per_frame.npy'),
mpjpe_scale_corrected_smpl_per_frame)
np.save(os.path.join(save_path, 'mpjpe_scale_corrected_graph_per_frame.npy'),
mpjpe_scale_corrected_graph_per_frame)
if 'j3d_rec_err' in metrics:
j3d_rec_err_smpl = j3d_rec_err_smpl_sum / (num_samples * num_joints3d)
print('Rec Err SMPL: {:.5f}'.format(j3d_rec_err_smpl))
j3d_rec_err_graph = j3d_rec_err_graph_sum / (num_samples * num_joints3d)
print('Rec Err GRAPH: {:.5f}'.format(j3d_rec_err_graph))
j3d_rec_err_smpl_per_frame = np.concatenate(j3d_rec_err_smpl_per_frame, axis=0)
j3d_rec_err_graph_per_frame = np.concatenate(j3d_rec_err_graph_per_frame, axis=0)
np.save(os.path.join(save_path, 'j3d_rec_err_per_frame.npy'),
j3d_rec_err_smpl_per_frame)
np.save(os.path.join(save_path, 'j3d_rec_err_graph_per_frame.npy'),
j3d_rec_err_graph_per_frame)
if 'pve_2d' in metrics:
pve_2d_smpl = pve_2d_smpl_sum / (num_samples * num_vertices)
print('PVE 2D SMPL: {:.5f}'.format(pve_2d_smpl))
pve_2d_graph = pve_2d_graph_sum / (num_samples * num_vertices)
print('PVE 2D GRAPH: {:.5f}'.format(pve_2d_graph))
if 'pve_2d_scale_corrected' in metrics:
pve_2d_sc_smpl = pve_2d_scale_corrected_smpl_sum / (num_samples * num_vertices)
print('PVE 2D SC SMPL: {:.5f}'.format(pve_2d_sc_smpl))
pve_2d_sc_graph = pve_2d_scale_corrected_graph_sum / (num_samples * num_vertices)
print('PVE 2D SC GRAPH: {:.5f}'.format(pve_2d_sc_graph))
if 'pve_2d_pa' in metrics:
pve_2d_pa_smpl = pve_2d_pa_smpl_sum / (num_samples * num_vertices)
print('PVE 2D PA SMPL: {:.5f}'.format(pve_2d_pa_smpl))
pve_2d_pa_graph = pve_2d_pa_graph_sum / (num_samples * num_vertices)
print('PVE 2D PA GRAPH: {:.5f}'.format(pve_2d_pa_graph))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', default=None, help='Path to network checkpoint')
parser.add_argument('--gpu', default="0", type=str, help='GPU')
args = parser.parse_args()
# Device
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# Load model
mesh = Mesh(device=device)
# Our pretrained networks have 5 residual blocks with 256 channels.
# You might want to change this if you use a different architecture.
model = CMR(mesh, 5, 256, pretrained_checkpoint=args.checkpoint, device=device)
model.to(device)
model.eval()
# Setup evaluation dataset
dataset_path = '/scratch2/as2562/datasets/3DPW/test'
dataset = PW3DEvalDataset(dataset_path, img_wh=config.INPUT_RES)
print("Eval examples found:", len(dataset))
# Metrics
metrics = ['pve', 'pve-t', 'pve_pa', 'pve-t_pa', 'mpjpe', 'j3d_rec_err',
'pve_2d', 'pve_2d_pa', 'pve_2d_scale_corrected',
'pve_scale_corrected', 'pve-t_scale_corrected', 'mpjpe_scale_corrected']
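    # NOTE: 'pve-t_pa' is listed here but evaluate_3dpw() has no branch for it,
    # so it is silently ignored.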
save_path = '/data/cvfs/as2562/GraphCMR/evaluations/3dpw'
if not os.path.exists(save_path):
os.makedirs(save_path)
# Run evaluation
evaluate_3dpw(model=model,
eval_dataset=dataset,
metrics=metrics,
device=device,
vis_save_path=save_path,
num_workers=4,
pin_memory=True,
vis_every_n_batches=1000)
| 52.824663 | 125 | 0.633353 | [
"BSD-3-Clause"
] | akashsengupta1997/GraphCMR | evaluate_3dpw_mine.py | 27,416 | Python |
# Copyright 2017-2020 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from src.api.entity import Entity
from .base import API
import json
from ..model.object_permission_model import ObjectPermissionModel
class User(API):
def __init__(self):
super(User, self).__init__()
@classmethod
def get_permissions(cls, identifier, acl_class):
entity = Entity.load_by_id_or_name(identifier, acl_class)
return cls.permissions(entity['id'], entity['aclClass']), entity['owner']
@classmethod
def permissions(cls, id, acl_class):
api = cls.instance()
response_data = api.call('permissions?id={}&aclClass={}'.format(id, acl_class.upper()), None)
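        # The endpoint returns an ACL payload; parse_mask(True) expands each entry's
        # raw permission bit mask (the exact semantics live in ObjectPermissionModel).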
if 'payload' in response_data and 'permissions' in response_data['payload']:
permissions = []
for permission_json in response_data['payload']['permissions']:
permission_object = ObjectPermissionModel.load(permission_json)
permission_object.parse_mask(True)
permissions.append(permission_object)
return permissions
else:
return []
@classmethod
def grant_permission(cls, identifier, acl_class, user_name, principal, mask):
api = cls.instance()
payload = {}
if acl_class is not None:
payload['aclClass'] = acl_class.upper()
if identifier is not None:
payload['id'] = identifier
if mask is not None:
payload['mask'] = mask
if principal is not None:
payload['principal'] = principal
if user_name is not None:
payload['userName'] = user_name
data = json.dumps(payload)
api.call('grant', data)
@classmethod
def change_owner(cls, user_name, class_name, object_id):
api = cls.instance()
response_data = api.call('/grant/owner?userName={}&aclClass={}&id={}'.format(
user_name, str(class_name).upper(), object_id), None, http_method='POST')
if 'payload' in response_data and 'entity' in response_data['payload']:
return response_data['payload']['entity']
if 'message' in response_data:
raise RuntimeError(response_data['message'])
else:
raise RuntimeError("Failed to change owner.")
@classmethod
def generate_user_token(cls, user_name, duration):
api = cls.instance()
query = '/user/token?name=%s' % user_name
if duration:
query = '&expiration='.join([query, str(duration)])
response_data = api.call(query, None)
if 'payload' in response_data and 'token' in response_data['payload']:
return response_data['payload']['token']
if 'message' in response_data:
raise RuntimeError(response_data['message'])
else:
raise RuntimeError("Failed to generate user token.")
| 40.529412 | 101 | 0.649057 | [
"Apache-2.0"
] | NShaforostov/cloud-pipeline | pipe-cli/src/api/user.py | 3,445 | Python |
import pytest
import numpy as np
import tensorflow as tf
import tensorflow.keras
import librosa
from kapre import STFT, Magnitude, Phase, Delta, InverseSTFT, ApplyFilterbank
from kapre.composed import (
get_melspectrogram_layer,
get_log_frequency_spectrogram_layer,
get_stft_mag_phase,
get_perfectly_reconstructing_stft_istft,
get_stft_magnitude_layer,
)
from utils import get_audio, save_load_compare
def _num_frame_valid(nsp_src, nsp_win, len_hop):
"""Computes the number of frames with 'valid' setting"""
return (nsp_src - (nsp_win - len_hop)) // len_hop
def _num_frame_same(nsp_src, len_hop):
"""Computes the number of frames with 'same' setting"""
return int(np.ceil(float(nsp_src) / len_hop))
def allclose_phase(a, b, atol=1e-3):
"""Testing phase.
Remember that a small error in complex value may lead to a large phase difference
if the norm is very small.
Therefore, it makes more sense to test it on the complex value itself rather than breaking it down to phase.
"""
np.testing.assert_allclose(np.sin(a), np.sin(b), atol=atol)
np.testing.assert_allclose(np.cos(a), np.cos(b), atol=atol)
def allclose_complex_numbers(a, b, atol=1e-3):
np.testing.assert_equal(np.shape(a), np.shape(b))
np.testing.assert_allclose(np.abs(a), np.abs(b), rtol=1e-5, atol=atol)
np.testing.assert_allclose(np.real(a), np.real(b), rtol=1e-5, atol=atol)
np.testing.assert_allclose(np.imag(a), np.imag(b), rtol=1e-5, atol=atol)
@pytest.mark.parametrize('n_fft', [1000])
@pytest.mark.parametrize('hop_length', [None, 256])
@pytest.mark.parametrize('n_ch', [1, 2, 6])
@pytest.mark.parametrize('data_format', ['default', 'channels_first', 'channels_last'])
def test_spectrogram_correctness(n_fft, hop_length, n_ch, data_format):
def _get_stft_model(following_layer=None):
# compute with kapre
stft_model = tensorflow.keras.models.Sequential()
stft_model.add(
STFT(
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
window_fn=None,
pad_end=False,
input_data_format=data_format,
output_data_format=data_format,
input_shape=input_shape,
name='stft',
)
)
if following_layer is not None:
stft_model.add(following_layer)
return stft_model
src_mono, batch_src, input_shape = get_audio(data_format=data_format, n_ch=n_ch)
    win_length = n_fft  # use a full-length window (win_length == n_fft)
# compute with librosa
S_ref = librosa.core.stft(
src_mono, n_fft=n_fft, hop_length=hop_length, win_length=win_length, center=False
).T # (time, freq)
S_ref = np.expand_dims(S_ref, axis=2) # time, freq, ch=1
S_ref = np.tile(S_ref, [1, 1, n_ch]) # time, freq, ch=n_ch
if data_format == 'channels_first':
S_ref = np.transpose(S_ref, (2, 0, 1)) # ch, time, freq
stft_model = _get_stft_model()
S_complex = stft_model.predict(batch_src)[0] # 3d representation
allclose_complex_numbers(S_ref, S_complex)
# test Magnitude()
stft_mag_model = _get_stft_model(Magnitude())
S = stft_mag_model.predict(batch_src)[0] # 3d representation
np.testing.assert_allclose(np.abs(S_ref), S, atol=2e-4)
# # test Phase()
stft_phase_model = _get_stft_model(Phase())
S = stft_phase_model.predict(batch_src)[0] # 3d representation
allclose_phase(np.angle(S_complex), S)
@pytest.mark.parametrize('n_fft', [512])
@pytest.mark.parametrize('sr', [22050])
@pytest.mark.parametrize('hop_length', [None, 256])
@pytest.mark.parametrize('n_ch', [2])
@pytest.mark.parametrize('data_format', ['default', 'channels_first', 'channels_last'])
@pytest.mark.parametrize('amin', [1e-5, 1e-3])
@pytest.mark.parametrize('dynamic_range', [120.0, 80.0])
@pytest.mark.parametrize('n_mels', [40])
@pytest.mark.parametrize('mel_f_min', [0.0])
@pytest.mark.parametrize('mel_f_max', [8000])
def test_melspectrogram_correctness(
n_fft, sr, hop_length, n_ch, data_format, amin, dynamic_range, n_mels, mel_f_min, mel_f_max
):
"""Test the correctness of melspectrogram.
Note that mel filterbank is tested separated
"""
def _get_melgram_model(return_decibel, amin, dynamic_range, input_shape=None):
# compute with kapre
melgram_model = get_melspectrogram_layer(
n_fft=n_fft,
sample_rate=sr,
n_mels=n_mels,
mel_f_min=mel_f_min,
mel_f_max=mel_f_max,
win_length=win_length,
hop_length=hop_length,
input_data_format=data_format,
output_data_format=data_format,
return_decibel=return_decibel,
input_shape=input_shape,
db_amin=amin,
db_dynamic_range=dynamic_range,
)
return melgram_model
src_mono, batch_src, input_shape = get_audio(data_format=data_format, n_ch=n_ch)
    win_length = n_fft  # use a full-length window (win_length == n_fft)
# compute with librosa
S_ref = librosa.feature.melspectrogram(
src_mono,
sr=sr,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
center=False,
power=1.0,
n_mels=n_mels,
fmin=mel_f_min,
fmax=mel_f_max,
).T
S_ref = np.expand_dims(S_ref, axis=2) # time, freq, ch=1
S_ref = np.tile(S_ref, [1, 1, n_ch]) # time, freq, ch=n_ch
if data_format == 'channels_first':
S_ref = np.transpose(S_ref, (2, 0, 1)) # ch, time, freq
# melgram
melgram_model = _get_melgram_model(
return_decibel=False, input_shape=input_shape, amin=None, dynamic_range=120.0
)
S = melgram_model.predict(batch_src)[0] # 3d representation
np.testing.assert_allclose(S_ref, S, atol=1e-4)
# log melgram
melgram_model = _get_melgram_model(
return_decibel=True, input_shape=input_shape, amin=amin, dynamic_range=dynamic_range
)
S = melgram_model.predict(batch_src)[0] # 3d representation
S_ref_db = librosa.power_to_db(S_ref, ref=1.0, amin=amin, top_db=dynamic_range)
np.testing.assert_allclose(
S_ref_db, S, rtol=3e-3
) # decibel is evaluated with relative tolerance
@pytest.mark.parametrize('data_format', ['default', 'channels_first', 'channels_last'])
def test_log_spectrogram_runnable(data_format):
"""test if log spectrogram layer works well"""
src_mono, batch_src, input_shape = get_audio(data_format=data_format, n_ch=1)
_ = get_log_frequency_spectrogram_layer(input_shape, return_decibel=True)
_ = get_log_frequency_spectrogram_layer(input_shape, return_decibel=False)
@pytest.mark.xfail
def test_log_spectrogram_fail():
"""test if log spectrogram layer works well"""
src_mono, batch_src, input_shape = get_audio(data_format='channels_last', n_ch=1)
_ = get_log_frequency_spectrogram_layer(input_shape, return_decibel=True, log_n_bins=200)
def test_delta():
"""test delta layer"""
specgrams = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
specgrams = np.reshape(specgrams, (1, -1, 1, 1)) # (b, t, f, ch)
delta_model = tensorflow.keras.models.Sequential()
delta_model.add(Delta(win_length=3, input_shape=(4, 1, 1), data_format='channels_last'))
delta_kapre = delta_model(specgrams)
delta_ref = np.array([0.5, 1.0, 1.0, 0.5], dtype=np.float32)
delta_ref = np.reshape(delta_ref, (1, -1, 1, 1)) # (b, t, f, ch)
np.testing.assert_allclose(delta_kapre, delta_ref)
@pytest.mark.parametrize('data_format', ['default', 'channels_first', 'channels_last'])
def test_mag_phase(data_format):
n_ch = 1
n_fft, hop_length, win_length = 512, 256, 512
src_mono, batch_src, input_shape = get_audio(data_format=data_format, n_ch=n_ch)
mag_phase_layer = get_stft_mag_phase(
input_shape=input_shape,
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
input_data_format=data_format,
output_data_format=data_format,
)
model = tensorflow.keras.models.Sequential()
model.add(mag_phase_layer)
mag_phase_kapre = model(batch_src)[0] # a 2d image shape
ch_axis = 0 if data_format == 'channels_first' else 2 # non-batch
mag_phase_ref = np.stack(
librosa.magphase(
librosa.stft(
src_mono, n_fft=n_fft, hop_length=hop_length, win_length=win_length, center=False,
).T
),
axis=ch_axis,
)
np.testing.assert_equal(mag_phase_kapre.shape, mag_phase_ref.shape)
# magnitude test
np.testing.assert_allclose(
np.take(mag_phase_kapre, [0,], axis=ch_axis),
np.take(mag_phase_ref, [0,], axis=ch_axis),
atol=2e-4,
)
# phase test - todo - yeah..
@pytest.mark.parametrize('waveform_data_format', ['default', 'channels_first', 'channels_last'])
@pytest.mark.parametrize('stft_data_format', ['default', 'channels_first', 'channels_last'])
@pytest.mark.parametrize('hop_ratio', [0.5, 0.25, 0.125])
def test_perfectly_reconstructing_stft_istft(waveform_data_format, stft_data_format, hop_ratio):
n_ch = 1
src_mono, batch_src, input_shape = get_audio(data_format=waveform_data_format, n_ch=n_ch)
time_axis = 1 if waveform_data_format == 'channels_first' else 0 # non-batch!
len_src = input_shape[time_axis]
n_fft = 2048
hop_length = int(2048 * hop_ratio)
n_added_frames = int(1 / hop_ratio) - 1
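    # With hop = n_fft * hop_ratio, (1 / hop_ratio) - 1 is the number of frames that
    # come purely from the internal zero-padding used to make the STFT/ISTFT pair
    # perfectly reconstructing; the round-trip tests below trim these frames (and the
    # pad_begin samples) before comparing against the reference.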
stft, istft = get_perfectly_reconstructing_stft_istft(
stft_input_shape=input_shape,
n_fft=n_fft,
hop_length=hop_length,
waveform_data_format=waveform_data_format,
stft_data_format=stft_data_format,
)
# Test - [STFT -> ISTFT]
model = tf.keras.models.Sequential([stft, istft])
recon_waveform = model(batch_src)
# trim off the pad_begin part
len_pad_begin = n_fft - hop_length
if waveform_data_format == 'channels_first':
recon_waveform = recon_waveform[:, :, len_pad_begin : len_pad_begin + len_src]
else:
recon_waveform = recon_waveform[:, len_pad_begin : len_pad_begin + len_src, :]
np.testing.assert_allclose(batch_src, recon_waveform, atol=1e-5)
# Test - [ISTFT -> STFT]
S = librosa.stft(src_mono, n_fft=n_fft, hop_length=hop_length).T.astype(
np.complex64
) # (time, freq)
ch_axis = 1 if stft_data_format == 'channels_first' else 3 # batch shape
S = np.expand_dims(S, (0, ch_axis))
model = tf.keras.models.Sequential([istft, stft])
recon_S = model(S)
# trim off the frames coming from zero-pad result
n = n_added_frames
n_added_frames += n
if stft_data_format == 'channels_first':
if n != 0:
S = S[:, :, n:-n, :]
recon_S = recon_S[:, :, n_added_frames:-n_added_frames, :]
else:
if n != 0:
S = S[:, n:-n, :, :]
recon_S = recon_S[:, n_added_frames:-n_added_frames, :, :]
np.testing.assert_equal(S.shape, recon_S.shape)
allclose_complex_numbers(S, recon_S)
def test_save_load():
"""test saving/loading of models that has stft, melspectorgrma, and log frequency."""
src_mono, batch_src, input_shape = get_audio(data_format='channels_last', n_ch=1)
# test STFT save/load
save_load_compare(
STFT(input_shape=input_shape, pad_begin=True), batch_src, allclose_complex_numbers
)
# test melspectrogram save/load
save_load_compare(
get_melspectrogram_layer(input_shape=input_shape, return_decibel=True),
batch_src,
np.testing.assert_allclose,
)
# test log frequency spectrogram save/load
save_load_compare(
get_log_frequency_spectrogram_layer(input_shape=input_shape, return_decibel=True),
batch_src,
np.testing.assert_allclose,
)
# test stft_mag_phase
save_load_compare(
get_stft_mag_phase(input_shape=input_shape, return_decibel=True),
batch_src,
np.testing.assert_allclose,
)
# test stft mag
save_load_compare(
get_stft_magnitude_layer(input_shape=input_shape), batch_src, np.testing.assert_allclose
)
@pytest.mark.xfail()
@pytest.mark.parametrize('layer', [STFT, InverseSTFT])
def test_wrong_input_data_format(layer):
_ = layer(input_data_format='weird_string')
@pytest.mark.xfail()
@pytest.mark.parametrize('layer', [STFT, InverseSTFT])
def test_wrong_output_data_format(layer):
_ = layer(output_data_format='weird_string')
@pytest.mark.xfail()
@pytest.mark.parametrize('layer', [Delta, ApplyFilterbank])
def test_wrong_data_format(layer):
_ = layer(data_format='weird_string')
if __name__ == '__main__':
pytest.main([__file__])
| 35.376045 | 112 | 0.682598 | [
"MIT"
] | Path-A/kapre | tests/test_time_frequency.py | 12,700 | Python |
#!/usr/bin/env python2
import sys
import re
import datetime
import hashlib
import optparse
import urllib2
# cheers Dirk :)
url = 'https://testssl.sh/mapping-rfc.txt'
for line in urllib2.urlopen(url):
cipher = line.split()
print cipher[1]+'(0'+cipher[0]+'),'
| 16.117647 | 42 | 0.686131 | [
"ECL-2.0",
"Apache-2.0"
] | Ameg-yag/TLS-Attacker | resources/cipher_suite_grabber.py | 274 | Python |
"""Ray constants used in the Python code."""
import logging
import math
import os
logger = logging.getLogger(__name__)
def env_integer(key, default):
if key in os.environ:
return int(os.environ[key])
return default
def direct_call_enabled():
return bool(int(os.environ.get("RAY_FORCE_DIRECT", "1")))
ID_SIZE = 20
# The default maximum number of bytes to allocate to the object store unless
# overridden by the user.
DEFAULT_OBJECT_STORE_MAX_MEMORY_BYTES = 20 * 10**9
# The default number of retries to call `put` when the object store is full.
DEFAULT_PUT_OBJECT_RETRIES = 5
# The default seconds for delay between calls to retry `put` when
# the object store is full. This delay is exponentially doubled up to
# DEFAULT_PUT_OBJECT_RETRIES times.
DEFAULT_PUT_OBJECT_DELAY = 1
# The smallest cap on the memory used by the object store that we allow.
# This must be greater than MEMORY_RESOURCE_UNIT_BYTES * 0.7
OBJECT_STORE_MINIMUM_MEMORY_BYTES = 75 * 1024 * 1024
# The default maximum number of bytes that the non-primary Redis shards are
# allowed to use unless overridden by the user.
DEFAULT_REDIS_MAX_MEMORY_BYTES = 10**10
# The smallest cap on the memory used by Redis that we allow.
REDIS_MINIMUM_MEMORY_BYTES = 10**7
# Default resource requirements for actors when no resource requirements are
# specified.
DEFAULT_ACTOR_METHOD_CPU_SIMPLE = 1
DEFAULT_ACTOR_CREATION_CPU_SIMPLE = 0
# Default resource requirements for actors when some resource requirements are
# specified.
DEFAULT_ACTOR_METHOD_CPU_SPECIFIED = 0
DEFAULT_ACTOR_CREATION_CPU_SPECIFIED = 1
# Default number of return values for each actor method.
DEFAULT_ACTOR_METHOD_NUM_RETURN_VALS = 1
# If a remote function or actor (or some other export) has serialized size
# greater than this quantity, print an warning.
PICKLE_OBJECT_WARNING_SIZE = 10**7
# If remote functions with the same source are imported this many times, then
# print a warning.
DUPLICATE_REMOTE_FUNCTION_THRESHOLD = 100
# The maximum resource quantity that is allowed. TODO(rkn): This could be
# relaxed, but the current implementation of the node manager will be slower
# for large resource quantities due to bookkeeping of specific resource IDs.
MAX_RESOURCE_QUANTITY = 100000
# Each memory "resource" counts as this many bytes of memory.
MEMORY_RESOURCE_UNIT_BYTES = 50 * 1024 * 1024
# Number of units 1 resource can be subdivided into.
MIN_RESOURCE_GRANULARITY = 0.0001
# Fraction of plasma memory that can be reserved. It is actually 70% but this
# is set to 69% to leave some headroom.
PLASMA_RESERVABLE_MEMORY_FRACTION = 0.69
def round_to_memory_units(memory_bytes, round_up):
"""Round bytes to the nearest memory unit."""
return from_memory_units(to_memory_units(memory_bytes, round_up))
def from_memory_units(memory_units):
"""Convert from memory units -> bytes."""
return memory_units * MEMORY_RESOURCE_UNIT_BYTES
def to_memory_units(memory_bytes, round_up):
"""Convert from bytes -> memory units."""
value = memory_bytes / MEMORY_RESOURCE_UNIT_BYTES
if value < 1:
        raise ValueError(
            "The minimum amount of memory that can be requested is {} bytes, "
            "however {} bytes were requested.".format(
                MEMORY_RESOURCE_UNIT_BYTES, memory_bytes))
if isinstance(value, float) and not value.is_integer():
# TODO(ekl) Ray currently does not support fractional resources when
# the quantity is greater than one. We should fix memory resources to
# be allocated in units of bytes and not 100MB.
if round_up:
value = int(math.ceil(value))
else:
value = int(math.floor(value))
return int(value)
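# Worked example (illustrative): with the 50 MiB memory unit defined above,
# requesting 200 MiB corresponds to 4 memory units, and converting back
# recovers the byte count:
#     to_memory_units(200 * 1024 * 1024, round_up=True)  # -> 4
#     from_memory_units(4)  # -> 209715200 bytes (200 MiB)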
# Different types of Ray errors that can be pushed to the driver.
# TODO(rkn): These should be defined in flatbuffers and must be synced with
# the existing C++ definitions.
WAIT_FOR_CLASS_PUSH_ERROR = "wait_for_class"
PICKLING_LARGE_OBJECT_PUSH_ERROR = "pickling_large_object"
WAIT_FOR_FUNCTION_PUSH_ERROR = "wait_for_function"
TASK_PUSH_ERROR = "task"
REGISTER_REMOTE_FUNCTION_PUSH_ERROR = "register_remote_function"
FUNCTION_TO_RUN_PUSH_ERROR = "function_to_run"
VERSION_MISMATCH_PUSH_ERROR = "version_mismatch"
CHECKPOINT_PUSH_ERROR = "checkpoint"
REGISTER_ACTOR_PUSH_ERROR = "register_actor"
WORKER_CRASH_PUSH_ERROR = "worker_crash"
WORKER_DIED_PUSH_ERROR = "worker_died"
WORKER_POOL_LARGE_ERROR = "worker_pool_large"
PUT_RECONSTRUCTION_PUSH_ERROR = "put_reconstruction"
INFEASIBLE_TASK_ERROR = "infeasible_task"
RESOURCE_DEADLOCK_ERROR = "resource_deadlock"
REMOVED_NODE_ERROR = "node_removed"
MONITOR_DIED_ERROR = "monitor_died"
LOG_MONITOR_DIED_ERROR = "log_monitor_died"
REPORTER_DIED_ERROR = "reporter_died"
DASHBOARD_DIED_ERROR = "dashboard_died"
RAYLET_CONNECTION_ERROR = "raylet_connection_error"
# Abort autoscaling if more than this number of errors are encountered. This
# is a safety feature to prevent e.g. runaway node launches.
AUTOSCALER_MAX_NUM_FAILURES = env_integer("AUTOSCALER_MAX_NUM_FAILURES", 5)
# The maximum number of nodes to launch in a single request.
# Multiple requests may be made for this batch size, up to
# the limit of AUTOSCALER_MAX_CONCURRENT_LAUNCHES.
AUTOSCALER_MAX_LAUNCH_BATCH = env_integer("AUTOSCALER_MAX_LAUNCH_BATCH", 5)
# Max number of nodes to launch at a time.
AUTOSCALER_MAX_CONCURRENT_LAUNCHES = env_integer(
"AUTOSCALER_MAX_CONCURRENT_LAUNCHES", 10)
# Interval at which to perform autoscaling updates.
AUTOSCALER_UPDATE_INTERVAL_S = env_integer("AUTOSCALER_UPDATE_INTERVAL_S", 5)
# The autoscaler will attempt to restart Ray on nodes it hasn't heard from
# in more than this interval.
AUTOSCALER_HEARTBEAT_TIMEOUT_S = env_integer("AUTOSCALER_HEARTBEAT_TIMEOUT_S",
30)
# The reporter will report its statistics this often (milliseconds).
REPORTER_UPDATE_INTERVAL_MS = env_integer("REPORTER_UPDATE_INTERVAL_MS", 2500)
# Max number of retries to AWS (default is 5, time increases exponentially)
BOTO_MAX_RETRIES = env_integer("BOTO_MAX_RETRIES", 12)
# Max number of retries to create an EC2 node (retry different subnet)
BOTO_CREATE_MAX_RETRIES = env_integer("BOTO_CREATE_MAX_RETRIES", 5)
LOGGER_FORMAT = (
"%(asctime)s\t%(levelname)s %(filename)s:%(lineno)s -- %(message)s")
LOGGER_FORMAT_HELP = "The logging format. default='{}'".format(LOGGER_FORMAT)
LOGGER_LEVEL = "info"
LOGGER_LEVEL_CHOICES = ["debug", "info", "warning", "error", "critical"]
LOGGER_LEVEL_HELP = ("The logging level threshold, choices=['debug', 'info',"
" 'warning', 'error', 'critical'], default='info'")
# A constant indicating that an actor doesn't need reconstructions.
NO_RECONSTRUCTION = 0
# A constant indicating that an actor should be reconstructed infinite times.
INFINITE_RECONSTRUCTION = 2**30
# Constants used to define the different process types.
PROCESS_TYPE_REAPER = "reaper"
PROCESS_TYPE_MONITOR = "monitor"
PROCESS_TYPE_RAYLET_MONITOR = "raylet_monitor"
PROCESS_TYPE_LOG_MONITOR = "log_monitor"
PROCESS_TYPE_REPORTER = "reporter"
PROCESS_TYPE_DASHBOARD = "dashboard"
PROCESS_TYPE_WORKER = "worker"
PROCESS_TYPE_RAYLET = "raylet"
PROCESS_TYPE_PLASMA_STORE = "plasma_store"
PROCESS_TYPE_REDIS_SERVER = "redis_server"
PROCESS_TYPE_WEB_UI = "web_ui"
LOG_MONITOR_MAX_OPEN_FILES = 200
# A constant used as object metadata to indicate the object is raw binary.
RAW_BUFFER_METADATA = b"RAW"
# A constant used as object metadata to indicate the object is pickled. This
# format is only ever used for Python inline task argument values.
PICKLE_BUFFER_METADATA = b"PICKLE"
# A constant used as object metadata to indicate the object is pickle5 format.
PICKLE5_BUFFER_METADATA = b"PICKLE5"
AUTOSCALER_RESOURCE_REQUEST_CHANNEL = b"autoscaler_resource_request"
# The default password to prevent redis port scanning attack.
# Hex for ray.
REDIS_DEFAULT_PASSWORD = "5241590000000000"
# The default ip address to bind to.
NODE_DEFAULT_IP = "127.0.0.1"
| 39.673267 | 78 | 0.774644 | [
"Apache-2.0"
] | stephanie-wang/ray | python/ray/ray_constants.py | 8,014 | Python |
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from sklearn.base import BaseEstimator
from sklearn.base import ClassifierMixin
from sklearn.base import TransformerMixin
from bugbug.utils import numpy_to_dict
class KerasTextToSequences(BaseEstimator, TransformerMixin):
def __init__(self, maxlen, vocab_size):
self.maxlen = maxlen
self.tokenizer = Tokenizer(num_words=vocab_size)
def fit(self, x, y=None):
self.tokenizer.fit_on_texts(x)
return self
def transform(self, data):
sequences = self.tokenizer.texts_to_sequences(data)
return pad_sequences(sequences, maxlen=self.maxlen)
class KerasClassifier(BaseEstimator, ClassifierMixin):
def __init__(self, epochs, batch_size):
self.epochs = epochs
self.batch_size = batch_size
def fit(self, X, y):
X_dict = numpy_to_dict(X)
self.model = self.model_creator(X_dict, y)
self.model.fit(X_dict, y, epochs=self.epochs, batch_size=self.batch_size, verbose=1)
return self
def predict_proba(self, X):
return self.model.predict(numpy_to_dict(X))
def predict(self, X):
return self.predict_proba(X).argmax(axis=-1)
| 30.875 | 92 | 0.712551 | [
"MPL-2.0"
] | Delkhaz/bugbug | bugbug/nn.py | 1,482 | Python |
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 15 16:02:16 2018
@author: ning
"""
import os
working_dir = ''
import pandas as pd
pd.options.mode.chained_assignment = None
import numpy as np
from utils import (cv_counts)
saving_dir = '../results/cv_counts'
if not os.path.exists(saving_dir):
os.mkdir(saving_dir)
# Exp 1: for each participant, run the conditional-count analysis with three
# feature sets (all six measures, the three judgements, the three RTs) and
# window sizes of 1-4 trials back
for participant in ['AC', 'CL', 'FW', 'HB', 'KK', 'LM', 'MC', 'MP1', 'MP2', 'NN', 'RP', 'SD', 'TJ', 'TS', 'WT']:
experiment = 'pos'
df = pd.read_csv(os.path.join(working_dir,'../data/PoSdata.csv'))
df = df[df.columns[1:]]
df.columns = ['participant',
'blocks',
'trials',
'firstgabor',
'success',
'tilted',
'correct',
'RT_correct',
'awareness',
'RT_awareness',
'confidence',
'RT_confidence']
df_sub = df[df['participant'] == participant]
# make sure all the attributes are either 0 or 1
df_sub.loc[:,'success' ] = df_sub.loc[:,'success' ].values - 1
df_sub.loc[:,'awareness' ] = df_sub.loc[:,'awareness' ].values - 1
df_sub.loc[:,'confidence'] = df_sub.loc[:,'confidence'].values - 1
##################################################################
np.random.seed(12345)
# use all 6 possible features
feature_names = [
'correct',
'awareness',
'confidence',
'RT_correct',
'RT_awareness',
'RT_confidence']
target_name = 'success'
results = dict(sub = [],
window = [],
fold = [],
)
for name in feature_names:
results['{}_high_cond_{}_low'.format(target_name,name)] = []
results['{}_high_cond_{}_high'.format(target_name,name)] = []
for n_back in np.arange(1,5): # loop through the number of trials looking back
        # this is the part that is redundant and the code is long
results = cv_counts(
df_sub,
feature_names,
target_name,
results,
participant,
experiment,
window=n_back,
)
temp = pd.DataFrame(results)
temp.to_csv(os.path.join(saving_dir,'Pos_6_features (cv_count)_{}.csv'.format(participant)),index=False) # save as a csv
################################################################################
# use success, awareness, and confidence as features
np.random.seed(12345)
# use judgement features
feature_names = [
'correct',
'awareness',
'confidence',]
target_name = 'success'
results = dict(sub = [],
window = [],
fold = [],
)
for name in feature_names:
results['{}_high_cond_{}_low'.format(target_name,name)] = []
results['{}_high_cond_{}_high'.format(target_name,name)] = []
for n_back in np.arange(1,5): # loop through the number of trials looking back
        # this is the part that is redundant and the code is long
results = cv_counts(
df_sub,
feature_names,
target_name,
results,
participant,
experiment,
window=n_back,
)
temp = pd.DataFrame(results)
temp.to_csv(os.path.join(saving_dir,'Pos_3_1_features (cv_count)_{}.csv'.format(participant)),index=False) # save as a csv
###############################################################################
# use reactimes as features
np.random.seed(12345)
# use all 6 possible features
feature_names = [
'RT_correct',
'RT_awareness',
'RT_confidence']
target_name = 'success'
results = dict(sub = [],
window = [],
fold = [],
)
for name in feature_names:
results['{}_high_cond_{}_low'.format(target_name,name)] = []
results['{}_high_cond_{}_high'.format(target_name,name)] = []
for n_back in np.arange(1,5): # loop through the number of trials looking back
        # this is the part that is redundant and the code is long
results = cv_counts(
df_sub,
feature_names,
target_name,
results,
participant,
experiment,
window=n_back,
)
temp = pd.DataFrame(results)
temp.to_csv(os.path.join(saving_dir,'Pos_RT_features (cv_count)_{}.csv'.format(participant)),index=False) # save as a csv
| 33.760234 | 126 | 0.411744 | [
"MIT"
] | nmningmei/metacognition | scripts/classifcation_pos_n_trials_back (cv counts).py | 5,773 | Python |
# -*- coding: utf-8 -*-
import os
import torch
import torch.nn as nn
from supar.models import (BiaffineDependencyModel, CRF2oDependencyModel,
CRFDependencyModel, VIDependencyModel)
from supar.parsers.parser import Parser
from supar.utils import Config, Dataset, Embedding
from supar.utils.common import BOS, PAD, UNK
from supar.utils.field import ChartField, Field, RawField, SubwordField
from supar.utils.fn import ispunct
from supar.utils.logging import get_logger, progress_bar
from supar.utils.metric import AttachmentMetric
from supar.utils.transform import CoNLL
logger = get_logger(__name__)
class BiaffineDependencyParser(Parser):
r"""
The implementation of Biaffine Dependency Parser :cite:`dozat-etal-2017-biaffine`.
"""
NAME = 'biaffine-dependency'
MODEL = BiaffineDependencyModel
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.TAG = self.transform.CPOS
self.ARC, self.REL = self.transform.HEAD, self.transform.DEPREL
def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1,
punct=False, tree=False, proj=False, partial=False, verbose=True, **kwargs):
r"""
Args:
train/dev/test (list[list] or str):
Filenames of the train/dev/test datasets.
buckets (int):
The number of buckets that sentences are assigned to. Default: 32.
batch_size (int):
The number of tokens in each batch. Default: 5000.
update_steps (int):
Gradient accumulation steps. Default: 1.
punct (bool):
If ``False``, ignores the punctuation during evaluation. Default: ``False``.
tree (bool):
If ``True``, ensures to output well-formed trees. Default: ``False``.
proj (bool):
If ``True``, ensures to output projective trees. Default: ``False``.
partial (bool):
``True`` denotes the trees are partially annotated. Default: ``False``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating training configs.
"""
return super().train(**Config().update(locals()))
def evaluate(self, data, buckets=8, batch_size=5000,
punct=False, tree=True, proj=False, partial=False, verbose=True, **kwargs):
r"""
Args:
data (str):
The data for evaluation, both list of instances and filename are allowed.
buckets (int):
                The number of buckets that sentences are assigned to. Default: 8.
batch_size (int):
The number of tokens in each batch. Default: 5000.
punct (bool):
If ``False``, ignores the punctuation during evaluation. Default: ``False``.
tree (bool):
                If ``True``, ensures to output well-formed trees. Default: ``True``.
proj (bool):
If ``True``, ensures to output projective trees. Default: ``False``.
partial (bool):
``True`` denotes the trees are partially annotated. Default: ``False``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating evaluation configs.
Returns:
The loss scalar and evaluation results.
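        Examples:
            An illustrative call; the data path below is a placeholder:
            >>> from supar import Parser
            >>> parser = Parser.load('biaffine-dep-en')
            >>> loss, metric = parser.evaluate('ptb/test.conllx')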
"""
return super().evaluate(**Config().update(locals()))
def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False,
tree=True, proj=False, verbose=True, **kwargs):
r"""
Args:
data (list[list] or str):
The data for prediction, both a list of instances and filename are allowed.
pred (str):
If specified, the predicted results will be saved to the file. Default: ``None``.
lang (str):
Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.
``None`` if tokenization is not required.
Default: ``None``.
buckets (int):
                The number of buckets that sentences are assigned to. Default: 8.
batch_size (int):
The number of tokens in each batch. Default: 5000.
prob (bool):
If ``True``, outputs the probabilities. Default: ``False``.
tree (bool):
                If ``True``, ensures to output well-formed trees. Default: ``True``.
proj (bool):
If ``True``, ensures to output projective trees. Default: ``False``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating prediction configs.
Returns:
A :class:`~supar.utils.Dataset` object that stores the predicted results.
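        Examples:
            An illustrative call on a pre-tokenized sentence (the input below is arbitrary):
            >>> from supar import Parser
            >>> parser = Parser.load('biaffine-dep-en')
            >>> dataset = parser.predict([['She', 'enjoys', 'playing', 'tennis', '.']], prob=True)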
"""
return super().predict(**Config().update(locals()))
@classmethod
def load(cls, path, reload=False, src=None, **kwargs):
r"""
Loads a parser with data fields and pretrained model parameters.
Args:
path (str):
- a string with the shortcut name of a pretrained model defined in ``supar.MODEL``
to load from cache or download, e.g., ``'biaffine-dep-en'``.
- a local path to a pretrained model, e.g., ``./<path>/model``.
reload (bool):
Whether to discard the existing cache and force a fresh download. Default: ``False``.
src (str):
Specifies where to download the model.
``'github'``: github release page.
``'hlt'``: hlt homepage, only accessible from 9:00 to 18:00 (UTC+8).
Default: None.
kwargs (dict):
A dict holding unconsumed arguments for updating training configs and initializing the model.
Examples:
>>> from supar import Parser
>>> parser = Parser.load('biaffine-dep-en')
>>> parser = Parser.load('./ptb.biaffine.dep.lstm.char')
"""
return super().load(path, reload, src, **kwargs)
def _train(self, loader):
self.model.train()
bar, metric = progress_bar(loader), AttachmentMetric()
for i, batch in enumerate(bar, 1):
words, texts, *feats, arcs, rels = batch
word_mask = words.ne(self.args.pad_index)
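            # with a subword-based encoder, `words` carries an extra subword
            # dimension, so collapse it to obtain a per-token mask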
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
s_arc, s_rel = self.model(words, feats)
loss = self.model.loss(s_arc, s_rel, arcs, rels, mask, self.args.partial)
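            # scale the loss for gradient accumulation: gradients are summed over
            # `update_steps` batches before the optimizer and scheduler step below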
loss = loss / self.args.update_steps
loss.backward()
nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip)
if i % self.args.update_steps == 0:
self.optimizer.step()
self.scheduler.step()
self.optimizer.zero_grad()
arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask)
if self.args.partial:
mask &= arcs.ge(0)
# ignore all punctuation if not specified
if not self.args.punct:
mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))
metric(arc_preds, rel_preds, arcs, rels, mask)
bar.set_postfix_str(f"lr: {self.scheduler.get_last_lr()[0]:.4e} - loss: {loss:.4f} - {metric}")
logger.info(f"{bar.postfix}")
@torch.no_grad()
def _evaluate(self, loader):
self.model.eval()
total_loss, metric = 0, AttachmentMetric()
for batch in loader:
words, texts, *feats, arcs, rels = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
s_arc, s_rel = self.model(words, feats)
loss = self.model.loss(s_arc, s_rel, arcs, rels, mask, self.args.partial)
arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask, self.args.tree, self.args.proj)
if self.args.partial:
mask &= arcs.ge(0)
# ignore all punctuation if not specified
if not self.args.punct:
mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))
total_loss += loss.item()
metric(arc_preds, rel_preds, arcs, rels, mask)
total_loss /= len(loader)
return total_loss, metric
@torch.no_grad()
def _predict(self, loader):
self.model.eval()
preds = {'arcs': [], 'rels': [], 'probs': [] if self.args.prob else None}
for batch in progress_bar(loader):
words, texts, *feats = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
lens = mask.sum(1).tolist()
s_arc, s_rel = self.model(words, feats)
arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask, self.args.tree, self.args.proj)
preds['arcs'].extend(arc_preds[mask].split(lens))
preds['rels'].extend(rel_preds[mask].split(lens))
if self.args.prob:
preds['probs'].extend([prob[1:i+1, :i+1].cpu() for i, prob in zip(lens, s_arc.softmax(-1).unbind())])
preds['arcs'] = [seq.tolist() for seq in preds['arcs']]
preds['rels'] = [self.REL.vocab[seq.tolist()] for seq in preds['rels']]
return preds
@classmethod
def build(cls, path, min_freq=2, fix_len=20, **kwargs):
r"""
Build a brand-new Parser, including initialization of all data fields and model parameters.
Args:
path (str):
The path of the model to be saved.
            min_freq (int):
The minimum frequency needed to include a token in the vocabulary.
Required if taking words as encoder input.
Default: 2.
fix_len (int):
The max length of all subword pieces. The excess part of each piece will be truncated.
Required if using CharLSTM/BERT.
Default: 20.
kwargs (dict):
A dict holding the unconsumed arguments.
"""
args = Config(**locals())
args.device = 'cuda' if torch.cuda.is_available() else 'cpu'
os.makedirs(os.path.dirname(path) or './', exist_ok=True)
if os.path.exists(path) and not args.build:
parser = cls.load(**args)
parser.model = cls.MODEL(**parser.args)
parser.model.load_pretrained(parser.WORD.embed).to(args.device)
return parser
logger.info("Building the fields")
TAG, CHAR, ELMO, BERT = None, None, None, None
if args.encoder != 'lstm':
from transformers import (AutoTokenizer, GPT2Tokenizer,
GPT2TokenizerFast)
t = AutoTokenizer.from_pretrained(args.bert)
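            # GPT-2 style BPE tokenizers mark word boundaries with a leading
            # space, hence the ' '+x preprocessing below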
WORD = SubwordField('words',
pad=t.pad_token,
unk=t.unk_token,
bos=t.bos_token or t.cls_token,
fix_len=args.fix_len,
tokenize=t.tokenize,
fn=None if not isinstance(t, (GPT2Tokenizer, GPT2TokenizerFast)) else lambda x: ' '+x)
WORD.vocab = t.get_vocab()
else:
WORD = Field('words', pad=PAD, unk=UNK, bos=BOS, lower=True)
if 'tag' in args.feat:
TAG = Field('tags', bos=BOS)
if 'char' in args.feat:
CHAR = SubwordField('chars', pad=PAD, unk=UNK, bos=BOS, fix_len=args.fix_len)
if 'elmo' in args.feat:
from allennlp.modules.elmo import batch_to_ids
ELMO = RawField('elmo')
ELMO.compose = lambda x: batch_to_ids(x).to(WORD.device)
if 'bert' in args.feat:
from transformers import (AutoTokenizer, GPT2Tokenizer,
GPT2TokenizerFast)
t = AutoTokenizer.from_pretrained(args.bert)
BERT = SubwordField('bert',
pad=t.pad_token,
unk=t.unk_token,
bos=t.bos_token or t.cls_token,
fix_len=args.fix_len,
tokenize=t.tokenize,
fn=None if not isinstance(t, (GPT2Tokenizer, GPT2TokenizerFast)) else lambda x: ' '+x)
BERT.vocab = t.get_vocab()
TEXT = RawField('texts')
ARC = Field('arcs', bos=BOS, use_vocab=False, fn=CoNLL.get_arcs)
REL = Field('rels', bos=BOS)
transform = CoNLL(FORM=(WORD, TEXT, CHAR, ELMO, BERT), CPOS=TAG, HEAD=ARC, DEPREL=REL)
train = Dataset(transform, args.train)
if args.encoder == 'lstm':
WORD.build(train, args.min_freq, (Embedding.load(args.embed, args.unk) if args.embed else None))
if TAG is not None:
TAG.build(train)
if CHAR is not None:
CHAR.build(train)
REL.build(train)
args.update({
'n_words': len(WORD.vocab) if args.encoder != 'lstm' else WORD.vocab.n_init,
'n_rels': len(REL.vocab),
'n_tags': len(TAG.vocab) if TAG is not None else None,
'n_chars': len(CHAR.vocab) if CHAR is not None else None,
'char_pad_index': CHAR.pad_index if CHAR is not None else None,
'bert_pad_index': BERT.pad_index if BERT is not None else None,
'pad_index': WORD.pad_index,
'unk_index': WORD.unk_index,
'bos_index': WORD.bos_index
})
logger.info(f"{transform}")
logger.info("Building the model")
model = cls.MODEL(**args).load_pretrained(WORD.embed if hasattr(WORD, 'embed') else None).to(args.device)
logger.info(f"{model}\n")
return cls(args, model, transform)
class CRFDependencyParser(BiaffineDependencyParser):
r"""
The implementation of first-order CRF Dependency Parser :cite:`zhang-etal-2020-efficient`.
"""
NAME = 'crf-dependency'
MODEL = CRFDependencyModel
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1,
punct=False, mbr=True, tree=False, proj=False, partial=False, verbose=True, **kwargs):
r"""
Args:
train/dev/test (list[list] or str):
Filenames of the train/dev/test datasets.
buckets (int):
The number of buckets that sentences are assigned to. Default: 32.
batch_size (int):
The number of tokens in each batch. Default: 5000.
update_steps (int):
Gradient accumulation steps. Default: 1.
punct (bool):
If ``False``, ignores the punctuation during evaluation. Default: ``False``.
mbr (bool):
If ``True``, performs MBR decoding. Default: ``True``.
tree (bool):
If ``True``, ensures to output well-formed trees. Default: ``False``.
proj (bool):
If ``True``, ensures to output projective trees. Default: ``False``.
partial (bool):
``True`` denotes the trees are partially annotated. Default: ``False``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating training configs.
"""
return super().train(**Config().update(locals()))
def evaluate(self, data, buckets=8, batch_size=5000, punct=False,
mbr=True, tree=True, proj=True, partial=False, verbose=True, **kwargs):
r"""
Args:
data (str):
The data for evaluation, both list of instances and filename are allowed.
buckets (int):
                The number of buckets that sentences are assigned to. Default: 8.
batch_size (int):
The number of tokens in each batch. Default: 5000.
punct (bool):
If ``False``, ignores the punctuation during evaluation. Default: ``False``.
mbr (bool):
If ``True``, performs MBR decoding. Default: ``True``.
tree (bool):
                If ``True``, ensures to output well-formed trees. Default: ``True``.
proj (bool):
                If ``True``, ensures to output projective trees. Default: ``True``.
partial (bool):
``True`` denotes the trees are partially annotated. Default: ``False``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating evaluation configs.
Returns:
The loss scalar and evaluation results.
"""
return super().evaluate(**Config().update(locals()))
def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False,
mbr=True, tree=True, proj=True, verbose=True, **kwargs):
r"""
Args:
data (list[list] or str):
The data for prediction, both a list of instances and filename are allowed.
pred (str):
If specified, the predicted results will be saved to the file. Default: ``None``.
lang (str):
Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.
``None`` if tokenization is not required.
Default: ``None``.
buckets (int):
                The number of buckets that sentences are assigned to. Default: 8.
batch_size (int):
The number of tokens in each batch. Default: 5000.
prob (bool):
If ``True``, outputs the probabilities. Default: ``False``.
mbr (bool):
If ``True``, performs MBR decoding. Default: ``True``.
tree (bool):
                If ``True``, ensures to output well-formed trees. Default: ``True``.
proj (bool):
                If ``True``, ensures to output projective trees. Default: ``True``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating prediction configs.
Returns:
A :class:`~supar.utils.Dataset` object that stores the predicted results.
"""
return super().predict(**Config().update(locals()))
@classmethod
def load(cls, path, reload=False, src=None, **kwargs):
r"""
Loads a parser with data fields and pretrained model parameters.
Args:
path (str):
- a string with the shortcut name of a pretrained model defined in ``supar.MODEL``
to load from cache or download, e.g., ``'crf-dep-en'``.
- a local path to a pretrained model, e.g., ``./<path>/model``.
reload (bool):
Whether to discard the existing cache and force a fresh download. Default: ``False``.
src (str):
Specifies where to download the model.
``'github'``: github release page.
``'hlt'``: hlt homepage, only accessible from 9:00 to 18:00 (UTC+8).
Default: None.
kwargs (dict):
A dict holding unconsumed arguments for updating training configs and initializing the model.
Examples:
>>> from supar import Parser
>>> parser = Parser.load('crf-dep-en')
>>> parser = Parser.load('./ptb.crf.dep.lstm.char')
"""
return super().load(path, reload, src, **kwargs)
def _train(self, loader):
self.model.train()
bar, metric = progress_bar(loader), AttachmentMetric()
for i, batch in enumerate(bar, 1):
words, texts, *feats, arcs, rels = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
s_arc, s_rel = self.model(words, feats)
loss, s_arc = self.model.loss(s_arc, s_rel, arcs, rels, mask, self.args.mbr, self.args.partial)
loss = loss / self.args.update_steps
loss.backward()
nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip)
if i % self.args.update_steps == 0:
self.optimizer.step()
self.scheduler.step()
self.optimizer.zero_grad()
arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask)
if self.args.partial:
mask &= arcs.ge(0)
# ignore all punctuation if not specified
if not self.args.punct:
mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))
metric(arc_preds, rel_preds, arcs, rels, mask)
bar.set_postfix_str(f"lr: {self.scheduler.get_last_lr()[0]:.4e} - loss: {loss:.4f} - {metric}")
logger.info(f"{bar.postfix}")
@torch.no_grad()
def _evaluate(self, loader):
self.model.eval()
total_loss, metric = 0, AttachmentMetric()
for batch in loader:
words, texts, *feats, arcs, rels = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
s_arc, s_rel = self.model(words, feats)
loss, s_arc = self.model.loss(s_arc, s_rel, arcs, rels, mask, self.args.mbr, self.args.partial)
arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask, self.args.tree, self.args.proj)
if self.args.partial:
mask &= arcs.ge(0)
# ignore all punctuation if not specified
if not self.args.punct:
mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))
total_loss += loss.item()
metric(arc_preds, rel_preds, arcs, rels, mask)
total_loss /= len(loader)
return total_loss, metric
@torch.no_grad()
def _predict(self, loader):
self.model.eval()
preds = {'arcs': [], 'rels': [], 'probs': [] if self.args.prob else None}
for batch in progress_bar(loader):
words, texts, *feats = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
lens = mask.sum(1).tolist()
s_arc, s_rel = self.model(words, feats)
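            # with MBR decoding, replace the raw arc scores by CRF marginal
            # probabilities before decoding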
if self.args.mbr:
s_arc = self.model.crf(s_arc, mask, mbr=True)
arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask, self.args.tree, self.args.proj)
preds['arcs'].extend(arc_preds[mask].split(lens))
preds['rels'].extend(rel_preds[mask].split(lens))
if self.args.prob:
arc_probs = s_arc if self.args.mbr else s_arc.softmax(-1)
preds['probs'].extend([prob[1:i+1, :i+1].cpu() for i, prob in zip(lens, arc_probs.unbind())])
preds['arcs'] = [seq.tolist() for seq in preds['arcs']]
preds['rels'] = [self.REL.vocab[seq.tolist()] for seq in preds['rels']]
return preds
class CRF2oDependencyParser(BiaffineDependencyParser):
r"""
The implementation of second-order CRF Dependency Parser :cite:`zhang-etal-2020-efficient`.
"""
NAME = 'crf2o-dependency'
MODEL = CRF2oDependencyModel
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1,
punct=False, mbr=True, tree=False, proj=False, partial=False, verbose=True, **kwargs):
r"""
Args:
train/dev/test (list[list] or str):
Filenames of the train/dev/test datasets.
buckets (int):
The number of buckets that sentences are assigned to. Default: 32.
batch_size (int):
The number of tokens in each batch. Default: 5000.
update_steps (int):
Gradient accumulation steps. Default: 1.
punct (bool):
If ``False``, ignores the punctuation during evaluation. Default: ``False``.
mbr (bool):
If ``True``, performs MBR decoding. Default: ``True``.
tree (bool):
If ``True``, ensures to output well-formed trees. Default: ``False``.
proj (bool):
If ``True``, ensures to output projective trees. Default: ``False``.
partial (bool):
``True`` denotes the trees are partially annotated. Default: ``False``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating training configs.
"""
return super().train(**Config().update(locals()))
def evaluate(self, data, buckets=8, batch_size=5000, punct=False,
mbr=True, tree=True, proj=True, partial=False, verbose=True, **kwargs):
r"""
Args:
data (str):
The data for evaluation, both list of instances and filename are allowed.
buckets (int):
                The number of buckets that sentences are assigned to. Default: 8.
batch_size (int):
The number of tokens in each batch. Default: 5000.
punct (bool):
If ``False``, ignores the punctuation during evaluation. Default: ``False``.
mbr (bool):
If ``True``, performs MBR decoding. Default: ``True``.
tree (bool):
                If ``True``, ensures to output well-formed trees. Default: ``True``.
proj (bool):
                If ``True``, ensures to output projective trees. Default: ``True``.
partial (bool):
``True`` denotes the trees are partially annotated. Default: ``False``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating evaluation configs.
Returns:
The loss scalar and evaluation results.
"""
return super().evaluate(**Config().update(locals()))
def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False,
mbr=True, tree=True, proj=True, verbose=True, **kwargs):
r"""
Args:
data (list[list] or str):
The data for prediction, both a list of instances and filename are allowed.
pred (str):
If specified, the predicted results will be saved to the file. Default: ``None``.
lang (str):
Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.
``None`` if tokenization is not required.
Default: ``None``.
buckets (int):
                The number of buckets that sentences are assigned to. Default: 8.
batch_size (int):
The number of tokens in each batch. Default: 5000.
prob (bool):
If ``True``, outputs the probabilities. Default: ``False``.
mbr (bool):
If ``True``, performs MBR decoding. Default: ``True``.
tree (bool):
                If ``True``, ensures to output well-formed trees. Default: ``True``.
proj (bool):
                If ``True``, ensures to output projective trees. Default: ``True``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating prediction configs.
Returns:
A :class:`~supar.utils.Dataset` object that stores the predicted results.
"""
return super().predict(**Config().update(locals()))
@classmethod
def load(cls, path, reload=False, src=None, **kwargs):
r"""
Loads a parser with data fields and pretrained model parameters.
Args:
path (str):
- a string with the shortcut name of a pretrained model defined in ``supar.MODEL``
to load from cache or download, e.g., ``'crf2o-dep-en'``.
- a local path to a pretrained model, e.g., ``./<path>/model``.
reload (bool):
Whether to discard the existing cache and force a fresh download. Default: ``False``.
src (str):
Specifies where to download the model.
``'github'``: github release page.
``'hlt'``: hlt homepage, only accessible from 9:00 to 18:00 (UTC+8).
Default: None.
kwargs (dict):
A dict holding unconsumed arguments for updating training configs and initializing the model.
Examples:
>>> from supar import Parser
>>> parser = Parser.load('crf2o-dep-en')
>>> parser = Parser.load('./ptb.crf2o.dep.lstm.char')
"""
return super().load(path, reload, src, **kwargs)
def _train(self, loader):
self.model.train()
bar, metric = progress_bar(loader), AttachmentMetric()
for i, batch in enumerate(bar, 1):
words, texts, *feats, arcs, sibs, rels = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
s_arc, s_sib, s_rel = self.model(words, feats)
loss, s_arc = self.model.loss(s_arc, s_sib, s_rel, arcs, sibs, rels, mask, self.args.mbr, self.args.partial)
loss = loss / self.args.update_steps
loss.backward()
nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip)
if i % self.args.update_steps == 0:
self.optimizer.step()
self.scheduler.step()
self.optimizer.zero_grad()
arc_preds, rel_preds = self.model.decode(s_arc, s_sib, s_rel, mask)
if self.args.partial:
mask &= arcs.ge(0)
# ignore all punctuation if not specified
if not self.args.punct:
mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))
metric(arc_preds, rel_preds, arcs, rels, mask)
bar.set_postfix_str(f"lr: {self.scheduler.get_last_lr()[0]:.4e} - loss: {loss:.4f} - {metric}")
logger.info(f"{bar.postfix}")
@torch.no_grad()
def _evaluate(self, loader):
self.model.eval()
total_loss, metric = 0, AttachmentMetric()
for batch in loader:
words, texts, *feats, arcs, sibs, rels = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
s_arc, s_sib, s_rel = self.model(words, feats)
loss, s_arc = self.model.loss(s_arc, s_sib, s_rel, arcs, sibs, rels, mask, self.args.mbr, self.args.partial)
arc_preds, rel_preds = self.model.decode(s_arc, s_sib, s_rel, mask, self.args.tree, self.args.mbr, self.args.proj)
if self.args.partial:
mask &= arcs.ge(0)
# ignore all punctuation if not specified
if not self.args.punct:
mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))
total_loss += loss.item()
metric(arc_preds, rel_preds, arcs, rels, mask)
total_loss /= len(loader)
return total_loss, metric
@torch.no_grad()
def _predict(self, loader):
self.model.eval()
preds = {'arcs': [], 'rels': [], 'probs': [] if self.args.prob else None}
for batch in progress_bar(loader):
words, texts, *feats = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
lens = mask.sum(1).tolist()
s_arc, s_sib, s_rel = self.model(words, feats)
if self.args.mbr:
s_arc = self.model.crf((s_arc, s_sib), mask, mbr=True)
arc_preds, rel_preds = self.model.decode(s_arc, s_sib, s_rel, mask, self.args.tree, self.args.mbr, self.args.proj)
preds['arcs'].extend(arc_preds[mask].split(lens))
preds['rels'].extend(rel_preds[mask].split(lens))
if self.args.prob:
arc_probs = s_arc if self.args.mbr else s_arc.softmax(-1)
preds['probs'].extend([prob[1:i+1, :i+1].cpu() for i, prob in zip(lens, arc_probs.unbind())])
preds['arcs'] = [seq.tolist() for seq in preds['arcs']]
preds['rels'] = [self.REL.vocab[seq.tolist()] for seq in preds['rels']]
return preds
@classmethod
def build(cls, path, min_freq=2, fix_len=20, **kwargs):
r"""
Build a brand-new Parser, including initialization of all data fields and model parameters.
Args:
path (str):
The path of the model to be saved.
            min_freq (int):
The minimum frequency needed to include a token in the vocabulary. Default: 2.
fix_len (int):
The max length of all subword pieces. The excess part of each piece will be truncated.
Required if using CharLSTM/BERT.
Default: 20.
kwargs (dict):
A dict holding the unconsumed arguments.
"""
args = Config(**locals())
args.device = 'cuda' if torch.cuda.is_available() else 'cpu'
os.makedirs(os.path.dirname(path) or './', exist_ok=True)
if os.path.exists(path) and not args.build:
parser = cls.load(**args)
parser.model = cls.MODEL(**parser.args)
parser.model.load_pretrained(parser.WORD.embed).to(args.device)
return parser
logger.info("Building the fields")
TAG, CHAR, ELMO, BERT = None, None, None, None
if args.encoder != 'lstm':
from transformers import (AutoTokenizer, GPT2Tokenizer,
GPT2TokenizerFast)
t = AutoTokenizer.from_pretrained(args.bert)
WORD = SubwordField('words',
pad=t.pad_token,
unk=t.unk_token,
bos=t.bos_token or t.cls_token,
fix_len=args.fix_len,
tokenize=t.tokenize,
fn=None if not isinstance(t, (GPT2Tokenizer, GPT2TokenizerFast)) else lambda x: ' '+x)
WORD.vocab = t.get_vocab()
else:
WORD = Field('words', pad=PAD, unk=UNK, bos=BOS, lower=True)
if 'tag' in args.feat:
TAG = Field('tags', bos=BOS)
if 'char' in args.feat:
CHAR = SubwordField('chars', pad=PAD, unk=UNK, bos=BOS, fix_len=args.fix_len)
if 'elmo' in args.feat:
from allennlp.modules.elmo import batch_to_ids
ELMO = RawField('elmo')
ELMO.compose = lambda x: batch_to_ids(x).to(WORD.device)
if 'bert' in args.feat:
from transformers import (AutoTokenizer, GPT2Tokenizer,
GPT2TokenizerFast)
t = AutoTokenizer.from_pretrained(args.bert)
BERT = SubwordField('bert',
pad=t.pad_token,
unk=t.unk_token,
bos=t.bos_token or t.cls_token,
fix_len=args.fix_len,
tokenize=t.tokenize,
fn=None if not isinstance(t, (GPT2Tokenizer, GPT2TokenizerFast)) else lambda x: ' '+x)
BERT.vocab = t.get_vocab()
TEXT = RawField('texts')
ARC = Field('arcs', bos=BOS, use_vocab=False, fn=CoNLL.get_arcs)
SIB = ChartField('sibs', bos=BOS, use_vocab=False, fn=CoNLL.get_sibs)
REL = Field('rels', bos=BOS)
transform = CoNLL(FORM=(WORD, TEXT, CHAR, ELMO, BERT), CPOS=TAG, HEAD=(ARC, SIB), DEPREL=REL)
train = Dataset(transform, args.train)
if args.encoder == 'lstm':
WORD.build(train, args.min_freq, (Embedding.load(args.embed, args.unk) if args.embed else None))
if TAG is not None:
TAG.build(train)
if CHAR is not None:
CHAR.build(train)
REL.build(train)
args.update({
'n_words': len(WORD.vocab) if args.encoder != 'lstm' else WORD.vocab.n_init,
'n_rels': len(REL.vocab),
'n_tags': len(TAG.vocab) if TAG is not None else None,
'n_chars': len(CHAR.vocab) if CHAR is not None else None,
'char_pad_index': CHAR.pad_index if CHAR is not None else None,
'bert_pad_index': BERT.pad_index if BERT is not None else None,
'pad_index': WORD.pad_index,
'unk_index': WORD.unk_index,
'bos_index': WORD.bos_index
})
logger.info(f"{transform}")
logger.info("Building the model")
model = cls.MODEL(**args).load_pretrained(WORD.embed if hasattr(WORD, 'embed') else None).to(args.device)
logger.info(f"{model}\n")
return cls(args, model, transform)
class VIDependencyParser(BiaffineDependencyParser):
r"""
The implementation of Dependency Parser using Variational Inference (:cite:`wang-tu-2020-second`).
"""
NAME = 'vi-dependency'
MODEL = VIDependencyModel
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def train(self, train, dev, test, buckets=32, batch_size=5000, update_steps=1,
punct=False, tree=False, proj=False, partial=False, verbose=True, **kwargs):
r"""
Args:
train/dev/test (list[list] or str):
Filenames of the train/dev/test datasets.
buckets (int):
The number of buckets that sentences are assigned to. Default: 32.
batch_size (int):
The number of tokens in each batch. Default: 5000.
update_steps (int):
Gradient accumulation steps. Default: 1.
punct (bool):
If ``False``, ignores the punctuation during evaluation. Default: ``False``.
tree (bool):
If ``True``, ensures to output well-formed trees. Default: ``False``.
proj (bool):
If ``True``, ensures to output projective trees. Default: ``False``.
partial (bool):
``True`` denotes the trees are partially annotated. Default: ``False``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating training configs.
"""
return super().train(**Config().update(locals()))
def evaluate(self, data, buckets=8, batch_size=5000, punct=False,
tree=True, proj=True, partial=False, verbose=True, **kwargs):
r"""
Args:
data (str):
The data for evaluation, both list of instances and filename are allowed.
buckets (int):
                The number of buckets that sentences are assigned to. Default: 8.
batch_size (int):
The number of tokens in each batch. Default: 5000.
punct (bool):
If ``False``, ignores the punctuation during evaluation. Default: ``False``.
tree (bool):
                If ``True``, ensures to output well-formed trees. Default: ``True``.
proj (bool):
                If ``True``, ensures to output projective trees. Default: ``True``.
partial (bool):
``True`` denotes the trees are partially annotated. Default: ``False``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating evaluation configs.
Returns:
The loss scalar and evaluation results.
"""
return super().evaluate(**Config().update(locals()))
def predict(self, data, pred=None, lang=None, buckets=8, batch_size=5000, prob=False,
tree=True, proj=True, verbose=True, **kwargs):
r"""
Args:
data (list[list] or str):
The data for prediction, both a list of instances and filename are allowed.
pred (str):
If specified, the predicted results will be saved to the file. Default: ``None``.
lang (str):
Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.
``None`` if tokenization is not required.
Default: ``None``.
buckets (int):
                The number of buckets that sentences are assigned to. Default: 8.
batch_size (int):
The number of tokens in each batch. Default: 5000.
prob (bool):
If ``True``, outputs the probabilities. Default: ``False``.
tree (bool):
                If ``True``, ensures to output well-formed trees. Default: ``True``.
proj (bool):
                If ``True``, ensures to output projective trees. Default: ``True``.
verbose (bool):
If ``True``, increases the output verbosity. Default: ``True``.
kwargs (dict):
A dict holding unconsumed arguments for updating prediction configs.
Returns:
A :class:`~supar.utils.Dataset` object that stores the predicted results.
"""
return super().predict(**Config().update(locals()))
@classmethod
def load(cls, path, reload=False, src=None, **kwargs):
r"""
Loads a parser with data fields and pretrained model parameters.
Args:
path (str):
- a string with the shortcut name of a pretrained model defined in ``supar.MODEL``
to load from cache or download, e.g., ``'vi-dep-en'``.
- a local path to a pretrained model, e.g., ``./<path>/model``.
reload (bool):
Whether to discard the existing cache and force a fresh download. Default: ``False``.
src (str):
Specifies where to download the model.
``'github'``: github release page.
``'hlt'``: hlt homepage, only accessible from 9:00 to 18:00 (UTC+8).
Default: None.
kwargs (dict):
A dict holding unconsumed arguments for updating training configs and initializing the model.
Examples:
>>> from supar import Parser
>>> parser = Parser.load('vi-dep-en')
>>> parser = Parser.load('./ptb.vi.dep.lstm.char')
"""
return super().load(path, reload, src, **kwargs)
def _train(self, loader):
self.model.train()
bar, metric = progress_bar(loader), AttachmentMetric()
for i, batch in enumerate(bar, 1):
words, texts, *feats, arcs, rels = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
s_arc, s_sib, s_rel = self.model(words, feats)
loss, s_arc = self.model.loss(s_arc, s_sib, s_rel, arcs, rels, mask)
loss = loss / self.args.update_steps
loss.backward()
nn.utils.clip_grad_norm_(self.model.parameters(), self.args.clip)
if i % self.args.update_steps == 0:
self.optimizer.step()
self.scheduler.step()
self.optimizer.zero_grad()
arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask)
if self.args.partial:
mask &= arcs.ge(0)
# ignore all punctuation if not specified
if not self.args.punct:
mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))
metric(arc_preds, rel_preds, arcs, rels, mask)
bar.set_postfix_str(f"lr: {self.scheduler.get_last_lr()[0]:.4e} - loss: {loss:.4f} - {metric}")
logger.info(f"{bar.postfix}")
@torch.no_grad()
def _evaluate(self, loader):
self.model.eval()
total_loss, metric = 0, AttachmentMetric()
for batch in loader:
words, texts, *feats, arcs, rels = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
s_arc, s_sib, s_rel = self.model(words, feats)
loss, s_arc = self.model.loss(s_arc, s_sib, s_rel, arcs, rels, mask)
arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask, self.args.tree, self.args.proj)
if self.args.partial:
mask &= arcs.ge(0)
# ignore all punctuation if not specified
if not self.args.punct:
mask.masked_scatter_(mask, ~mask.new_tensor([ispunct(w) for s in texts for w in s]))
total_loss += loss.item()
metric(arc_preds, rel_preds, arcs, rels, mask)
total_loss /= len(loader)
return total_loss, metric
@torch.no_grad()
def _predict(self, loader):
self.model.eval()
preds = {'arcs': [], 'rels': [], 'probs': [] if self.args.prob else None}
for batch in progress_bar(loader):
words, texts, *feats = batch
word_mask = words.ne(self.args.pad_index)
mask = word_mask if len(words.shape) < 3 else word_mask.any(-1)
# ignore the first token of each sentence
mask[:, 0] = 0
lens = mask.sum(1).tolist()
s_arc, s_sib, s_rel = self.model(words, feats)
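            # run variational inference over the first- and second-order scores
            # to obtain posterior arc probabilities used for decoding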
s_arc = self.model.inference((s_arc, s_sib), mask)
arc_preds, rel_preds = self.model.decode(s_arc, s_rel, mask, self.args.tree, self.args.proj)
preds['arcs'].extend(arc_preds[mask].split(lens))
preds['rels'].extend(rel_preds[mask].split(lens))
if self.args.prob:
preds['probs'].extend([prob[1:i+1, :i+1].cpu() for i, prob in zip(lens, s_arc.unbind())])
preds['arcs'] = [seq.tolist() for seq in preds['arcs']]
preds['rels'] = [self.REL.vocab[seq.tolist()] for seq in preds['rels']]
return preds
| 45.484171 | 126 | 0.555988 | [
"MIT"
] | LiBinNLP/HOSDP | supar/parsers/dep.py | 48,850 | Python |
import argparse
import logging
from datetime import datetime
from python_liftbridge import ErrNoSuchStream
from python_liftbridge import ErrStreamExists
from python_liftbridge import Lift
from python_liftbridge import Stream
def parse_arguments():
'''Argument parsing for the script'''
parser = argparse.ArgumentParser(
description='Liftbridge sub script.',
)
parser.add_argument(
'subject',
metavar='subject',
)
parser.add_argument(
'stream',
metavar='stream',
)
parser.add_argument(
'-s',
'--server',
metavar='s',
nargs='?',
default='127.0.0.1:9292',
help='(default: %(default)s)',
)
parser.add_argument(
'-t',
'--timestamp',
action='store_true',
help='Display timestamps',
)
parser.add_argument(
'-c',
'--create',
action='store_true',
help="Creates the stream in case it doesn't exist",
)
parser.add_argument(
'-d',
'--debug',
action='store_true',
help='Shows debug logs',
)
return parser.parse_args()
def main():
args = parse_arguments()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
client = Lift(ip_address=args.server)
count = 0
if args.create:
try:
client.create_stream(Stream(args.subject, args.stream))
except ErrStreamExists:
pass
try:
for message in client.subscribe(
Stream(
args.subject,
args.stream,
).start_at_earliest_received(),
):
print("{} [#{}] Received on [{} - {}]: '{}'".format(
datetime.fromtimestamp(
int(message.timestamp) /
1000000000,
), count, args.subject, args.stream, message.value.decode('utf-8'),
))
count = count + 1
except ErrNoSuchStream:
print("The stream {} doesn't exist. With -c or --create it's creation can be forced."
.format(args.stream))
main()
| 23.788889 | 93 | 0.554414 | [
"Apache-2.0"
] | LaPetiteSouris/python-liftbridge | examples/lift-sub.py | 2,141 | Python |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.internal import story_runner
from telemetry.page import page
from telemetry.page import legacy_page_test
from telemetry.page import shared_page_state
from telemetry import story as story_module
from telemetry.testing import fakes
from telemetry.util import wpr_modes
def SetUpPageRunnerArguments(options):
parser = options.CreateParser()
story_runner.AddCommandLineArgs(parser)
options.MergeDefaultValues(parser.get_default_values())
story_runner.ProcessCommandLineArgs(parser, options)
class DummyTest(legacy_page_test.LegacyPageTest):
def ValidateAndMeasurePage(self, *_):
pass
class SharedPageStateTests(unittest.TestCase):
def setUp(self):
self.options = fakes.CreateBrowserFinderOptions()
self.options.use_live_sites = False
self.options.output_formats = ['none']
self.options.suppress_gtest_report = True
def testUseLiveSitesFlagSet(self):
self.options.use_live_sites = True
run_state = shared_page_state.SharedPageState(
DummyTest(), self.options, story_module.StorySet())
self.assertTrue(run_state.platform.network_controller.is_open)
self.assertEquals(run_state.platform.network_controller.wpr_mode,
wpr_modes.WPR_OFF)
self.assertTrue(run_state.platform.network_controller.use_live_traffic)
def testUseLiveSitesFlagUnset(self):
run_state = shared_page_state.SharedPageState(
DummyTest(), self.options, story_module.StorySet())
self.assertTrue(run_state.platform.network_controller.is_open)
self.assertEquals(run_state.platform.network_controller.wpr_mode,
wpr_modes.WPR_REPLAY)
self.assertFalse(run_state.platform.network_controller.use_live_traffic)
def testWPRRecordEnable(self):
self.options.browser_options.wpr_mode = wpr_modes.WPR_RECORD
run_state = shared_page_state.SharedPageState(
DummyTest(), self.options, story_module.StorySet())
self.assertTrue(run_state.platform.network_controller.is_open)
self.assertEquals(run_state.platform.network_controller.wpr_mode,
wpr_modes.WPR_RECORD)
self.assertFalse(run_state.platform.network_controller.use_live_traffic)
def testConstructorCallsSetOptions(self):
test = DummyTest()
shared_page_state.SharedPageState(
test, self.options, story_module.StorySet())
self.assertEqual(test.options, self.options)
def assertUserAgentSetCorrectly(
self, shared_page_state_class, expected_user_agent):
story = page.Page(
'http://www.google.com',
shared_page_state_class=shared_page_state_class)
test = DummyTest()
story_set = story_module.StorySet()
story_set.AddStory(story)
story.shared_state_class(test, self.options, story_set)
browser_options = self.options.browser_options
actual_user_agent = browser_options.browser_user_agent_type
self.assertEqual(expected_user_agent, actual_user_agent)
def testPageStatesUserAgentType(self):
self.assertUserAgentSetCorrectly(
shared_page_state.SharedMobilePageState, 'mobile')
self.assertUserAgentSetCorrectly(
shared_page_state.SharedDesktopPageState, 'desktop')
self.assertUserAgentSetCorrectly(
shared_page_state.SharedTabletPageState, 'tablet')
self.assertUserAgentSetCorrectly(
shared_page_state.Shared10InchTabletPageState, 'tablet_10_inch')
self.assertUserAgentSetCorrectly(
shared_page_state.SharedPageState, None)
def testBrowserStartupURLSetCorrectly(self):
story_set = story_module.StorySet()
google_page = page.Page(
'http://www.google.com',
startup_url='http://www.google.com', page_set=story_set)
example_page = page.Page(
'https://www.example.com',
startup_url='https://www.example.com', page_set=story_set)
gmail_page = page.Page(
'https://www.gmail.com',
startup_url='https://www.gmail.com', page_set=story_set)
for p in (google_page, example_page, gmail_page):
story_set.AddStory(p)
shared_state = shared_page_state.SharedPageState(
DummyTest(), self.options, story_set)
for p in (google_page, example_page, gmail_page):
shared_state.WillRunStory(p)
self.assertEquals(
p.startup_url, self.options.browser_options.startup_url)
| 38.646552 | 76 | 0.758867 | [
"BSD-3-Clause"
] | bopopescu/catapult-2 | telemetry/telemetry/page/shared_page_state_unittest.py | 4,483 | Python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from .. import utilities, tables
class ProxyProtocolPolicy(pulumi.CustomResource):
instance_ports: pulumi.Output[list]
"""
List of instance ports to which the policy
should be applied. This can be specified if the protocol is SSL or TCP.
"""
load_balancer: pulumi.Output[str]
"""
The load balancer to which the policy
should be attached.
"""
def __init__(__self__, resource_name, opts=None, instance_ports=None, load_balancer=None, __name__=None, __opts__=None):
"""
Provides a proxy protocol policy, which allows an ELB to carry a client connection information to a backend.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[list] instance_ports: List of instance ports to which the policy
should be applied. This can be specified if the protocol is SSL or TCP.
:param pulumi.Input[str] load_balancer: The load balancer to which the policy
should be attached.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if not resource_name:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(resource_name, str):
raise TypeError('Expected resource name to be a string')
if opts and not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
if instance_ports is None:
raise TypeError('Missing required property instance_ports')
__props__['instance_ports'] = instance_ports
if load_balancer is None:
raise TypeError('Missing required property load_balancer')
__props__['load_balancer'] = load_balancer
super(ProxyProtocolPolicy, __self__).__init__(
'aws:ec2/proxyProtocolPolicy:ProxyProtocolPolicy',
resource_name,
__props__,
opts)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 40.753623 | 124 | 0.68101 | [
"ECL-2.0",
"Apache-2.0"
] | lemonade-hq/pulumi-aws | sdk/python/pulumi_aws/ec2/proxy_protocol_policy.py | 2,812 | Python |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# ============================================================================ #
# Project : Airbnb #
# Version : 0.1.0 #
# File : split_names.py #
# Python : 3.8.0 #
# ---------------------------------------------------------------------------- #
# Author : John James #
# Company: DecisionScients #
# Email : [email protected] #
# ---------------------------------------------------------------------------- #
# Created : Tuesday, 7th January 2020 10:22:44 am #
# Last Modified: Tuesday, 7th January 2020 10:22:44 am #
# Modified By : John James ([email protected]>) #
# ---------------------------------------------------------------------------- #
# License: BSD #
# Copyright (c) 2020 DecisionScients #
# ============================================================================ #
#%%
import os
directory = "./data/raw/"
filenames = os.listdir(directory)
for filename in filenames:
name = filename.split(".")[0]
print(name)
# %%
| 52.7 | 80 | 0.253004 | [
"BSD-3-Clause"
] | decisionscients/Airbnb | src/lab/split_names.py | 1,581 | Python |
import numpy as np
from scipy.optimize import curve_fit
from ..data_generation import interp_reflectivity, ReflectivityGenerator
def q_shift_variants(q_values_prediction, q_values_input, corrected_reflectivity, n_variants, scale=0.001):
"""Create ``n_variants`` interpolated reflectivity curve variants with randomly distributed q shifts."""
shift = np.random.normal(loc=0, size=n_variants, scale=scale).reshape(n_variants, 1)
shifted_qs = np.tile(q_values_input, (n_variants, 1)) + shift
interpolated_curves = np.zeros((n_variants, len(q_values_prediction)))
for i in range(n_variants):
interpolated_curves[i] = interp_reflectivity(q_values_prediction, shifted_qs[i], corrected_reflectivity)
return interpolated_curves, shift
def curve_scaling_variants(corrected_reflectivity, n_variants, scale=0.1):
"""Create ``n_variants`` reflectivity curve variants with randomly distributed scaling factors."""
scalings = np.random.normal(loc=1, size=n_variants, scale=scale).reshape(n_variants, 1)
scaled_curves = np.zeros((n_variants, len(corrected_reflectivity)))
for i in range(n_variants):
scaled_curves[i] = corrected_reflectivity.copy() * scalings[i]
return scaled_curves, scalings
def curve_variant_log_mse(curve, variant_curves):
"""Calculate the log MSE of a curve and a :class:`ndarray` of curves"""
errors = np.log10(curve) - np.log10(variant_curves)
return np.mean(errors ** 2, axis=1)
def least_log_mean_squares_fit(q_values, data, predicted_labels, sample, output_preprocessor,
fraction_bounds=(0.5, 0.5, 0.1)):
"""Fits the data with a model curve with ``scipy.optimize.curve_fit`` using ``predicted_labels`` as start values."""
prep_labels = output_preprocessor.apply_preprocessing(predicted_labels)[0]
start_values = np.array(prep_labels)[0]
bounds = ([val - bound * abs(val) for val, bound in zip(start_values, fraction_bounds)],
[val + bound * abs(val) for val, bound in zip(start_values, fraction_bounds)])
fit_result = curve_fit(fitting_model(q_values, sample, output_preprocessor), q_values, np.log10(data),
p0=start_values, bounds=bounds)
return output_preprocessor.restore_labels(np.atleast_2d(fit_result[0]))
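# Illustrative call (hedged sketch; not part of the original module). The
# variables `q_values`, `measured_data`, `predicted_labels`, `sample` and
# `output_preprocessor` are assumed to come from the surrounding mlreflect
# pipeline and are not defined here.
#
#     fitted_labels = least_log_mean_squares_fit(
#         q_values, measured_data, predicted_labels, sample,
#         output_preprocessor, fraction_bounds=(0.5, 0.5, 0.1),
#     )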
def fitting_model(q_values, sample, output_preprocessor):
def log_refl_curve(q, *prep_labels):
generator = ReflectivityGenerator(q_values, sample)
restored_labels = output_preprocessor.restore_labels(np.atleast_2d(prep_labels))
model = generator.simulate_reflectivity(restored_labels, progress_bar=False)[0]
return np.log10(model)
return log_refl_curve
def log_mse_loss(prep_labels, data, generator, output_preprocessor):
"""MSE loss between a reflectivity curve and a model curve generated with the given normalized labels."""
restored_labels = output_preprocessor.restore_labels(np.atleast_2d(prep_labels))
model = generator.simulate_reflectivity(restored_labels,
progress_bar=False)[0]
loss = mean_squared_error(np.log10(data), np.log10(model))
return loss
def mean_squared_error(array1, array2):
"""Returns element-wise mean squared error between two arrays."""
if len(array1) != len(array2):
raise ValueError(f'array1 and array2 must be of same length ({len(array1)} != {len(array2)})')
else:
error = np.asarray(array1) - np.asarray(array2)
return np.mean(np.atleast_2d(error ** 2), axis=1)
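# Worked example for the helper above:
# mean_squared_error([1.0, 2.0], [0.0, 0.0]) -> array([2.5]),
# i.e. the mean of the element-wise squared errors [1.0, 4.0].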
| 49.929577 | 120 | 0.725811 | ["MIT"] | schreiber-lab/mlreflect | mlreflect/curve_fitter/minimizer.py | 3,545 | Python
from sys import maxsize
class Group:
def __init__(self, name=None, header=None, footer=None, id=None):
self.name = name
self.header = header
self.footer = footer
self.id = id
def __repr__(self):
return"%s:%s:%s:%s" % (self.id, self.name, self.header, self.footer)
def __eq__(self, other):
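        # Groups compare equal when their names match; a missing id on either
        # side acts as a wildcard, so objects that have not yet been assigned
        # an id can still be matched by name.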
return (self.id is None or other.id is None or self.id == other.id) and self.name == other.name
def id_or_max(self):
if self.id:
return int(self.id)
else:
return maxsize
| 23.75 | 103 | 0.582456 | ["Apache-2.0"] | Docent321/python_traning | model/group.py | 570 | Python
# -*- coding: utf-8 -*-
from datetime import datetime
import json
import time
import flask
from example.usermanagement.schema_marshmallow import AboutSchema
from example.usermanagement.schema_marshmallow import NoContentSchema
from example.usermanagement.schema_marshmallow import UserAvatarSchema
from example.usermanagement.schema_marshmallow import UserDigestSchema
from example.usermanagement.schema_marshmallow import UserIdPathSchema
from example.usermanagement.schema_marshmallow import UserSchema
from example.usermanagement.userlib import User
from example.usermanagement.userlib import UserAvatarNotFound
from example.usermanagement.userlib import UserLib
from example.usermanagement.userlib import UserNotFound
from hapic import Hapic
from hapic import MarshmallowProcessor
from hapic.data import HapicData
from hapic.data import HapicFile
from hapic.error.marshmallow import MarshmallowDefaultErrorBuilder
from hapic.ext.flask import FlaskContext
try: # Python 3.5+
from http import HTTPStatus
except ImportError:
from http import client as HTTPStatus
hapic = Hapic()
hapic.set_processor_class(MarshmallowProcessor)
class FlaskController(object):
@hapic.with_api_doc()
@hapic.output_body(AboutSchema())
def about(self):
"""
        This endpoint allows you to check that the API is running. This description
is generated from the docstring of the method.
"""
return {"version": "1.2.3", "datetime": datetime.now()}
@hapic.with_api_doc()
@hapic.output_body(UserDigestSchema(many=True))
def get_users(self):
"""
        Obtain the list of users.
"""
return UserLib().get_users()
@hapic.with_api_doc()
@hapic.handle_exception(UserNotFound, HTTPStatus.NOT_FOUND)
@hapic.input_path(UserIdPathSchema())
@hapic.output_body(UserSchema())
def get_user(self, id, hapic_data: HapicData):
"""
        Return a user from the list, or a 404 if the user does not exist.
"""
return UserLib().get_user(int(hapic_data.path["id"]))
@hapic.with_api_doc()
# TODO - G.M - 2017-12-5 - Support input_forms ?
# TODO - G.M - 2017-12-5 - Support exclude, only ?
@hapic.input_body(UserSchema(exclude=("id",)))
@hapic.output_body(UserSchema())
def add_user(self, hapic_data: HapicData):
"""
Add a user to the list
"""
new_user = User(**hapic_data.body)
return UserLib().add_user(new_user)
@hapic.with_api_doc()
@hapic.handle_exception(UserNotFound, HTTPStatus.NOT_FOUND)
@hapic.output_body(NoContentSchema(), default_http_code=204)
@hapic.input_path(UserIdPathSchema())
def del_user(self, id, hapic_data: HapicData):
UserLib().del_user(int(hapic_data.path["id"]))
return NoContentSchema()
@hapic.with_api_doc()
@hapic.handle_exception(UserNotFound, HTTPStatus.NOT_FOUND)
@hapic.handle_exception(UserAvatarNotFound, HTTPStatus.NOT_FOUND)
@hapic.input_path(UserIdPathSchema())
@hapic.output_file(["image/png"])
def get_user_avatar(self, id, hapic_data: HapicData):
return HapicFile(
file_path=UserLib().get_user_avatar_path(user_id=(int(hapic_data.path["id"])))
)
@hapic.with_api_doc()
@hapic.handle_exception(UserNotFound, HTTPStatus.NOT_FOUND)
@hapic.handle_exception(UserAvatarNotFound, HTTPStatus.BAD_REQUEST)
@hapic.input_path(UserIdPathSchema())
@hapic.input_files(UserAvatarSchema())
@hapic.output_body(NoContentSchema(), default_http_code=204)
def update_user_avatar(self, id, hapic_data: HapicData):
UserLib().update_user_avatar(
user_id=int(hapic_data.path["id"]), avatar=hapic_data.files["avatar"]
)
def bind(self, app: flask.Flask):
app.add_url_rule("/about", view_func=self.about)
app.add_url_rule("/users/", view_func=self.get_users)
app.add_url_rule("/users/<id>", view_func=self.get_user)
app.add_url_rule("/users/", view_func=self.add_user, methods=["POST"])
app.add_url_rule("/users/<id>", view_func=self.del_user, methods=["DELETE"]) # nopep8
app.add_url_rule(
"/users/<id>/avatar", view_func=self.get_user_avatar, methods=["GET"]
) # nopep8
app.add_url_rule("/users/<id>/avatar", view_func=self.update_user_avatar, methods=["PUT"])
if __name__ == "__main__":
app = flask.Flask(__name__)
controllers = FlaskController()
controllers.bind(app)
hapic.set_context(FlaskContext(app, default_error_builder=MarshmallowDefaultErrorBuilder()))
print("")
print("")
print("GENERATING OPENAPI DOCUMENTATION")
doc_title = "Demo API documentation"
doc_description = (
"This documentation has been generated from "
"code. You can see it using swagger: "
"http://editor2.swagger.io/"
)
hapic.add_documentation_view("/doc/", doc_title, doc_description)
openapi_file_name = "api-documentation.json"
with open(openapi_file_name, "w") as openapi_file_handle:
openapi_file_handle.write(
json.dumps(hapic.generate_doc(title=doc_title, description=doc_description))
)
print("Documentation generated in {}".format(openapi_file_name))
time.sleep(1)
print("")
print("")
print("RUNNING FLASK SERVER NOW")
print("DOCUMENTATION AVAILABLE AT /doc/")
# Run app
app.run(host="127.0.0.1", port=8082, debug=True)
| 36.689189 | 98 | 0.707182 | ["MIT"] | algoo/hapic | example/usermanagement/serve_flask_marshmallow.py | 5,430 | Python
import os
from aws_cdk import (
core,
aws_dynamodb as ddb,
aws_ec2 as ec2,
aws_ecs as ecs,
aws_ecr as ecr,
aws_iam as iam,
aws_logs as cwl,
aws_secretsmanager as sm,
aws_kinesis as ks,
)
class LogstashOutStack(core.Stack):
def __init__(self, scope: core.Construct, id: str, ctx: object, ecr_repository: ecr.Repository, kinesis_stream: ks.Stream, state_table: ddb.Table, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
self.ecr_repository = ecr_repository
self.kinesis_stream = kinesis_stream
self.state_table = state_table
service_name = "processor"
ctx_srv = getattr(ctx.outbound.services.pull, service_name)
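        # `ctx` is assumed (judging by the attribute accesses in this stack) to
        # provide at least: vpc_props, stage, debug_output, aws_region,
        # default_desired_count, queue.kinesis_endpoint, an optional
        # secrets_key_arn, and per-service settings under
        # outbound.services.pull.<service> (size, scaling, variables, secrets,
        # desired_count).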
self.vpc = ec2.Vpc.from_vpc_attributes(
self, "VPC",
**ctx.vpc_props.dict()
)
# CloudWatch Logs Group
self.log_group = cwl.LogGroup(
scope = self,
id = "logs"
)
# Create a new ECS cluster for our services
self.cluster = ecs.Cluster(
self,
vpc = self.vpc,
id = f"{id}_cluster"
)
cluster_name_output = core.CfnOutput(
scope=self,
id="cluster-name-out",
value=self.cluster.cluster_name,
export_name=f"{id}-cluster-name"
)
service_names_output = core.CfnOutput(
scope=self,
id="service-names-out",
value=service_name,
export_name=f"{id}-service-names"
)
# Create a role for ECS to interact with AWS APIs with standard permissions
self.ecs_exec_role = iam.Role(
scope = self,
id = "ecs_logstash-exec_role",
assumed_by = iam.ServicePrincipal("ecs-tasks.amazonaws.com"),
managed_policies = ([
iam.ManagedPolicy.from_aws_managed_policy_name(
"service-role/AmazonECSTaskExecutionRolePolicy")
])
)
# Grant ECS additional permissions to decrypt secrets from Secrets Manager that have been encrypted with our custom key
if getattr(ctx, "secrets_key_arn", None) is not None:
self.ecs_exec_role.add_to_policy(
iam.PolicyStatement(
actions = ["kms:Decrypt"],
effect = iam.Effect.ALLOW,
resources = [ctx.secrets_key_arn]
))
# Grant ECS permissions to log to our log group
self.log_group.grant_write(self.ecs_exec_role)
# Create a task role to grant permissions for Logstash to interact with AWS APIs
ecs_task_role = iam.Role(
scope = self,
id = f"{service_name}_task_role",
assumed_by = iam.ServicePrincipal("ecs-tasks.amazonaws.com")
)
# Add permissions for Logstash to send metrics to CloudWatch
ecs_task_role.add_to_policy(
iam.PolicyStatement(
actions = ["cloudwatch:PutMetricData"],
effect = iam.Effect.ALLOW,
resources = ["*"]
))
# Add permissions for Logstash to interact with our Kinesis queue
self.kinesis_stream.grant_read(ecs_task_role)
# Remove this when next version of kinesis module is released
# https://github.com/aws/aws-cdk/pull/6141
ecs_task_role.add_to_policy(
iam.PolicyStatement(
actions = ["kinesis:ListShards"],
effect = iam.Effect.ALLOW,
resources = [self.kinesis_stream.stream_arn]
))
# Add permissions for Logstash to store Kinesis Consumer Library (KCL) state tracking in DynamoDB
state_table.grant_full_access(ecs_task_role)
# Add permissions for Logstash to upload logs to S3 for archive
bucket_resources = []
for k, v in ctx_srv.variables.items():
if k.endswith("_log_bucket"):
bucket_resources.append('arn:aws:s3:::{0}'.format(v))
bucket_resources.append('arn:aws:s3:::{0}/*'.format(v))
ecs_task_role.add_to_policy(
iam.PolicyStatement(
actions=[
"s3:PutObject",
"s3:ListMultipartUploadParts",
"s3:ListBucket",
"s3:AbortMultipartUpload"
],
effect=iam.Effect.ALLOW,
resources=bucket_resources
))
# Task Definition
task_definition = ecs.FargateTaskDefinition(
scope = self,
id = f"{service_name}_task_definition",
cpu = ctx_srv.size.cpu,
memory_limit_mib = ctx_srv.size.ram,
execution_role = self.ecs_exec_role,
task_role = ecs_task_role,
)
log_driver = ecs.LogDriver.aws_logs(
log_group = self.log_group,
stream_prefix = service_name)
# Container Definition
container_vars = self.__get_container_vars(service_name, ctx, ctx_srv)
container = ecs.ContainerDefinition(
scope = self,
id = f"{service_name}_container_definition",
task_definition = task_definition,
image = ecs.ContainerImage.from_ecr_repository(self.ecr_repository, "latest"),
logging = log_driver,
**container_vars
)
# Service Definition
security_group = ec2.SecurityGroup(
scope = self,
id = f"{service_name}_sg",
vpc = self.vpc
)
service = ecs.FargateService(
scope = self,
id = f"{service_name}_fargate_service",
task_definition = task_definition,
cluster = self.cluster,
desired_count = getattr(ctx_srv, "desired_count", ctx.default_desired_count),
service_name = service_name,
security_group = security_group
)
scaling = service.auto_scale_task_count(
max_capacity = ctx_srv.scaling.max_capacity,
min_capacity = ctx_srv.scaling.min_capacity
)
scaling.scale_on_cpu_utilization(
id = "cpu_scaling",
target_utilization_percent = ctx_srv.scaling.target_utilization_percent,
scale_in_cooldown = core.Duration.seconds(ctx_srv.scaling.scale_in_cooldown_seconds),
scale_out_cooldown = core.Duration.seconds(ctx_srv.scaling.scale_out_cooldown_seconds),
)
def __get_container_vars(self, service_name, ctx, ctx_srv):
# Prepare container defaults
container_vars = {}
container_environment = {
"ENV_STAGE": ctx.stage,
"SERVICE_NAME": service_name,
"DEBUG_OUTPUT": ctx.debug_output,
"LS_JAVA_OPTS": "-Xms256m -Xmx{0}m".format(ctx_srv.size.ram - 256),
"KINESIS_ENDPOINT": ctx.queue.kinesis_endpoint,
"KINESIS_STREAM_NAME": self.kinesis_stream.stream_name,
"AWS_REGION": ctx.aws_region,
"DYNAMODB_STATE_TABLE_NAME": self.state_table.table_name
}
container_secrets = {}
# Get and populate service-specific variables and secrets from context
if hasattr(ctx_srv, "variables"):
for k, v in ctx_srv.variables.items():
container_environment[k.upper()] = v
if hasattr(ctx_srv, "secrets"):
for k, v in ctx_srv.secrets.items():
sm_secret = sm.Secret.from_secret_arn(
scope = self,
id = f"{k}-secret",
secret_arn = v
)
ecs_secret = ecs.Secret.from_secrets_manager(sm_secret)
secret_env_key = "{0}_SECRET".format(k.upper())
container_secrets[secret_env_key] = ecs_secret
if container_environment:
container_vars["environment"] = container_environment
if container_secrets:
container_vars["secrets"] = container_secrets
        return container_vars
| 38.870813 | 169 | 0.58481 | ["MIT"] | originsecurity/telemetry | src/cdk/stacks/outbound/stack.py | 8,124 | Python
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the noise perturb augmentation model."""
from deepspeech.frontend.audio import AudioSegment
from deepspeech.frontend.augmentor.base import AugmentorBase
from deepspeech.frontend.utility import read_manifest
class NoisePerturbAugmentor(AugmentorBase):
"""Augmentation model for adding background noise.
:param rng: Random generator object.
:type rng: random.Random
    :param min_snr_dB: Minimal signal-to-noise ratio, in decibels.
    :type min_snr_dB: float
    :param max_snr_dB: Maximal signal-to-noise ratio, in decibels.
:type max_snr_dB: float
:param noise_manifest_path: Manifest path for noise audio data.
:type noise_manifest_path: str
"""
def __init__(self, rng, min_snr_dB, max_snr_dB, noise_manifest_path):
self._min_snr_dB = min_snr_dB
self._max_snr_dB = max_snr_dB
self._rng = rng
self._noise_manifest = read_manifest(manifest_path=noise_manifest_path)
def __call__(self, x, uttid=None, train=True):
if not train:
return x
self.transform_audio(x)
return x
def transform_audio(self, audio_segment):
"""Add background noise audio.
Note that this is an in-place transformation.
:param audio_segment: Audio segment to add effects to.
:type audio_segment: AudioSegmenet|SpeechSegment
"""
noise_json = self._rng.choice(self._noise_manifest, 1, replace=False)[0]
if noise_json['duration'] < audio_segment.duration:
raise RuntimeError("The duration of sampled noise audio is smaller "
"than the audio segment to add effects to.")
diff_duration = noise_json['duration'] - audio_segment.duration
start = self._rng.uniform(0, diff_duration)
end = start + audio_segment.duration
noise_segment = AudioSegment.slice_from_file(
noise_json['audio_filepath'], start=start, end=end)
snr_dB = self._rng.uniform(self._min_snr_dB, self._max_snr_dB)
audio_segment.add_noise(
noise_segment, snr_dB, allow_downsampling=True, rng=self._rng)
| 42.046154 | 80 | 0.71094 | [
"Apache-2.0"
] | qq1440837150/DeepSpeech | deepspeech/frontend/augmentor/noise_perturb.py | 2,733 | Python |